Merge branch 'master' of https://github.com/Roblox/luau into ctv-forward-declare

JohnnyMorganz 2022-11-11 10:43:12 +00:00
commit 795ed9c789
98 changed files with 4694 additions and 3480 deletions


@ -0,0 +1,68 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Def.h"
#include "Luau/TypedAllocator.h"
#include "Luau/TypeVar.h"
#include "Luau/Variant.h"
#include <memory>
namespace Luau
{
struct Negation;
struct Conjunction;
struct Disjunction;
struct Equivalence;
struct Proposition;
using Connective = Variant<Negation, Conjunction, Disjunction, Equivalence, Proposition>;
using ConnectiveId = Connective*; // Can be, and most likely is, nullptr.
struct Negation
{
ConnectiveId connective;
};
struct Conjunction
{
ConnectiveId lhs;
ConnectiveId rhs;
};
struct Disjunction
{
ConnectiveId lhs;
ConnectiveId rhs;
};
struct Equivalence
{
ConnectiveId lhs;
ConnectiveId rhs;
};
struct Proposition
{
DefId def;
TypeId discriminantTy;
};
template<typename T>
const T* get(ConnectiveId connective)
{
return get_if<T>(connective);
}
struct ConnectiveArena
{
TypedAllocator<Connective> allocator;
ConnectiveId negation(ConnectiveId connective);
ConnectiveId conjunction(ConnectiveId lhs, ConnectiveId rhs);
ConnectiveId disjunction(ConnectiveId lhs, ConnectiveId rhs);
ConnectiveId equivalence(ConnectiveId lhs, ConnectiveId rhs);
ConnectiveId proposition(DefId def, TypeId discriminantTy);
};
} // namespace Luau
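To make the new representation concrete, here is a minimal Luau sketch (not part of the diff) of the kind of condition these connectives encode; the mapping in the comments is an assumption based on how the ConstraintGraphBuilder changes below assemble propositions, conjunctions, and negations.

-- Hypothetical illustration only: the condition below is expected to be encoded
-- roughly as Conjunction(Proposition(def(x), string), Proposition(def(y), truthy)).
local function f(x: unknown, y: number?)
    if type(x) == "string" and y then
        -- then-branch: both propositions are applied as refinements
    else
        -- else-branch: the Negation of the whole conjunction is applied
    end
end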


@ -132,15 +132,16 @@ struct HasPropConstraint
std::string prop;
};
struct RefinementConstraint
// result ~ if isSingleton D then ~D else unknown where D = discriminantType
struct SingletonOrTopTypeConstraint
{
DefId def;
TypeId resultType;
TypeId discriminantType;
};
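A hedged Luau sketch of why this constraint distinguishes singleton discriminants from wider ones; the commented refinements describe the intent of the change, not verified checker output.

local function g(x: string, y: string)
    if x == "hello" then
        -- "hello" is a singleton, so the else-branch can safely refine x by ~"hello"
    end
    if x == y then
        -- y : string is not a singleton; negating it would wrongly drop every string,
        -- so the constraint resolves the negated discriminant to unknown instead
    end
end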
using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint,
BinaryConstraint, IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint,
HasPropConstraint, RefinementConstraint>;
HasPropConstraint, SingletonOrTopTypeConstraint>;
struct Constraint
{


@ -2,6 +2,7 @@
#pragma once
#include "Luau/Ast.h"
#include "Luau/Connective.h"
#include "Luau/Constraint.h"
#include "Luau/DataFlowGraphBuilder.h"
#include "Luau/Module.h"
@ -26,11 +27,13 @@ struct DcrLogger;
struct Inference
{
TypeId ty = nullptr;
ConnectiveId connective = nullptr;
Inference() = default;
explicit Inference(TypeId ty)
explicit Inference(TypeId ty, ConnectiveId connective = nullptr)
: ty(ty)
, connective(connective)
{
}
};
@ -38,11 +41,13 @@ struct Inference
struct InferencePack
{
TypePackId tp = nullptr;
std::vector<ConnectiveId> connectives;
InferencePack() = default;
explicit InferencePack(TypePackId tp)
explicit InferencePack(TypePackId tp, const std::vector<ConnectiveId>& connectives = {})
: tp(tp)
, connectives(connectives)
{
}
};
@ -61,6 +66,14 @@ struct ConstraintGraphBuilder
// The root scope of the module we're generating constraints for.
// This is null when the CGB is initially constructed.
Scope* rootScope;
// Constraints that go straight to the solver.
std::vector<ConstraintPtr> constraints;
// Constraints that do not go to the solver right away. Other constraints
// will enqueue them during solving.
std::vector<ConstraintPtr> unqueuedConstraints;
// A mapping of AST node to TypeId.
DenseHashMap<const AstExpr*, TypeId> astTypes{nullptr};
// A mapping of AST node to TypePackId.
@ -73,6 +86,7 @@ struct ConstraintGraphBuilder
// Defining scopes for AST nodes.
DenseHashMap<const AstStatTypeAlias*, ScopePtr> astTypeAliasDefiningScopes{nullptr};
NotNull<const DataFlowGraph> dfg;
ConnectiveArena connectiveArena;
int recursionCount = 0;
@ -126,6 +140,8 @@ struct ConstraintGraphBuilder
*/
NotNull<Constraint> addConstraint(const ScopePtr& scope, std::unique_ptr<Constraint> c);
void applyRefinements(const ScopePtr& scope, Location location, ConnectiveId connective);
/**
* The entry point to the ConstraintGraphBuilder. This will construct a set
* of scopes, constraints, and free types that can be solved later.
@ -167,10 +183,10 @@ struct ConstraintGraphBuilder
* surrounding context. Used to implement bidirectional type checking.
* @return the type of the expression.
*/
Inference check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType = {});
Inference check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType = {}, bool forceSingleton = false);
Inference check(const ScopePtr& scope, AstExprConstantString* string, std::optional<TypeId> expectedType);
Inference check(const ScopePtr& scope, AstExprConstantBool* bool_, std::optional<TypeId> expectedType);
Inference check(const ScopePtr& scope, AstExprConstantString* string, std::optional<TypeId> expectedType, bool forceSingleton);
Inference check(const ScopePtr& scope, AstExprConstantBool* bool_, std::optional<TypeId> expectedType, bool forceSingleton);
Inference check(const ScopePtr& scope, AstExprLocal* local);
Inference check(const ScopePtr& scope, AstExprGlobal* global);
Inference check(const ScopePtr& scope, AstExprIndexName* indexName);
@ -180,6 +196,7 @@ struct ConstraintGraphBuilder
Inference check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType);
Inference check(const ScopePtr& scope, AstExprTypeAssertion* typeAssert);
Inference check(const ScopePtr& scope, AstExprTable* expr, std::optional<TypeId> expectedType);
std::tuple<TypeId, TypeId, ConnectiveId> checkBinary(const ScopePtr& scope, AstExprBinary* binary, std::optional<TypeId> expectedType);
TypePackId checkLValues(const ScopePtr& scope, AstArray<AstExpr*> exprs);
@ -243,16 +260,8 @@ struct ConstraintGraphBuilder
void prepopulateGlobalScope(const ScopePtr& globalScope, AstStatBlock* program);
};
/**
* Collects a vector of borrowed constraints from the scope and all its child
* scopes. It is important to only call this function when you're done adding
* constraints to the scope or its descendants, lest the borrowed pointers
* become invalid due to a container reallocation.
* @param rootScope the root scope of the scope graph to collect constraints
* from.
* @return a list of pointers to constraints contained within the scope graph.
* None of these pointers should be null.
/** Borrow a vector of pointers from a vector of owning pointers to constraints.
*/
std::vector<NotNull<Constraint>> collectConstraints(NotNull<Scope> rootScope);
std::vector<NotNull<Constraint>> borrowConstraints(const std::vector<ConstraintPtr>& constraints);
} // namespace Luau


@ -76,8 +76,8 @@ struct ConstraintSolver
DcrLogger* logger;
explicit ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, ModuleName moduleName, NotNull<ModuleResolver> moduleResolver,
std::vector<RequireCycle> requireCycles, DcrLogger* logger);
explicit ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, std::vector<NotNull<Constraint>> constraints,
ModuleName moduleName, NotNull<ModuleResolver> moduleResolver, std::vector<RequireCycle> requireCycles, DcrLogger* logger);
// Randomize the order in which to dispatch constraints
void randomize(unsigned seed);
@ -110,7 +110,7 @@ struct ConstraintSolver
bool tryDispatch(const FunctionCallConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const RefinementConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint);
// for a, ... in some_table do
// also handles __iter metamethod


@ -17,10 +17,8 @@ struct SingletonTypes;
using ModulePtr = std::shared_ptr<Module>;
bool isSubtype(
TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice,
bool anyIsTop = true);
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);
class TypeIds
{
@ -169,12 +167,26 @@ struct NormalizedStringType
bool isSubtype(const NormalizedStringType& subStr, const NormalizedStringType& superStr);
// A normalized function type is either `never` (represented by `nullopt`)
// A normalized function type can be `never`, the top function type `function`,
// or an intersection of function types.
// NOTE: type normalization can fail on function types with generics
// (e.g. because we do not support unions and intersections of generic type packs),
// so this type may contain `error`.
using NormalizedFunctionType = std::optional<TypeIds>;
//
// NOTE: type normalization can fail on function types with generics (e.g.
// because we do not support unions and intersections of generic type packs), so
// this type may contain `error`.
struct NormalizedFunctionType
{
NormalizedFunctionType();
bool isTop = false;
// TODO: Remove this wrapping optional when clipping
// FFlagLuauNegatedFunctionTypes.
std::optional<TypeIds> parts;
void resetToNever();
void resetToTop();
bool isNever() const;
};
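A short, hedged Luau example of what the explicit top function type is for, tying this struct to the new PrimitiveTypeVar::Function and the `typeof(x) == "function"` guard handled later in the diff:

local function callMaybe(x: unknown)
    if typeof(x) == "function" then
        -- x is narrowed with the top `function` type
    else
        -- the complement ~function is only expressible because normalization
        -- now tracks the top function type (isTop) explicitly
    end
end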
// A normalized generic/free type is a union, where each option is of the form (X & T) where
// * X is either a free type or a generic
@ -234,12 +246,14 @@ struct NormalizedType
NormalizedType(NotNull<SingletonTypes> singletonTypes);
NormalizedType(const NormalizedType&) = delete;
NormalizedType(NormalizedType&&) = default;
NormalizedType() = delete;
~NormalizedType() = default;
NormalizedType(const NormalizedType&) = delete;
NormalizedType& operator=(const NormalizedType&) = delete;
NormalizedType(NormalizedType&&) = default;
NormalizedType& operator=(NormalizedType&&) = default;
NormalizedType& operator=(NormalizedType&) = delete;
};
class Normalizer
@ -291,7 +305,7 @@ public:
bool unionNormalWithTy(NormalizedType& here, TypeId there, int ignoreSmallerTyvars = -1);
// ------- Negations
NormalizedType negateNormal(const NormalizedType& here);
std::optional<NormalizedType> negateNormal(const NormalizedType& here);
TypeIds negateAll(const TypeIds& theres);
TypeId negate(TypeId there);
void subtractPrimitive(NormalizedType& here, TypeId ty);


@ -38,11 +38,6 @@ struct Scope
std::unordered_map<Symbol, Binding> bindings;
TypePackId returnType;
std::optional<TypePackId> varargPack;
// All constraints belonging to this scope.
std::vector<ConstraintPtr> constraints;
// Constraints belonging to this scope that are queued manually by other
// constraints.
std::vector<ConstraintPtr> unqueuedConstraints;
TypeLevel level;


@ -34,7 +34,6 @@ struct ToStringOptions
size_t maxTableLength = size_t(FInt::LuauTableTypeMaximumStringifierLength); // Only applied to TableTypeVars
size_t maxTypeLength = size_t(FInt::LuauTypeMaximumStringifierLength);
ToStringNameMap nameMap;
std::optional<ToStringNameMap> DEPRECATED_nameMap;
std::shared_ptr<Scope> scope; // If present, module names will be added and types that are not available in scope will be marked as 'invalid'
std::vector<std::string> namedFunctionOverrideArgNames; // If present, named function argument names will be overridden
};
@ -42,7 +41,6 @@ struct ToStringOptions
struct ToStringResult
{
std::string name;
ToStringNameMap DEPRECATED_nameMap;
bool invalid = false;
bool error = false;


@ -281,14 +281,14 @@ private:
TypeId singletonType(bool value);
TypeId singletonType(std::string value);
TypeIdPredicate mkTruthyPredicate(bool sense);
TypeIdPredicate mkTruthyPredicate(bool sense, TypeId emptySetTy);
// TODO: Return TypeId only.
std::optional<TypeId> filterMapImpl(TypeId type, TypeIdPredicate predicate);
std::pair<std::optional<TypeId>, bool> filterMap(TypeId type, TypeIdPredicate predicate);
public:
std::pair<std::optional<TypeId>, bool> pickTypesFromSense(TypeId type, bool sense);
std::pair<std::optional<TypeId>, bool> pickTypesFromSense(TypeId type, bool sense, TypeId emptySetTy);
private:
TypeId unionOfTypes(TypeId a, TypeId b, const ScopePtr& scope, const Location& location, bool unifyFreeTypes = true);


@ -35,7 +35,7 @@ std::vector<TypeId> flatten(TypeArena& arena, NotNull<SingletonTypes> singletonT
* identity) types.
* @param types the input type list to reduce.
* @returns the reduced type list.
*/
*/
std::vector<TypeId> reduceUnion(const std::vector<TypeId>& types);
/**
@ -45,7 +45,7 @@ std::vector<TypeId> reduceUnion(const std::vector<TypeId>& types);
* @param arena the type arena to allocate the new type in, if necessary
* @param ty the type to remove nil from
* @returns a type with nil removed, or nil itself if that were the only option.
*/
*/
TypeId stripNil(NotNull<SingletonTypes> singletonTypes, TypeArena& arena, TypeId ty);
} // namespace Luau


@ -115,6 +115,7 @@ struct PrimitiveTypeVar
Number,
String,
Thread,
Function,
};
Type type;
@ -504,14 +505,6 @@ struct NeverTypeVar
{
};
// Invariant 1: there should never be a reason why such UseTypeVar exists without it mapping to another type.
// Invariant 2: UseTypeVar should always disappear across modules.
struct UseTypeVar
{
DefId def;
NotNull<Scope> scope;
};
// ~T
// TODO: Some simplification step that overwrites the type graph to make sure negation
// types disappear from the user's view, and (?) a debug flag to disable that
@ -522,9 +515,9 @@ struct NegationTypeVar
using ErrorTypeVar = Unifiable::Error;
using TypeVariant = Unifiable::Variant<TypeId, PrimitiveTypeVar, BlockedTypeVar, PendingExpansionTypeVar, SingletonTypeVar, FunctionTypeVar,
TableTypeVar, MetatableTypeVar, ClassTypeVar, AnyTypeVar, UnionTypeVar, IntersectionTypeVar, LazyTypeVar, UnknownTypeVar, NeverTypeVar,
UseTypeVar, NegationTypeVar>;
using TypeVariant =
Unifiable::Variant<TypeId, PrimitiveTypeVar, BlockedTypeVar, PendingExpansionTypeVar, SingletonTypeVar, FunctionTypeVar, TableTypeVar,
MetatableTypeVar, ClassTypeVar, AnyTypeVar, UnionTypeVar, IntersectionTypeVar, LazyTypeVar, UnknownTypeVar, NeverTypeVar, NegationTypeVar>;
struct TypeVar final
{
@ -644,13 +637,14 @@ public:
const TypeId stringType;
const TypeId booleanType;
const TypeId threadType;
const TypeId functionType;
const TypeId trueType;
const TypeId falseType;
const TypeId anyType;
const TypeId unknownType;
const TypeId neverType;
const TypeId errorType;
const TypeId falsyType; // No type binding!
const TypeId falsyType; // No type binding!
const TypeId truthyType; // No type binding!
const TypePackId anyTypePack;


@ -61,7 +61,6 @@ struct Unifier
ErrorVec errors;
Location location;
Variance variance = Covariant;
bool anyIsTop = false; // If true, we consider any to be a top type. If false, it is a familiar but weird mix of top and bottom all at once.
bool normalize; // Normalize unions and intersections if necessary
bool useScopes = false; // If true, we use the scope hierarchy rather than TypeLevels
CountMismatch::Context ctx = CountMismatch::Arg;
@ -131,6 +130,7 @@ public:
Unifier makeChildUnifier();
void reportError(TypeError err);
LUAU_NOINLINE void reportError(Location location, TypeErrorData data);
private:
bool isNonstrictMode() const;


@ -58,13 +58,15 @@ public:
constexpr int tid = getTypeId<T>();
typeId = tid;
new (&storage) TT(value);
new (&storage) TT(std::forward<T>(value));
}
Variant(const Variant& other)
{
static constexpr FnCopy table[sizeof...(Ts)] = {&fnCopy<Ts>...};
typeId = other.typeId;
tableCopy[typeId](&storage, &other.storage);
table[typeId](&storage, &other.storage);
}
Variant(Variant&& other)
@ -192,7 +194,6 @@ private:
return *static_cast<const T*>(lhs) == *static_cast<const T*>(rhs);
}
static constexpr FnCopy tableCopy[sizeof...(Ts)] = {&fnCopy<Ts>...};
static constexpr FnMove tableMove[sizeof...(Ts)] = {&fnMove<Ts>...};
static constexpr FnDtor tableDtor[sizeof...(Ts)] = {&fnDtor<Ts>...};


@ -155,10 +155,6 @@ struct GenericTypeVarVisitor
{
return visit(ty);
}
virtual bool visit(TypeId ty, const UseTypeVar& utv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const NegationTypeVar& ntv)
{
return visit(ty);
@ -321,8 +317,6 @@ struct GenericTypeVarVisitor
traverse(a);
}
}
else if (auto utv = get<UseTypeVar>(ty))
visit(ty, *utv);
else if (auto ntv = get<NegationTypeVar>(ty))
visit(ty, *ntv);
else if (!FFlag::LuauCompleteVisitor)


@ -17,7 +17,9 @@
LUAU_FASTFLAGVARIABLE(LuauSetMetaTableArgsCheck, false)
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauBuiltInMetatableNoBadSynthetic, false)
LUAU_FASTFLAG(LuauOptionalNextKey)
LUAU_FASTFLAG(LuauReportShadowedTypeAlias)
LUAU_FASTFLAG(LuauNewLibraryTypeNames)
/** FIXME: Many of these type definitions are not quite completely accurate.
*
@ -276,18 +278,38 @@ void registerBuiltinGlobals(TypeChecker& typeChecker)
addGlobalBinding(typeChecker, "string", it->second.type, "@luau");
// next<K, V>(t: Table<K, V>, i: K?) -> (K, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(typeChecker, arena, genericK)}});
addGlobalBinding(typeChecker, "next",
arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})}), "@luau");
if (FFlag::LuauOptionalNextKey)
{
// next<K, V>(t: Table<K, V>, i: K?) -> (K?, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(typeChecker, arena, genericK)}});
TypePackId nextRetsTypePack = arena.addTypePack(TypePack{{makeOption(typeChecker, arena, genericK), genericV}});
addGlobalBinding(typeChecker, "next", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, nextRetsTypePack}), "@luau");
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, nilType}});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, nextRetsTypePack});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, nilType}});
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K, V), Table<K, V>, nil)
addGlobalBinding(typeChecker, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K?, V), Table<K, V>, nil)
addGlobalBinding(
typeChecker, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
}
else
{
// next<K, V>(t: Table<K, V>, i: K?) -> (K, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(typeChecker, arena, genericK)}});
addGlobalBinding(typeChecker, "next",
arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})}), "@luau");
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, nilType}});
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K, V), Table<K, V>, nil)
addGlobalBinding(
typeChecker, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
}
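For reference, a small Luau example of what the flagged change means at a call site; the commented types reflect the intent stated in the comments above rather than verified output.

local t = { "a", "b", "c" }
-- Under LuauOptionalNextKey: next<K, V>(t: {[K]: V}, i: K?) -> (K?, V)
local k, v = next(t) -- k : number?, v : string
-- Without the flag, next keeps its old (K, V) return type, so k : number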
TypeId genericMT = arena.addType(GenericTypeVar{"MT"});
@ -319,7 +341,12 @@ void registerBuiltinGlobals(TypeChecker& typeChecker)
if (TableTypeVar* ttv = getMutable<TableTypeVar>(pair.second.typeId))
{
if (!ttv->name)
ttv->name = toString(pair.first);
{
if (FFlag::LuauNewLibraryTypeNames)
ttv->name = "typeof(" + toString(pair.first) + ")";
else
ttv->name = toString(pair.first);
}
}
}
@ -370,18 +397,38 @@ void registerBuiltinGlobals(Frontend& frontend)
addGlobalBinding(frontend, "string", it->second.type, "@luau");
// next<K, V>(t: Table<K, V>, i: K?) -> (K, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(frontend, arena, genericK)}});
addGlobalBinding(frontend, "next",
arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})}), "@luau");
if (FFlag::LuauOptionalNextKey)
{
// next<K, V>(t: Table<K, V>, i: K?) -> (K?, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(frontend, arena, genericK)}});
TypePackId nextRetsTypePack = arena.addTypePack(TypePack{{makeOption(frontend, arena, genericK), genericV}});
addGlobalBinding(frontend, "next", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, nextRetsTypePack}), "@luau");
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, frontend.singletonTypes->nilType}});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, nextRetsTypePack});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, frontend.singletonTypes->nilType}});
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K, V), Table<K, V>, nil)
addGlobalBinding(frontend, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K?, V), Table<K, V>, nil)
addGlobalBinding(
frontend, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
}
else
{
// next<K, V>(t: Table<K, V>, i: K?) -> (K, V)
TypePackId nextArgsTypePack = arena.addTypePack(TypePack{{mapOfKtoV, makeOption(frontend, arena, genericK)}});
addGlobalBinding(frontend, "next",
arena.addType(FunctionTypeVar{{genericK, genericV}, {}, nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})}), "@luau");
TypePackId pairsArgsTypePack = arena.addTypePack({mapOfKtoV});
TypeId pairsNext = arena.addType(FunctionTypeVar{nextArgsTypePack, arena.addTypePack(TypePack{{genericK, genericV}})});
TypePackId pairsReturnTypePack = arena.addTypePack(TypePack{{pairsNext, mapOfKtoV, frontend.singletonTypes->nilType}});
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K?) -> (K, V), Table<K, V>, nil)
addGlobalBinding(
frontend, "pairs", arena.addType(FunctionTypeVar{{genericK, genericV}, {}, pairsArgsTypePack, pairsReturnTypePack}), "@luau");
}
TypeId genericMT = arena.addType(GenericTypeVar{"MT"});
@ -413,7 +460,12 @@ void registerBuiltinGlobals(Frontend& frontend)
if (TableTypeVar* ttv = getMutable<TableTypeVar>(pair.second.typeId))
{
if (!ttv->name)
ttv->name = toString(pair.first);
{
if (FFlag::LuauNewLibraryTypeNames)
ttv->name = "typeof(" + toString(pair.first) + ")";
else
ttv->name = toString(pair.first);
}
}
}
@ -623,7 +675,7 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionAssert(
if (head.size() > 0)
{
auto [ty, ok] = typechecker.pickTypesFromSense(head[0], true);
auto [ty, ok] = typechecker.pickTypesFromSense(head[0], true, typechecker.singletonTypes->nilType);
if (FFlag::LuauUnknownAndNeverType)
{
if (get<NeverTypeVar>(*ty))
@ -714,8 +766,7 @@ static bool dcrMagicFunctionPack(MagicFunctionCallContext context)
result = arena->addType(UnionTypeVar{std::move(options)});
TypeId numberType = context.solver->singletonTypes->numberType;
TypeId packedTable = arena->addType(
TableTypeVar{{{"n", {numberType}}}, TableIndexer(numberType, result), {}, TableState::Sealed});
TypeId packedTable = arena->addType(TableTypeVar{{{"n", {numberType}}}, TableIndexer(numberType, result), {}, TableState::Sealed});
TypePackId tableTypePack = arena->addTypePack({packedTable});
asMutable(context.result)->ty.emplace<BoundTypePack>(tableTypePack);
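A hedged example of the table type this fast path synthesizes for table.pack:

local packed = table.pack(1, "two", true)
-- Intended result type, per dcrMagicFunctionPack above:
-- { n: number, [number]: number | string | boolean }
print(packed.n) --> 3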


@ -62,7 +62,6 @@ struct TypeCloner
void operator()(const LazyTypeVar& t);
void operator()(const UnknownTypeVar& t);
void operator()(const NeverTypeVar& t);
void operator()(const UseTypeVar& t);
void operator()(const NegationTypeVar& t);
};
@ -338,12 +337,6 @@ void TypeCloner::operator()(const NeverTypeVar& t)
defaultClone(t);
}
void TypeCloner::operator()(const UseTypeVar& t)
{
TypeId result = dest.addType(BoundTypeVar{follow(typeId)});
seenTypes[typeId] = result;
}
void TypeCloner::operator()(const NegationTypeVar& t)
{
TypeId result = dest.addType(AnyTypeVar{});


@ -0,0 +1,32 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Connective.h"
namespace Luau
{
ConnectiveId ConnectiveArena::negation(ConnectiveId connective)
{
return NotNull{allocator.allocate(Negation{connective})};
}
ConnectiveId ConnectiveArena::conjunction(ConnectiveId lhs, ConnectiveId rhs)
{
return NotNull{allocator.allocate(Conjunction{lhs, rhs})};
}
ConnectiveId ConnectiveArena::disjunction(ConnectiveId lhs, ConnectiveId rhs)
{
return NotNull{allocator.allocate(Disjunction{lhs, rhs})};
}
ConnectiveId ConnectiveArena::equivalence(ConnectiveId lhs, ConnectiveId rhs)
{
return NotNull{allocator.allocate(Equivalence{lhs, rhs})};
}
ConnectiveId ConnectiveArena::proposition(DefId def, TypeId discriminantTy)
{
return NotNull{allocator.allocate(Proposition{def, discriminantTy})};
}
} // namespace Luau


@ -52,6 +52,70 @@ static bool matchSetmetatable(const AstExprCall& call)
return true;
}
struct TypeGuard
{
bool isTypeof;
AstExpr* target;
std::string type;
};
static std::optional<TypeGuard> matchTypeGuard(const AstExprBinary* binary)
{
if (binary->op != AstExprBinary::CompareEq && binary->op != AstExprBinary::CompareNe)
return std::nullopt;
AstExpr* left = binary->left;
AstExpr* right = binary->right;
if (right->is<AstExprCall>())
std::swap(left, right);
if (!right->is<AstExprConstantString>())
return std::nullopt;
AstExprCall* call = left->as<AstExprCall>();
AstExprConstantString* string = right->as<AstExprConstantString>();
if (!call || !string)
return std::nullopt;
AstExprGlobal* callee = call->func->as<AstExprGlobal>();
if (!callee)
return std::nullopt;
if (callee->name != "type" && callee->name != "typeof")
return std::nullopt;
if (call->args.size != 1)
return std::nullopt;
return TypeGuard{
/*isTypeof*/ callee->name == "typeof",
/*target*/ call->args.data[0],
/*type*/ std::string(string->value.data, string->value.size),
};
}
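The shapes matchTypeGuard accepts can be summarized with a short Luau sketch; the annotations are assumptions derived from the matcher above.

local function h(x: unknown)
    if type(x) == "number" then end -- matched: CompareEq with the call on the left
    if "number" == type(x) then end -- matched: operands are swapped before checking
    if typeof(x) ~= "nil" then end  -- matched: CompareNe qualifies as well
    local t = type(x)
    if t == "number" then end       -- not matched: the non-string side is no longer a type()/typeof() call
end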
namespace
{
struct Checkpoint
{
size_t offset;
};
Checkpoint checkpoint(const ConstraintGraphBuilder* cgb)
{
return Checkpoint{cgb->constraints.size()};
}
template<typename F>
void forEachConstraint(const Checkpoint& start, const Checkpoint& end, const ConstraintGraphBuilder* cgb, F f)
{
for (size_t i = start.offset; i < end.offset; ++i)
f(cgb->constraints[i]);
}
} // namespace
ConstraintGraphBuilder::ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena,
NotNull<ModuleResolver> moduleResolver, NotNull<SingletonTypes> singletonTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope,
DcrLogger* logger, NotNull<DataFlowGraph> dfg)
@ -99,12 +163,107 @@ ScopePtr ConstraintGraphBuilder::childScope(AstNode* node, const ScopePtr& paren
NotNull<Constraint> ConstraintGraphBuilder::addConstraint(const ScopePtr& scope, const Location& location, ConstraintV cv)
{
return NotNull{scope->constraints.emplace_back(new Constraint{NotNull{scope.get()}, location, std::move(cv)}).get()};
return NotNull{constraints.emplace_back(new Constraint{NotNull{scope.get()}, location, std::move(cv)}).get()};
}
NotNull<Constraint> ConstraintGraphBuilder::addConstraint(const ScopePtr& scope, std::unique_ptr<Constraint> c)
{
return NotNull{scope->constraints.emplace_back(std::move(c)).get()};
return NotNull{constraints.emplace_back(std::move(c)).get()};
}
static void unionRefinements(const std::unordered_map<DefId, TypeId>& lhs, const std::unordered_map<DefId, TypeId>& rhs,
std::unordered_map<DefId, TypeId>& dest, NotNull<TypeArena> arena)
{
for (auto [def, ty] : lhs)
{
auto rhsIt = rhs.find(def);
if (rhsIt == rhs.end())
continue;
std::vector<TypeId> discriminants{{ty, rhsIt->second}};
if (auto destIt = dest.find(def); destIt != dest.end())
discriminants.push_back(destIt->second);
dest[def] = arena->addType(UnionTypeVar{std::move(discriminants)});
}
}
static void computeRefinement(const ScopePtr& scope, ConnectiveId connective, std::unordered_map<DefId, TypeId>* refis, bool sense,
NotNull<TypeArena> arena, bool eq, std::vector<SingletonOrTopTypeConstraint>* constraints)
{
using RefinementMap = std::unordered_map<DefId, TypeId>;
if (!connective)
return;
else if (auto negation = get<Negation>(connective))
return computeRefinement(scope, negation->connective, refis, !sense, arena, eq, constraints);
else if (auto conjunction = get<Conjunction>(connective))
{
RefinementMap lhsRefis;
RefinementMap rhsRefis;
computeRefinement(scope, conjunction->lhs, sense ? refis : &lhsRefis, sense, arena, eq, constraints);
computeRefinement(scope, conjunction->rhs, sense ? refis : &rhsRefis, sense, arena, eq, constraints);
if (!sense)
unionRefinements(lhsRefis, rhsRefis, *refis, arena);
}
else if (auto disjunction = get<Disjunction>(connective))
{
RefinementMap lhsRefis;
RefinementMap rhsRefis;
computeRefinement(scope, disjunction->lhs, sense ? &lhsRefis : refis, sense, arena, eq, constraints);
computeRefinement(scope, disjunction->rhs, sense ? &rhsRefis : refis, sense, arena, eq, constraints);
if (sense)
unionRefinements(lhsRefis, rhsRefis, *refis, arena);
}
else if (auto equivalence = get<Equivalence>(connective))
{
computeRefinement(scope, equivalence->lhs, refis, sense, arena, true, constraints);
computeRefinement(scope, equivalence->rhs, refis, sense, arena, true, constraints);
}
else if (auto proposition = get<Proposition>(connective))
{
TypeId discriminantTy = proposition->discriminantTy;
if (!sense && !eq)
discriminantTy = arena->addType(NegationTypeVar{proposition->discriminantTy});
else if (!sense && eq)
{
discriminantTy = arena->addType(BlockedTypeVar{});
constraints->push_back(SingletonOrTopTypeConstraint{discriminantTy, proposition->discriminantTy});
}
if (auto it = refis->find(proposition->def); it != refis->end())
(*refis)[proposition->def] = arena->addType(IntersectionTypeVar{{discriminantTy, it->second}});
else
(*refis)[proposition->def] = discriminantTy;
}
}
void ConstraintGraphBuilder::applyRefinements(const ScopePtr& scope, Location location, ConnectiveId connective)
{
if (!connective)
return;
std::unordered_map<DefId, TypeId> refinements;
std::vector<SingletonOrTopTypeConstraint> constraints;
computeRefinement(scope, connective, &refinements, /*sense*/ true, arena, /*eq*/ false, &constraints);
for (auto [def, discriminantTy] : refinements)
{
std::optional<TypeId> defTy = scope->lookup(def);
if (!defTy)
ice->ice("Every DefId must map to a type!");
TypeId resultTy = arena->addType(IntersectionTypeVar{{*defTy, discriminantTy}});
scope->dcrRefinements[def] = resultTy;
}
for (auto& c : constraints)
addConstraint(scope, location, c);
}
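A hedged end-to-end illustration of computeRefinement and applyRefinements on a compound condition; the refinements in the comments describe the intended outcome.

local function both(a: string?, b: number?)
    if a and b then
        -- sense = true on a conjunction: each operand's proposition is intersected
        -- with its definition, so a : string and b : number in this branch
    else
        -- sense = false negates the conjunction: the per-operand refinements are
        -- unioned, so either a or b may still be nil here
    end
end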
void ConstraintGraphBuilder::visit(AstStatBlock* block)
@ -250,14 +409,33 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
if (value->is<AstExprConstantNil>())
{
// HACK: we leave nil-initialized things floating under the assumption that they will later be populated.
// See the test TypeInfer/infer_locals_with_nil_value.
// Better flow awareness should make this obsolete.
// HACK: we leave nil-initialized things floating under the
// assumption that they will later be populated.
//
// See the test TypeInfer/infer_locals_with_nil_value. Better flow
// awareness should make this obsolete.
if (!varTypes[i])
varTypes[i] = freshType(scope);
}
else if (i == local->values.size - 1)
// Only function calls and vararg expressions can produce packs. All
// other expressions produce exactly one value.
else if (i != local->values.size - 1 || (!value->is<AstExprCall>() && !value->is<AstExprVarargs>()))
{
std::optional<TypeId> expectedType;
if (hasAnnotation)
expectedType = varTypes.at(i);
TypeId exprType = check(scope, value, expectedType).ty;
if (i < varTypes.size())
{
if (varTypes[i])
addConstraint(scope, local->location, SubtypeConstraint{exprType, varTypes[i]});
else
varTypes[i] = exprType;
}
}
else
{
std::vector<TypeId> expectedTypes;
if (hasAnnotation)
@ -286,21 +464,6 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
addConstraint(scope, local->location, PackSubtypeConstraint{exprPack, tailPack});
}
}
else
{
std::optional<TypeId> expectedType;
if (hasAnnotation)
expectedType = varTypes.at(i);
TypeId exprType = check(scope, value, expectedType).ty;
if (i < varTypes.size())
{
if (varTypes[i])
addConstraint(scope, local->location, SubtypeConstraint{varTypes[i], exprType});
else
varTypes[i] = exprType;
}
}
}
for (size_t i = 0; i < local->vars.size; ++i)
@ -377,6 +540,9 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatForIn* forIn)
TypeId ty = freshType(loopScope);
loopScope->bindings[var] = Binding{ty, var->location};
variableTypes.push_back(ty);
if (auto def = dfg->getDef(var))
loopScope->dcrRefinements[*def] = ty;
}
// It is always ok to provide too few variables, so we give this pack a free tail.
@ -407,20 +573,6 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatRepeat* repeat)
check(repeatScope, repeat->condition);
}
void addConstraints(Constraint* constraint, NotNull<Scope> scope)
{
scope->constraints.reserve(scope->constraints.size() + scope->constraints.size());
for (const auto& c : scope->constraints)
constraint->dependencies.push_back(NotNull{c.get()});
for (const auto& c : scope->unqueuedConstraints)
constraint->dependencies.push_back(NotNull{c.get()});
for (NotNull<Scope> childScope : scope->children)
addConstraints(constraint, childScope);
}
void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocalFunction* function)
{
// Local
@ -438,12 +590,17 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocalFunction*
FunctionSignature sig = checkFunctionSignature(scope, function->func);
sig.bodyScope->bindings[function->name] = Binding{sig.signature, function->func->location};
auto start = checkpoint(this);
checkFunctionBody(sig.bodyScope, function->func);
auto end = checkpoint(this);
NotNull<Scope> constraintScope{sig.signatureScope ? sig.signatureScope.get() : sig.bodyScope.get()};
std::unique_ptr<Constraint> c =
std::make_unique<Constraint>(constraintScope, function->name->location, GeneralizationConstraint{functionType, sig.signature});
addConstraints(c.get(), NotNull(sig.bodyScope.get()));
forEachConstraint(start, end, this, [&c](const ConstraintPtr& constraint) {
c->dependencies.push_back(NotNull{constraint.get()});
});
addConstraint(scope, std::move(c));
}
@ -511,12 +668,17 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* funct
LUAU_ASSERT(functionType != nullptr);
auto start = checkpoint(this);
checkFunctionBody(sig.bodyScope, function->func);
auto end = checkpoint(this);
NotNull<Scope> constraintScope{sig.signatureScope ? sig.signatureScope.get() : sig.bodyScope.get()};
std::unique_ptr<Constraint> c =
std::make_unique<Constraint>(constraintScope, function->name->location, GeneralizationConstraint{functionType, sig.signature});
addConstraints(c.get(), NotNull(sig.bodyScope.get()));
forEachConstraint(start, end, this, [&c](const ConstraintPtr& constraint) {
c->dependencies.push_back(NotNull{constraint.get()});
});
addConstraint(scope, std::move(c));
}
@ -569,14 +731,16 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatIf* ifStatement
// TODO: Optimization opportunity, the interior scope of the condition could be
// reused for the then body, so we don't need to refine twice.
ScopePtr condScope = childScope(ifStatement->condition, scope);
check(condScope, ifStatement->condition, std::nullopt);
auto [_, connective] = check(condScope, ifStatement->condition, std::nullopt);
ScopePtr thenScope = childScope(ifStatement->thenbody, scope);
applyRefinements(thenScope, Location{}, connective);
visit(thenScope, ifStatement->thenbody);
if (ifStatement->elsebody)
{
ScopePtr elseScope = childScope(ifStatement->elsebody, scope);
applyRefinements(elseScope, Location{}, connectiveArena.negation(connective));
visit(elseScope, ifStatement->elsebody);
}
}
@ -846,8 +1010,7 @@ InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr*
InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExprCall* call, const std::vector<TypeId>& expectedTypes)
{
TypeId fnType = check(scope, call->func).ty;
const size_t constraintIndex = scope->constraints.size();
const size_t scopeIndex = scopes.size();
auto startCheckpoint = checkpoint(this);
std::vector<TypeId> args;
@ -876,8 +1039,7 @@ InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExprCa
}
else
{
const size_t constraintEndIndex = scope->constraints.size();
const size_t scopeEndIndex = scopes.size();
auto endCheckpoint = checkpoint(this);
astOriginalCallTypes[call->func] = fnType;
@ -888,29 +1050,22 @@ InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExprCa
FunctionTypeVar ftv(TypeLevel{}, scope.get(), argPack, rets);
TypeId inferredFnType = arena->addType(ftv);
scope->unqueuedConstraints.push_back(
unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, InstantiationConstraint{instantiatedType, fnType}));
NotNull<const Constraint> ic(scope->unqueuedConstraints.back().get());
NotNull<const Constraint> ic(unqueuedConstraints.back().get());
scope->unqueuedConstraints.push_back(
unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, SubtypeConstraint{inferredFnType, instantiatedType}));
NotNull<Constraint> sc(scope->unqueuedConstraints.back().get());
NotNull<Constraint> sc(unqueuedConstraints.back().get());
// We force constraints produced by checking function arguments to wait
// until after we have resolved the constraint on the function itself.
// This ensures, for instance, that we start inferring the contents of
// lambdas under the assumption that their arguments and return types
// will be compatible with the enclosing function call.
for (size_t ci = constraintIndex; ci < constraintEndIndex; ++ci)
scope->constraints[ci]->dependencies.push_back(sc);
for (size_t si = scopeIndex; si < scopeEndIndex; ++si)
{
for (auto& c : scopes[si].second->constraints)
{
c->dependencies.push_back(sc);
}
}
forEachConstraint(startCheckpoint, endCheckpoint, this, [sc](const ConstraintPtr& constraint) {
constraint->dependencies.push_back(sc);
});
addConstraint(scope, call->func->location,
FunctionCallConstraint{
@ -925,7 +1080,7 @@ InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExprCa
}
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType)
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType, bool forceSingleton)
{
RecursionCounter counter{&recursionCount};
@ -938,13 +1093,13 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, st
Inference result;
if (auto group = expr->as<AstExprGroup>())
result = check(scope, group->expr, expectedType);
result = check(scope, group->expr, expectedType, forceSingleton);
else if (auto stringExpr = expr->as<AstExprConstantString>())
result = check(scope, stringExpr, expectedType);
result = check(scope, stringExpr, expectedType, forceSingleton);
else if (expr->is<AstExprConstantNumber>())
result = Inference{singletonTypes->numberType};
else if (auto boolExpr = expr->as<AstExprConstantBool>())
result = check(scope, boolExpr, expectedType);
result = check(scope, boolExpr, expectedType, forceSingleton);
else if (expr->is<AstExprConstantNil>())
result = Inference{singletonTypes->nilType};
else if (auto local = expr->as<AstExprLocal>())
@ -999,8 +1154,11 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, st
return result;
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantString* string, std::optional<TypeId> expectedType)
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantString* string, std::optional<TypeId> expectedType, bool forceSingleton)
{
if (forceSingleton)
return Inference{arena->addType(SingletonTypeVar{StringSingleton{std::string{string->value.data, string->value.size}}})};
if (expectedType)
{
const TypeId expectedTy = follow(*expectedType);
@ -1020,12 +1178,15 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantSt
return Inference{singletonTypes->stringType};
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantBool* boolExpr, std::optional<TypeId> expectedType)
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantBool* boolExpr, std::optional<TypeId> expectedType, bool forceSingleton)
{
const TypeId singletonType = boolExpr->value ? singletonTypes->trueType : singletonTypes->falseType;
if (forceSingleton)
return Inference{singletonType};
if (expectedType)
{
const TypeId expectedTy = follow(*expectedType);
const TypeId singletonType = boolExpr->value ? singletonTypes->trueType : singletonTypes->falseType;
if (get<BlockedTypeVar>(expectedTy) || get<PendingExpansionTypeVar>(expectedTy))
{
@ -1045,8 +1206,8 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprConstantBo
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprLocal* local)
{
std::optional<TypeId> resultTy;
if (auto def = dfg->getDef(local))
auto def = dfg->getDef(local);
if (def)
resultTy = scope->lookup(*def);
if (!resultTy)
@ -1058,7 +1219,10 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprLocal* loc
if (!resultTy)
return Inference{singletonTypes->errorRecoveryType()}; // TODO: replace with ice, locals should never exist before its definition.
return Inference{*resultTy};
if (def)
return Inference{*resultTy, connectiveArena.proposition(*def, singletonTypes->truthyType)};
else
return Inference{*resultTy};
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprGlobal* global)
@ -1107,20 +1271,23 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIndexExpr*
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprUnary* unary)
{
TypeId operandType = check(scope, unary->expr).ty;
auto [operandType, connective] = check(scope, unary->expr);
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, unary->location, UnaryConstraint{unary->op, operandType, resultType});
return Inference{resultType};
if (unary->op == AstExprUnary::Not)
return Inference{resultType, connectiveArena.negation(connective)};
else
return Inference{resultType};
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binary, std::optional<TypeId> expectedType)
{
TypeId leftType = check(scope, binary->left, expectedType).ty;
TypeId rightType = check(scope, binary->right, expectedType).ty;
auto [leftType, rightType, connective] = checkBinary(scope, binary, expectedType);
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, binary->location, BinaryConstraint{binary->op, leftType, rightType, resultType});
return Inference{resultType};
return Inference{resultType, std::move(connective)};
}
Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType)
@ -1147,6 +1314,106 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTypeAssert
return Inference{resolveType(scope, typeAssert->annotation)};
}
std::tuple<TypeId, TypeId, ConnectiveId> ConstraintGraphBuilder::checkBinary(
const ScopePtr& scope, AstExprBinary* binary, std::optional<TypeId> expectedType)
{
if (binary->op == AstExprBinary::And)
{
auto [leftType, leftConnective] = check(scope, binary->left, expectedType);
ScopePtr rightScope = childScope(binary->right, scope);
applyRefinements(rightScope, binary->right->location, leftConnective);
auto [rightType, rightConnective] = check(rightScope, binary->right, expectedType);
return {leftType, rightType, connectiveArena.conjunction(leftConnective, rightConnective)};
}
else if (binary->op == AstExprBinary::Or)
{
auto [leftType, leftConnective] = check(scope, binary->left, expectedType);
ScopePtr rightScope = childScope(binary->right, scope);
applyRefinements(rightScope, binary->right->location, connectiveArena.negation(leftConnective));
auto [rightType, rightConnective] = check(rightScope, binary->right, expectedType);
return {leftType, rightType, connectiveArena.disjunction(leftConnective, rightConnective)};
}
else if (auto typeguard = matchTypeGuard(binary))
{
TypeId leftType = check(scope, binary->left).ty;
TypeId rightType = check(scope, binary->right).ty;
std::optional<DefId> def = dfg->getDef(typeguard->target);
if (!def)
return {leftType, rightType, nullptr};
TypeId discriminantTy = singletonTypes->neverType;
if (typeguard->type == "nil")
discriminantTy = singletonTypes->nilType;
else if (typeguard->type == "string")
discriminantTy = singletonTypes->stringType;
else if (typeguard->type == "number")
discriminantTy = singletonTypes->numberType;
else if (typeguard->type == "boolean")
discriminantTy = singletonTypes->booleanType;
else if (typeguard->type == "table")
discriminantTy = singletonTypes->neverType; // TODO: replace with top table type
else if (typeguard->type == "function")
discriminantTy = singletonTypes->functionType;
else if (typeguard->type == "userdata")
{
// For now, we don't really care about being accurate with userdata if the typeguard was using typeof
discriminantTy = singletonTypes->neverType; // TODO: replace with top class type
}
else if (!typeguard->isTypeof && typeguard->type == "vector")
discriminantTy = singletonTypes->neverType; // TODO: figure out a way to deal with this quirky type
else if (!typeguard->isTypeof)
discriminantTy = singletonTypes->neverType;
else if (auto typeFun = globalScope->lookupType(typeguard->type); typeFun && typeFun->typeParams.empty() && typeFun->typePackParams.empty())
{
TypeId ty = follow(typeFun->type);
// We're only interested in the root class of any classes.
if (auto ctv = get<ClassTypeVar>(ty); !ctv || !ctv->parent)
discriminantTy = ty;
}
ConnectiveId proposition = connectiveArena.proposition(*def, discriminantTy);
if (binary->op == AstExprBinary::CompareEq)
return {leftType, rightType, proposition};
else if (binary->op == AstExprBinary::CompareNe)
return {leftType, rightType, connectiveArena.negation(proposition)};
else
ice->ice("matchTypeGuard should only return a Some under `==` or `~=`!");
}
else if (binary->op == AstExprBinary::CompareEq || binary->op == AstExprBinary::CompareNe)
{
TypeId leftType = check(scope, binary->left, expectedType, true).ty;
TypeId rightType = check(scope, binary->right, expectedType, true).ty;
ConnectiveId leftConnective = nullptr;
if (auto def = dfg->getDef(binary->left))
leftConnective = connectiveArena.proposition(*def, rightType);
ConnectiveId rightConnective = nullptr;
if (auto def = dfg->getDef(binary->right))
rightConnective = connectiveArena.proposition(*def, leftType);
if (binary->op == AstExprBinary::CompareNe)
{
leftConnective = connectiveArena.negation(leftConnective);
rightConnective = connectiveArena.negation(rightConnective);
}
return {leftType, rightType, connectiveArena.equivalence(leftConnective, rightConnective)};
}
else
{
TypeId leftType = check(scope, binary->left, expectedType).ty;
TypeId rightType = check(scope, binary->right, expectedType).ty;
return {leftType, rightType, nullptr};
}
}
TypePackId ConstraintGraphBuilder::checkLValues(const ScopePtr& scope, AstArray<AstExpr*> exprs)
{
std::vector<TypeId> types;
@ -1841,9 +2108,13 @@ std::vector<std::pair<Name, GenericTypePackDefinition>> ConstraintGraphBuilder::
Inference ConstraintGraphBuilder::flattenPack(const ScopePtr& scope, Location location, InferencePack pack)
{
auto [tp] = pack;
const auto& [tp, connectives] = pack;
ConnectiveId connective = nullptr;
if (!connectives.empty())
connective = connectives[0];
if (auto f = first(tp))
return Inference{*f};
return Inference{*f, connective};
TypeId typeResult = freshType(scope);
TypePack onePack{{typeResult}, freshTypePack(scope)};
@ -1851,7 +2122,7 @@ Inference ConstraintGraphBuilder::flattenPack(const ScopePtr& scope, Location lo
addConstraint(scope, location, PackSubtypeConstraint{tp, oneTypePack});
return Inference{typeResult};
return Inference{typeResult, connective};
}
void ConstraintGraphBuilder::reportError(Location location, TypeErrorData err)
@ -1897,19 +2168,14 @@ void ConstraintGraphBuilder::prepopulateGlobalScope(const ScopePtr& globalScope,
program->visit(&gp);
}
void collectConstraints(std::vector<NotNull<Constraint>>& result, NotNull<Scope> scope)
{
for (const auto& c : scope->constraints)
result.push_back(NotNull{c.get()});
for (NotNull<Scope> child : scope->children)
collectConstraints(result, child);
}
std::vector<NotNull<Constraint>> collectConstraints(NotNull<Scope> rootScope)
std::vector<NotNull<Constraint>> borrowConstraints(const std::vector<ConstraintPtr>& constraints)
{
std::vector<NotNull<Constraint>> result;
collectConstraints(result, rootScope);
result.reserve(constraints.size());
for (const auto& c : constraints)
result.emplace_back(c.get());
return result;
}


@ -18,7 +18,6 @@
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false);
LUAU_FASTFLAG(LuauFixNameMaps)
namespace Luau
{
@ -27,34 +26,14 @@ namespace Luau
{
for (const auto& [k, v] : scope->bindings)
{
if (FFlag::LuauFixNameMaps)
{
auto d = toString(v.typeId, opts);
printf("\t%s : %s\n", k.c_str(), d.c_str());
}
else
{
auto d = toStringDetailed(v.typeId, opts);
opts.DEPRECATED_nameMap = d.DEPRECATED_nameMap;
printf("\t%s : %s\n", k.c_str(), d.name.c_str());
}
auto d = toString(v.typeId, opts);
printf("\t%s : %s\n", k.c_str(), d.c_str());
}
for (NotNull<Scope> child : scope->children)
dumpBindings(child, opts);
}
static void dumpConstraints(NotNull<Scope> scope, ToStringOptions& opts)
{
for (const ConstraintPtr& c : scope->constraints)
{
printf("\t%s\n", toString(*c, opts).c_str());
}
for (NotNull<Scope> child : scope->children)
dumpConstraints(child, opts);
}
static std::pair<std::vector<TypeId>, std::vector<TypePackId>> saturateArguments(TypeArena* arena, NotNull<SingletonTypes> singletonTypes,
const TypeFun& fn, const std::vector<TypeId>& rawTypeArguments, const std::vector<TypePackId>& rawPackArguments)
{
@ -219,12 +198,6 @@ size_t HashInstantiationSignature::operator()(const InstantiationSignature& sign
return hash;
}
void dump(NotNull<Scope> rootScope, ToStringOptions& opts)
{
printf("constraints:\n");
dumpConstraints(rootScope, opts);
}
void dump(ConstraintSolver* cs, ToStringOptions& opts)
{
printf("constraints:\n");
@ -248,17 +221,17 @@ void dump(ConstraintSolver* cs, ToStringOptions& opts)
if (auto fcc = get<FunctionCallConstraint>(*c))
{
for (NotNull<const Constraint> inner : fcc->innerConstraints)
printf("\t\t\t%s\n", toString(*inner, opts).c_str());
printf("\t ->\t\t%s\n", toString(*inner, opts).c_str());
}
}
}
ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, ModuleName moduleName,
NotNull<ModuleResolver> moduleResolver, std::vector<RequireCycle> requireCycles, DcrLogger* logger)
ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, std::vector<NotNull<Constraint>> constraints,
ModuleName moduleName, NotNull<ModuleResolver> moduleResolver, std::vector<RequireCycle> requireCycles, DcrLogger* logger)
: arena(normalizer->arena)
, singletonTypes(normalizer->singletonTypes)
, normalizer(normalizer)
, constraints(collectConstraints(rootScope))
, constraints(std::move(constraints))
, rootScope(rootScope)
, currentModuleName(std::move(moduleName))
, moduleResolver(moduleResolver)
@ -267,7 +240,7 @@ ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope
{
opts.exhaustive = true;
for (NotNull<Constraint> c : constraints)
for (NotNull<Constraint> c : this->constraints)
{
unsolvedConstraints.push_back(c);
@ -310,6 +283,8 @@ void ConstraintSolver::run()
{
printf("Starting solver\n");
dump(this, opts);
printf("Bindings:\n");
dumpBindings(rootScope, opts);
}
if (FFlag::DebugLuauLogSolverToJson)
@ -440,8 +415,8 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
success = tryDispatch(*fcc, constraint);
else if (auto hpc = get<HasPropConstraint>(*constraint))
success = tryDispatch(*hpc, constraint);
else if (auto rc = get<RefinementConstraint>(*constraint))
success = tryDispatch(*rc, constraint);
else if (auto sottc = get<SingletonOrTopTypeConstraint>(*constraint))
success = tryDispatch(*sottc, constraint);
else
LUAU_ASSERT(false);
@ -633,13 +608,6 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
return true;
}
// For or expressions, the LHS will never have nil as a possible output.
// Consider:
// local foo = nil or 2
// `foo` will always be 2.
if (c.op == AstExprBinary::Op::Or)
leftType = stripNil(singletonTypes, *arena, leftType);
// Metatables go first, even if there is primitive behavior.
if (auto it = kBinaryOpMetamethods.find(c.op); it != kBinaryOpMetamethods.end())
{
@ -769,15 +737,47 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
// And evaluates to the falsy part of the LHS if the LHS is falsey, and to the
// RHS type if the LHS is truthy.
case AstExprBinary::Op::And:
asMutable(resultType)->ty.emplace<BoundTypeVar>(unionOfTypes(rightType, singletonTypes->booleanType, constraint->scope, false));
{
TypeId leftFilteredTy = arena->addType(IntersectionTypeVar{{singletonTypes->falsyType, leftType}});
// TODO: normalization here should be replaced by a more limited 'simplification'
const NormalizedType* normalized = normalizer->normalize(arena->addType(UnionTypeVar{{leftFilteredTy, rightType}}));
if (!normalized)
{
reportError(CodeTooComplex{}, constraint->location);
asMutable(resultType)->ty.emplace<BoundTypeVar>(errorRecoveryType());
}
else
{
asMutable(resultType)->ty.emplace<BoundTypeVar>(normalizer->typeFromNormal(*normalized));
}
unblock(resultType);
return true;
}
// Or evaluates to the LHS type if the LHS is truthy, and the RHS type if
// LHS is falsey.
case AstExprBinary::Op::Or:
asMutable(resultType)->ty.emplace<BoundTypeVar>(unionOfTypes(rightType, leftType, constraint->scope, true));
{
TypeId rightFilteredTy = arena->addType(IntersectionTypeVar{{singletonTypes->truthyType, leftType}});
// TODO: normalization here should be replaced by a more limited 'simplification'
const NormalizedType* normalized = normalizer->normalize(arena->addType(UnionTypeVar{{rightFilteredTy, rightType}}));
if (!normalized)
{
reportError(CodeTooComplex{}, constraint->location);
asMutable(resultType)->ty.emplace<BoundTypeVar>(errorRecoveryType());
}
else
{
asMutable(resultType)->ty.emplace<BoundTypeVar>(normalizer->typeFromNormal(*normalized));
}
unblock(resultType);
return true;
}
default:
iceReporter.ice("Unhandled AstExprBinary::Op for binary operation", constraint->location);
break;
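Concretely, the new dispatch is intended to produce results like the following hedged Luau sketch, where the commented types follow from the falsy/truthy filtering plus normalization implemented above:

local function pick(foo: number?, default: number)
    local y = foo or default  -- truthy(number?) | number, which normalizes to number
    local z = foo and default -- falsy(number?) | number, which normalizes to number?
    return y, z
end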
@ -1148,6 +1148,17 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
// Alter the inner constraints.
LUAU_ASSERT(c.innerConstraints.size() == 2);
// Anything that is blocked on this constraint must also be blocked on our inner constraints
auto blockedIt = blocked.find(constraint.get());
if (blockedIt != blocked.end())
{
for (const auto& ic : c.innerConstraints)
{
for (const auto& blockedConstraint : blockedIt->second)
block(ic, blockedConstraint);
}
}
asMutable(*c.innerConstraints.at(0)).c = InstantiationConstraint{instantiatedType, *callMm};
asMutable(*c.innerConstraints.at(1)).c = SubtypeConstraint{inferredFnType, instantiatedType};
@ -1180,6 +1191,17 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
}
else
{
// Anything that is blocked on this constraint must also be blocked on our inner constraints
auto blockedIt = blocked.find(constraint.get());
if (blockedIt != blocked.end())
{
for (const auto& ic : c.innerConstraints)
{
for (const auto& blockedConstraint : blockedIt->second)
block(ic, blockedConstraint);
}
}
unsolvedConstraints.insert(end(unsolvedConstraints), begin(c.innerConstraints), end(c.innerConstraints));
asMutable(c.result)->ty.emplace<FreeTypePack>(constraint->scope);
}
@ -1274,25 +1296,18 @@ bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Con
return true;
}
bool ConstraintSolver::tryDispatch(const RefinementConstraint& c, NotNull<const Constraint> constraint)
bool ConstraintSolver::tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint)
{
// TODO: Figure out exact details on when refinements need to be blocked.
// It's possible that it never needs to be, since we can just use intersection types with the discriminant type?
if (isBlocked(c.discriminantType))
return false;
if (!constraint->scope->parent)
iceReporter.ice("No parent scope");
TypeId followed = follow(c.discriminantType);
std::optional<TypeId> previousTy = constraint->scope->parent->lookup(c.def);
if (!previousTy)
iceReporter.ice("No previous type");
std::optional<TypeId> useTy = constraint->scope->lookup(c.def);
if (!useTy)
iceReporter.ice("The def is not bound to a type");
TypeId resultTy = follow(*useTy);
std::vector<TypeId> parts{*previousTy, c.discriminantType};
asMutable(resultTy)->ty.emplace<IntersectionTypeVar>(std::move(parts));
// `nil` is a singleton type too! There's only one value of type `nil`.
if (get<SingletonTypeVar>(followed) || isNil(followed))
*asMutable(c.resultType) = NegationTypeVar{c.discriminantType};
else
*asMutable(c.resultType) = BoundTypeVar{singletonTypes->unknownType};
return true;
}
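A hypothetical Luau sketch of how this constraint feeds refinements (behavior under the new solver; a sketch, not guaranteed by this diff alone):

local function f(x: string)
    if x == "hello" then
        -- the discriminant is the singleton "hello"
    else
        -- a singleton (or nil) discriminant yields the negation ~"hello",
        -- so x : string & ~"hello" here; any other discriminant yields unknown
    end
end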

View file

@ -2,6 +2,7 @@
#include "Luau/BuiltinDefinitions.h"
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAG(LuauOptionalNextKey)
namespace Luau
{
@ -13,16 +14,16 @@ declare bit32: {
bor: (...number) -> number,
bxor: (...number) -> number,
btest: (number, ...number) -> boolean,
rrotate: (number, number) -> number,
lrotate: (number, number) -> number,
lshift: (number, number) -> number,
arshift: (number, number) -> number,
rshift: (number, number) -> number,
bnot: (number) -> number,
extract: (number, number, number?) -> number,
replace: (number, number, number, number?) -> number,
countlz: (number) -> number,
countrz: (number) -> number,
rrotate: (x: number, disp: number) -> number,
lrotate: (x: number, disp: number) -> number,
lshift: (x: number, disp: number) -> number,
arshift: (x: number, disp: number) -> number,
rshift: (x: number, disp: number) -> number,
bnot: (x: number) -> number,
extract: (n: number, field: number, width: number?) -> number,
replace: (n: number, v: number, field: number, width: number?) -> number,
countlz: (n: number) -> number,
countrz: (n: number) -> number,
}
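-- Usage sketch (illustrative only, not part of the declarations): the parameter names
-- above affect documentation and signature help, not call syntax. For example,
-- bit32.extract(0xABCD, 8, 4) reads the 4-bit field starting at bit 8 (0xB, i.e. 11),
-- and bit32.replace(0xABCD, 0xF, 0, 4) writes 0xF into the low 4 bits, giving 0xABCF.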
declare math: {
@ -93,9 +94,9 @@ type DateTypeResult = {
}
declare os: {
time: (DateTypeArg?) -> number,
date: (string?, number?) -> DateTypeResult | string,
difftime: (DateTypeResult | number, DateTypeResult | number) -> number,
time: (time: DateTypeArg?) -> number,
date: (formatString: string?, time: number?) -> DateTypeResult | string,
difftime: (t2: DateTypeResult | number, t1: DateTypeResult | number) -> number,
clock: () -> number,
}
@ -126,7 +127,7 @@ declare function rawlen<K, V>(obj: {[K]: V} | string): number
declare function setfenv<T..., R...>(target: number | (T...) -> R..., env: {[string]: any}): ((T...) -> R...)?
declare function ipairs<V>(tab: {V}): (({V}, number) -> (number, V), {V}, number)
-- TODO: place ipairs definition here with removal of FFlagLuauOptionalNextKey
declare function pcall<A..., R...>(f: (A...) -> R..., ...: A...): (boolean, R...)
@ -145,51 +146,51 @@ declare function loadstring<A...>(src: string, chunkname: string?): (((A...) ->
declare function newproxy(mt: boolean?): any
declare coroutine: {
create: <A..., R...>((A...) -> R...) -> thread,
resume: <A..., R...>(thread, A...) -> (boolean, R...),
create: <A..., R...>(f: (A...) -> R...) -> thread,
resume: <A..., R...>(co: thread, A...) -> (boolean, R...),
running: () -> thread,
status: (thread) -> "dead" | "running" | "normal" | "suspended",
status: (co: thread) -> "dead" | "running" | "normal" | "suspended",
-- FIXME: This technically returns a function, but we can't represent this yet.
wrap: <A..., R...>((A...) -> R...) -> any,
wrap: <A..., R...>(f: (A...) -> R...) -> any,
yield: <A..., R...>(A...) -> R...,
isyieldable: () -> boolean,
close: (thread) -> (boolean, any)
close: (co: thread) -> (boolean, any)
}
declare table: {
concat: <V>({V}, string?, number?, number?) -> string,
insert: (<V>({V}, V) -> ()) & (<V>({V}, number, V) -> ()),
maxn: <V>({V}) -> number,
remove: <V>({V}, number?) -> V?,
sort: <V>({V}, ((V, V) -> boolean)?) -> (),
create: <V>(number, V?) -> {V},
find: <V>({V}, V, number?) -> number?,
concat: <V>(t: {V}, sep: string?, i: number?, j: number?) -> string,
insert: (<V>(t: {V}, value: V) -> ()) & (<V>(t: {V}, pos: number, value: V) -> ()),
maxn: <V>(t: {V}) -> number,
remove: <V>(t: {V}, number?) -> V?,
sort: <V>(t: {V}, comp: ((V, V) -> boolean)?) -> (),
create: <V>(count: number, value: V?) -> {V},
find: <V>(haystack: {V}, needle: V, init: number?) -> number?,
unpack: <V>({V}, number?, number?) -> ...V,
unpack: <V>(list: {V}, i: number?, j: number?) -> ...V,
pack: <V>(...V) -> { n: number, [number]: V },
getn: <V>({V}) -> number,
foreach: <K, V>({[K]: V}, (K, V) -> ()) -> (),
getn: <V>(t: {V}) -> number,
foreach: <K, V>(t: {[K]: V}, f: (K, V) -> ()) -> (),
foreachi: <V>({V}, (number, V) -> ()) -> (),
move: <V>({V}, number, number, number, {V}?) -> {V},
clear: <K, V>({[K]: V}) -> (),
move: <V>(src: {V}, a: number, b: number, t: number, dst: {V}?) -> {V},
clear: <K, V>(table: {[K]: V}) -> (),
isfrozen: <K, V>({[K]: V}) -> boolean,
isfrozen: <K, V>(t: {[K]: V}) -> boolean,
}
declare debug: {
info: (<R...>(thread, number, string) -> R...) & (<R...>(number, string) -> R...) & (<A..., R1..., R2...>((A...) -> R1..., string) -> R2...),
traceback: ((string?, number?) -> string) & ((thread, string?, number?) -> string),
info: (<R...>(thread: thread, level: number, options: string) -> R...) & (<R...>(level: number, options: string) -> R...) & (<A..., R1..., R2...>(func: (A...) -> R1..., options: string) -> R2...),
traceback: ((message: string?, level: number?) -> string) & ((thread: thread, message: string?, level: number?) -> string),
}
declare utf8: {
char: (...number) -> string,
charpattern: string,
codes: (string) -> ((string, number) -> (number, number), string, number),
codepoint: (string, number?, number?) -> ...number,
len: (string, number?, number?) -> (number?, number?),
offset: (string, number?, number?) -> number,
codes: (str: string) -> ((string, number) -> (number, number), string, number),
codepoint: (str: string, i: number?, j: number?) -> ...number,
len: (s: string, i: number?, j: number?) -> (number?, number?),
offset: (s: string, n: number?, i: number?) -> number,
}
-- Cannot use `typeof` here because it will produce a polytype when we expect a monotype.
@ -207,6 +208,11 @@ std::string getBuiltinDefinitionSource()
else
result += "declare function error<T>(message: T, level: number?)\n";
if (FFlag::LuauOptionalNextKey)
result += "declare function ipairs<V>(tab: {V}): (({V}, number) -> (number?, V), {V}, number)\n";
else
result += "declare function ipairs<V>(tab: {V}): (({V}, number) -> (number, V), {V}, number)\n";
return result;
}

View file

@ -955,7 +955,8 @@ ModulePtr Frontend::check(
cgb.visit(sourceModule.root);
result->errors = std::move(cgb.errors);
ConstraintSolver cs{NotNull{&normalizer}, NotNull(cgb.rootScope), sourceModule.name, NotNull(&moduleResolver), requireCycles, logger.get()};
ConstraintSolver cs{NotNull{&normalizer}, NotNull(cgb.rootScope), borrowConstraints(cgb.constraints), sourceModule.name, NotNull(&moduleResolver),
requireCycles, logger.get()};
if (options.randomizeConstraintResolutionSeed)
cs.randomize(*options.randomizeConstraintResolutionSeed);

View file

@ -14,9 +14,7 @@
#include <algorithm>
LUAU_FASTFLAG(LuauAnyifyModuleReturnGenerics)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauForceExportSurfacesToBeNormal, false);
LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess, false);
LUAU_FASTFLAG(LuauSubstitutionReentrant);
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution);
@ -222,18 +220,6 @@ void Module::clonePublicInterface(NotNull<SingletonTypes> singletonTypes, Intern
}
}
if (!FFlag::LuauAnyifyModuleReturnGenerics)
{
for (TypeId ty : returnType)
{
if (get<GenericTypeVar>(follow(ty)))
{
auto t = asMutable(ty);
t->ty = AnyTypeVar{};
}
}
}
for (auto& [name, ty] : declaredGlobals)
{
if (FFlag::LuauClonePublicInterfaceLess)

View file

@ -7,9 +7,9 @@
#include "Luau/Clone.h"
#include "Luau/Common.h"
#include "Luau/RecursionCounter.h"
#include "Luau/TypeVar.h"
#include "Luau/Unifier.h"
#include "Luau/VisitTypeVar.h"
LUAU_FASTFLAGVARIABLE(DebugLuauCopyBeforeNormalizing, false)
LUAU_FASTFLAGVARIABLE(DebugLuauCheckNormalizeInvariant, false)
@ -20,6 +20,7 @@ LUAU_FASTINTVARIABLE(LuauNormalizeCacheLimit, 100000);
LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineTableFix, false);
LUAU_FASTFLAGVARIABLE(LuauTypeNormalization2, false);
LUAU_FASTFLAGVARIABLE(LuauNegatedStringSingletons, false);
LUAU_FASTFLAGVARIABLE(LuauNegatedFunctionTypes, false);
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauOverloadedFunctionSubtypingPerf);
@ -206,6 +207,28 @@ bool isSubtype(const NormalizedStringType& subStr, const NormalizedStringType& s
return true;
}
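// A NormalizedFunctionType is in one of three states:
//  - never:  isTop == false and parts is empty (or unset)
//  - top:    isTop == true, i.e. `function`, the type of all functions
//  - finite: isTop == false and parts holds the specific function types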
NormalizedFunctionType::NormalizedFunctionType()
: parts(FFlag::LuauNegatedFunctionTypes ? std::optional<TypeIds>{TypeIds{}} : std::nullopt)
{
}
void NormalizedFunctionType::resetToTop()
{
isTop = true;
parts.emplace();
}
void NormalizedFunctionType::resetToNever()
{
isTop = false;
parts.emplace();
}
bool NormalizedFunctionType::isNever() const
{
return !isTop && (!parts || parts->empty());
}
NormalizedType::NormalizedType(NotNull<SingletonTypes> singletonTypes)
: tops(singletonTypes->neverType)
, booleans(singletonTypes->neverType)
@ -220,8 +243,8 @@ NormalizedType::NormalizedType(NotNull<SingletonTypes> singletonTypes)
static bool isInhabited(const NormalizedType& norm)
{
return !get<NeverTypeVar>(norm.tops) || !get<NeverTypeVar>(norm.booleans) || !norm.classes.empty() || !get<NeverTypeVar>(norm.errors) ||
!get<NeverTypeVar>(norm.nils) || !get<NeverTypeVar>(norm.numbers) || !norm.strings.isNever() ||
!get<NeverTypeVar>(norm.threads) || norm.functions || !norm.tables.empty() || !norm.tyvars.empty();
!get<NeverTypeVar>(norm.nils) || !get<NeverTypeVar>(norm.numbers) || !norm.strings.isNever() || !get<NeverTypeVar>(norm.threads) ||
!norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty();
}
static int tyvarIndex(TypeId ty)
@ -317,10 +340,14 @@ static bool isNormalizedThread(TypeId ty)
static bool areNormalizedFunctions(const NormalizedFunctionType& tys)
{
if (tys)
for (TypeId ty : *tys)
if (tys.parts)
{
for (TypeId ty : *tys.parts)
{
if (!get<FunctionTypeVar>(ty) && !get<ErrorTypeVar>(ty))
return false;
}
}
return true;
}
@ -420,7 +447,7 @@ void Normalizer::clearNormal(NormalizedType& norm)
norm.strings.resetToNever();
norm.threads = singletonTypes->neverType;
norm.tables.clear();
norm.functions = std::nullopt;
norm.functions.resetToNever();
norm.tyvars.clear();
}
@ -809,20 +836,28 @@ std::optional<TypeId> Normalizer::unionOfFunctions(TypeId here, TypeId there)
void Normalizer::unionFunctions(NormalizedFunctionType& heres, const NormalizedFunctionType& theres)
{
if (!theres)
if (FFlag::LuauNegatedFunctionTypes)
{
if (heres.isTop)
return;
if (theres.isTop)
heres.resetToTop();
}
if (theres.isNever())
return;
TypeIds tmps;
if (!heres)
if (heres.isNever())
{
tmps.insert(theres->begin(), theres->end());
heres = std::move(tmps);
tmps.insert(theres.parts->begin(), theres.parts->end());
heres.parts = std::move(tmps);
return;
}
for (TypeId here : *heres)
for (TypeId there : *theres)
for (TypeId here : *heres.parts)
for (TypeId there : *theres.parts)
{
if (std::optional<TypeId> fun = unionOfFunctions(here, there))
tmps.insert(*fun);
@ -830,28 +865,28 @@ void Normalizer::unionFunctions(NormalizedFunctionType& heres, const NormalizedF
tmps.insert(singletonTypes->errorRecoveryType(there));
}
heres = std::move(tmps);
heres.parts = std::move(tmps);
}
void Normalizer::unionFunctionsWithFunction(NormalizedFunctionType& heres, TypeId there)
{
if (!heres)
if (heres.isNever())
{
TypeIds tmps;
tmps.insert(there);
heres = std::move(tmps);
heres.parts = std::move(tmps);
return;
}
TypeIds tmps;
for (TypeId here : *heres)
for (TypeId here : *heres.parts)
{
if (std::optional<TypeId> fun = unionOfFunctions(here, there))
tmps.insert(*fun);
else
tmps.insert(singletonTypes->errorRecoveryType(there));
}
heres = std::move(tmps);
heres.parts = std::move(tmps);
}
void Normalizer::unionTablesWithTable(TypeIds& heres, TypeId there)
@ -1004,6 +1039,11 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
here.strings.resetToString();
else if (ptv->type == PrimitiveTypeVar::Thread)
here.threads = there;
else if (ptv->type == PrimitiveTypeVar::Function)
{
LUAU_ASSERT(FFlag::LuauNegatedFunctionTypes);
here.functions.resetToTop();
}
else
LUAU_ASSERT(!"Unreachable");
}
@ -1036,8 +1076,11 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
else if (const NegationTypeVar* ntv = get<NegationTypeVar>(there))
{
const NormalizedType* thereNormal = normalize(ntv->ty);
NormalizedType tn = negateNormal(*thereNormal);
if (!unionNormals(here, tn))
std::optional<NormalizedType> tn = negateNormal(*thereNormal);
if (!tn)
return false;
if (!unionNormals(here, *tn))
return false;
}
else
@ -1053,7 +1096,7 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
// ------- Negations
NormalizedType Normalizer::negateNormal(const NormalizedType& here)
std::optional<NormalizedType> Normalizer::negateNormal(const NormalizedType& here)
{
NormalizedType result{singletonTypes};
if (!get<NeverTypeVar>(here.tops))
@ -1092,10 +1135,24 @@ NormalizedType Normalizer::negateNormal(const NormalizedType& here)
result.threads = get<NeverTypeVar>(here.threads) ? singletonTypes->threadType : singletonTypes->neverType;
/*
* Things get weird and so, so complicated if we allow negations of
* arbitrary function types. Ordinary code can never form these kinds of
* types, so we decline to negate them.
*/
if (FFlag::LuauNegatedFunctionTypes)
{
if (here.functions.isNever())
result.functions.resetToTop();
else if (here.functions.isTop)
result.functions.resetToNever();
else
return std::nullopt;
}
// TODO: negating tables
// TODO: negating functions
// TODO: negating tyvars?
return result;
}
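A hypothetical Luau sketch of where these function negations come from (the refinement behavior shown is a sketch, not guaranteed by this diff alone):

local function describe(x: unknown)
    if typeof(x) == "function" then
        return "callable"     -- x narrows to the top function type `function`
    else
        return "not callable" -- x carries the representable negation ~function
    end
end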
@ -1142,21 +1199,25 @@ void Normalizer::subtractPrimitive(NormalizedType& here, TypeId ty)
LUAU_ASSERT(ptv);
switch (ptv->type)
{
case PrimitiveTypeVar::NilType:
here.nils = singletonTypes->neverType;
break;
case PrimitiveTypeVar::Boolean:
here.booleans = singletonTypes->neverType;
break;
case PrimitiveTypeVar::Number:
here.numbers = singletonTypes->neverType;
break;
case PrimitiveTypeVar::String:
here.strings.resetToNever();
break;
case PrimitiveTypeVar::Thread:
here.threads = singletonTypes->neverType;
break;
case PrimitiveTypeVar::NilType:
here.nils = singletonTypes->neverType;
break;
case PrimitiveTypeVar::Boolean:
here.booleans = singletonTypes->neverType;
break;
case PrimitiveTypeVar::Number:
here.numbers = singletonTypes->neverType;
break;
case PrimitiveTypeVar::String:
here.strings.resetToNever();
break;
case PrimitiveTypeVar::Thread:
here.threads = singletonTypes->neverType;
break;
case PrimitiveTypeVar::Function:
LUAU_ASSERT(FFlag::LuauNegatedStringSingletons);
here.functions.resetToNever();
break;
}
}
@ -1589,7 +1650,7 @@ std::optional<TypeId> Normalizer::intersectionOfFunctions(TypeId here, TypeId th
TypePackId argTypes;
TypePackId retTypes;
if (hftv->retTypes == tftv->retTypes)
{
std::optional<TypePackId> argTypesOpt = unionOfTypePacks(hftv->argTypes, tftv->argTypes);
@ -1598,7 +1659,7 @@ std::optional<TypeId> Normalizer::intersectionOfFunctions(TypeId here, TypeId th
argTypes = *argTypesOpt;
retTypes = hftv->retTypes;
}
else if (FFlag::LuauOverloadedFunctionSubtypingPerf && hftv->argTypes == tftv->argTypes)
else if (FFlag::LuauOverloadedFunctionSubtypingPerf && hftv->argTypes == tftv->argTypes)
{
std::optional<TypePackId> retTypesOpt = intersectionOfTypePacks(hftv->argTypes, tftv->argTypes);
if (!retTypesOpt)
@ -1738,18 +1799,20 @@ std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId th
void Normalizer::intersectFunctionsWithFunction(NormalizedFunctionType& heres, TypeId there)
{
if (!heres)
if (heres.isNever())
return;
for (auto it = heres->begin(); it != heres->end();)
heres.isTop = false;
for (auto it = heres.parts->begin(); it != heres.parts->end();)
{
TypeId here = *it;
if (get<ErrorTypeVar>(here))
it++;
else if (std::optional<TypeId> tmp = intersectionOfFunctions(here, there))
{
heres->erase(it);
heres->insert(*tmp);
heres.parts->erase(it);
heres.parts->insert(*tmp);
return;
}
else
@ -1757,27 +1820,27 @@ void Normalizer::intersectFunctionsWithFunction(NormalizedFunctionType& heres, T
}
TypeIds tmps;
for (TypeId here : *heres)
for (TypeId here : *heres.parts)
{
if (std::optional<TypeId> tmp = unionSaturatedFunctions(here, there))
tmps.insert(*tmp);
}
heres->insert(there);
heres->insert(tmps.begin(), tmps.end());
heres.parts->insert(there);
heres.parts->insert(tmps.begin(), tmps.end());
}
void Normalizer::intersectFunctions(NormalizedFunctionType& heres, const NormalizedFunctionType& theres)
{
if (!heres)
if (heres.isNever())
return;
else if (!theres)
else if (theres.isNever())
{
heres = std::nullopt;
heres.resetToNever();
return;
}
else
{
for (TypeId there : *theres)
for (TypeId there : *theres.parts)
intersectFunctionsWithFunction(heres, there);
}
}
@ -1935,6 +1998,7 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there)
TypeId nils = here.nils;
TypeId numbers = here.numbers;
NormalizedStringType strings = std::move(here.strings);
NormalizedFunctionType functions = std::move(here.functions);
TypeId threads = here.threads;
clearNormal(here);
@ -1949,6 +2013,11 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there)
here.strings = std::move(strings);
else if (ptv->type == PrimitiveTypeVar::Thread)
here.threads = threads;
else if (ptv->type == PrimitiveTypeVar::Function)
{
LUAU_ASSERT(FFlag::LuauNegatedFunctionTypes);
here.functions = std::move(functions);
}
else
LUAU_ASSERT(!"Unreachable");
}
@ -1981,8 +2050,10 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there)
for (TypeId part : itv->options)
{
const NormalizedType* normalPart = normalize(part);
NormalizedType negated = negateNormal(*normalPart);
intersectNormals(here, negated);
std::optional<NormalizedType> negated = negateNormal(*normalPart);
if (!negated)
return false;
intersectNormals(here, *negated);
}
}
else
@ -2016,14 +2087,16 @@ TypeId Normalizer::typeFromNormal(const NormalizedType& norm)
result.insert(result.end(), norm.classes.begin(), norm.classes.end());
if (!get<NeverTypeVar>(norm.errors))
result.push_back(norm.errors);
if (norm.functions)
if (FFlag::LuauNegatedFunctionTypes && norm.functions.isTop)
result.push_back(singletonTypes->functionType);
else if (!norm.functions.isNever())
{
if (norm.functions->size() == 1)
result.push_back(*norm.functions->begin());
if (norm.functions.parts->size() == 1)
result.push_back(*norm.functions.parts->begin());
else
{
std::vector<TypeId> parts;
parts.insert(parts.end(), norm.functions->begin(), norm.functions->end());
parts.insert(parts.end(), norm.functions.parts->begin(), norm.functions.parts->end());
result.push_back(arena->addType(IntersectionTypeVar{std::move(parts)}));
}
}
@ -2070,62 +2143,24 @@ TypeId Normalizer::typeFromNormal(const NormalizedType& norm)
return arena->addType(UnionTypeVar{std::move(result)});
}
namespace
{
struct Replacer
{
TypeArena* arena;
TypeId sourceType;
TypeId replacedType;
DenseHashMap<TypeId, TypeId> newTypes;
Replacer(TypeArena* arena, TypeId sourceType, TypeId replacedType)
: arena(arena)
, sourceType(sourceType)
, replacedType(replacedType)
, newTypes(nullptr)
{
}
TypeId smartClone(TypeId t)
{
t = follow(t);
TypeId* res = newTypes.find(t);
if (res)
return *res;
TypeId result = shallowClone(t, *arena, TxnLog::empty());
newTypes[t] = result;
newTypes[result] = result;
return result;
}
};
} // anonymous namespace
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
Normalizer normalizer{&arena, singletonTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
u.anyIsTop = anyIsTop;
u.tryUnify(subTy, superTy);
const bool ok = u.errors.empty() && u.log.empty();
return ok;
}
bool isSubtype(
TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
Normalizer normalizer{&arena, singletonTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
u.anyIsTop = anyIsTop;
u.tryUnify(subPack, superPack);
const bool ok = u.errors.empty() && u.log.empty();

View file

@ -10,12 +10,11 @@
#include <algorithm>
#include <stdexcept>
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauLvaluelessPath)
LUAU_FASTFLAGVARIABLE(LuauSpecialTypesAsterisked, false)
LUAU_FASTFLAGVARIABLE(LuauFixNameMaps, false)
LUAU_FASTFLAGVARIABLE(LuauUnseeArrayTtv, false)
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauFunctionReturnStringificationFixup, false)
LUAU_FASTFLAGVARIABLE(LuauUnseeArrayTtv, false)
/*
* Prefix generic typenames with gen-
@ -130,28 +129,15 @@ struct StringifierState
bool exhaustive;
StringifierState(ToStringOptions& opts, ToStringResult& result, const std::optional<ToStringNameMap>& DEPRECATED_nameMap)
StringifierState(ToStringOptions& opts, ToStringResult& result)
: opts(opts)
, result(result)
, exhaustive(opts.exhaustive)
{
if (!FFlag::LuauFixNameMaps && DEPRECATED_nameMap)
result.DEPRECATED_nameMap = *DEPRECATED_nameMap;
if (!FFlag::LuauFixNameMaps)
{
for (const auto& [_, v] : result.DEPRECATED_nameMap.typeVars)
usedNames.insert(v);
for (const auto& [_, v] : result.DEPRECATED_nameMap.typePacks)
usedNames.insert(v);
}
else
{
for (const auto& [_, v] : opts.nameMap.typeVars)
usedNames.insert(v);
for (const auto& [_, v] : opts.nameMap.typePacks)
usedNames.insert(v);
}
for (const auto& [_, v] : opts.nameMap.typeVars)
usedNames.insert(v);
for (const auto& [_, v] : opts.nameMap.typePacks)
usedNames.insert(v);
}
bool hasSeen(const void* tv)
@ -174,8 +160,8 @@ struct StringifierState
std::string getName(TypeId ty)
{
const size_t s = FFlag::LuauFixNameMaps ? opts.nameMap.typeVars.size() : result.DEPRECATED_nameMap.typeVars.size();
std::string& n = FFlag::LuauFixNameMaps ? opts.nameMap.typeVars[ty] : result.DEPRECATED_nameMap.typeVars[ty];
const size_t s = opts.nameMap.typeVars.size();
std::string& n = opts.nameMap.typeVars[ty];
if (!n.empty())
return n;
@ -197,8 +183,8 @@ struct StringifierState
std::string getName(TypePackId ty)
{
const size_t s = FFlag::LuauFixNameMaps ? opts.nameMap.typePacks.size() : result.DEPRECATED_nameMap.typePacks.size();
std::string& n = FFlag::LuauFixNameMaps ? opts.nameMap.typePacks[ty] : result.DEPRECATED_nameMap.typePacks[ty];
const size_t s = opts.nameMap.typePacks.size();
std::string& n = opts.nameMap.typePacks[ty];
if (!n.empty())
return n;
@ -225,6 +211,20 @@ struct StringifierState
result.name += s;
}
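// Debug helper used with DebugLuauVerboseTypeNames under the new solver: prints the
// scope depth followed by a truncated scope address, e.g. "3-0x1a2b3c".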
void emitLevel(Scope* scope)
{
size_t count = 0;
for (Scope* s = scope; s; s = s->parent.get())
++count;
emit(count);
emit("-");
char buffer[16];
uint32_t s = uint32_t(intptr_t(scope) & 0xFFFFFF);
snprintf(buffer, sizeof(buffer), "0x%x", s);
emit(buffer);
}
void emit(TypeLevel level)
{
emit(std::to_string(level.level));
@ -296,10 +296,7 @@ struct TypeVarStringifier
if (tv->ty.valueless_by_exception())
{
state.result.error = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("* VALUELESS BY EXCEPTION *");
else
state.emit("< VALUELESS BY EXCEPTION >");
state.emit("* VALUELESS BY EXCEPTION *");
return;
}
@ -377,7 +374,10 @@ struct TypeVarStringifier
if (FFlag::DebugLuauVerboseTypeNames)
{
state.emit("-");
state.emit(ftv.level);
if (FFlag::DebugLuauDeferredConstraintResolution)
state.emitLevel(ftv.scope);
else
state.emit(ftv.level);
}
}
@ -391,14 +391,20 @@ struct TypeVarStringifier
if (gtv.explicitName)
{
state.usedNames.insert(gtv.name);
if (FFlag::LuauFixNameMaps)
state.opts.nameMap.typeVars[ty] = gtv.name;
else
state.result.DEPRECATED_nameMap.typeVars[ty] = gtv.name;
state.opts.nameMap.typeVars[ty] = gtv.name;
state.emit(gtv.name);
}
else
state.emit(state.getName(ty));
if (FFlag::DebugLuauVerboseTypeNames)
{
state.emit("-");
if (FFlag::DebugLuauDeferredConstraintResolution)
state.emitLevel(gtv.scope);
else
state.emit(gtv.level);
}
}
void operator()(TypeId, const BlockedTypeVar& btv)
@ -434,6 +440,9 @@ struct TypeVarStringifier
case PrimitiveTypeVar::Thread:
state.emit("thread");
return;
case PrimitiveTypeVar::Function:
state.emit("function");
return;
default:
LUAU_ASSERT(!"Unknown primitive type");
throwRuntimeError("Unknown primitive type " + std::to_string(ptv.type));
@ -462,10 +471,7 @@ struct TypeVarStringifier
if (state.hasSeen(&ftv))
{
state.result.cycle = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*CYCLE*");
else
state.emit("<CYCLE>");
state.emit("*CYCLE*");
return;
}
@ -573,10 +579,7 @@ struct TypeVarStringifier
if (state.hasSeen(&ttv))
{
state.result.cycle = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*CYCLE*");
else
state.emit("<CYCLE>");
state.emit("*CYCLE*");
return;
}
@ -710,10 +713,7 @@ struct TypeVarStringifier
if (state.hasSeen(&uv))
{
state.result.cycle = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*CYCLE*");
else
state.emit("<CYCLE>");
state.emit("*CYCLE*");
return;
}
@ -780,10 +780,7 @@ struct TypeVarStringifier
if (state.hasSeen(&uv))
{
state.result.cycle = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*CYCLE*");
else
state.emit("<CYCLE>");
state.emit("*CYCLE*");
return;
}
@ -828,10 +825,7 @@ struct TypeVarStringifier
void operator()(TypeId, const ErrorTypeVar& tv)
{
state.result.error = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit(FFlag::LuauUnknownAndNeverType ? "*error-type*" : "*unknown*");
else
state.emit(FFlag::LuauUnknownAndNeverType ? "<error-type>" : "*unknown*");
state.emit(FFlag::LuauUnknownAndNeverType ? "*error-type*" : "*unknown*");
}
void operator()(TypeId, const LazyTypeVar& ltv)
@ -850,11 +844,6 @@ struct TypeVarStringifier
state.emit("never");
}
void operator()(TypeId ty, const UseTypeVar&)
{
stringify(follow(ty));
}
void operator()(TypeId, const NegationTypeVar& ntv)
{
state.emit("~");
@ -907,10 +896,7 @@ struct TypePackStringifier
if (tp->ty.valueless_by_exception())
{
state.result.error = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("* VALUELESS TP BY EXCEPTION *");
else
state.emit("< VALUELESS TP BY EXCEPTION >");
state.emit("* VALUELESS TP BY EXCEPTION *");
return;
}
@ -934,10 +920,7 @@ struct TypePackStringifier
if (state.hasSeen(&tp))
{
state.result.cycle = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*CYCLETP*");
else
state.emit("<CYCLETP>");
state.emit("*CYCLETP*");
return;
}
@ -982,10 +965,7 @@ struct TypePackStringifier
void operator()(TypePackId, const Unifiable::Error& error)
{
state.result.error = true;
if (FFlag::LuauSpecialTypesAsterisked)
state.emit(FFlag::LuauUnknownAndNeverType ? "*error-type*" : "*unknown*");
else
state.emit(FFlag::LuauUnknownAndNeverType ? "<error-type>" : "*unknown*");
state.emit(FFlag::LuauUnknownAndNeverType ? "*error-type*" : "*unknown*");
}
void operator()(TypePackId, const VariadicTypePack& pack)
@ -993,10 +973,7 @@ struct TypePackStringifier
state.emit("...");
if (FFlag::DebugLuauVerboseTypeNames && pack.hidden)
{
if (FFlag::LuauSpecialTypesAsterisked)
state.emit("*hidden*");
else
state.emit("<hidden>");
state.emit("*hidden*");
}
stringify(pack.ty);
}
@ -1008,10 +985,7 @@ struct TypePackStringifier
if (pack.explicitName)
{
state.usedNames.insert(pack.name);
if (FFlag::LuauFixNameMaps)
state.opts.nameMap.typePacks[tp] = pack.name;
else
state.result.DEPRECATED_nameMap.typePacks[tp] = pack.name;
state.opts.nameMap.typePacks[tp] = pack.name;
state.emit(pack.name);
}
else
@ -1031,7 +1005,10 @@ struct TypePackStringifier
if (FFlag::DebugLuauVerboseTypeNames)
{
state.emit("-");
state.emit(pack.level);
if (FFlag::DebugLuauDeferredConstraintResolution)
state.emitLevel(pack.scope);
else
state.emit(pack.level);
}
state.emit("...");
@ -1111,8 +1088,7 @@ ToStringResult toStringDetailed(TypeId ty, ToStringOptions& opts)
ToStringResult result;
StringifierState state =
FFlag::LuauFixNameMaps ? StringifierState{opts, result, opts.nameMap} : StringifierState{opts, result, opts.DEPRECATED_nameMap};
StringifierState state{opts, result};
std::set<TypeId> cycles;
std::set<TypePackId> cycleTPs;
@ -1204,10 +1180,7 @@ ToStringResult toStringDetailed(TypeId ty, ToStringOptions& opts)
{
result.truncated = true;
if (FFlag::LuauSpecialTypesAsterisked)
result.name += "... *TRUNCATED*";
else
result.name += "... <TRUNCATED>";
result.name += "... *TRUNCATED*";
}
return result;
@ -1222,8 +1195,7 @@ ToStringResult toStringDetailed(TypePackId tp, ToStringOptions& opts)
* 4. Print out the root of the type using the same algorithm as step 3.
*/
ToStringResult result;
StringifierState state =
FFlag::LuauFixNameMaps ? StringifierState{opts, result, opts.nameMap} : StringifierState{opts, result, opts.DEPRECATED_nameMap};
StringifierState state{opts, result};
std::set<TypeId> cycles;
std::set<TypePackId> cycleTPs;
@ -1280,10 +1252,7 @@ ToStringResult toStringDetailed(TypePackId tp, ToStringOptions& opts)
if (opts.maxTypeLength > 0 && result.name.length() > opts.maxTypeLength)
{
if (FFlag::LuauSpecialTypesAsterisked)
result.name += "... *TRUNCATED*";
else
result.name += "... <TRUNCATED>";
result.name += "... *TRUNCATED*";
}
return result;
@ -1312,8 +1281,7 @@ std::string toString(const TypePackVar& tp, ToStringOptions& opts)
std::string toStringNamedFunction(const std::string& funcName, const FunctionTypeVar& ftv, ToStringOptions& opts)
{
ToStringResult result;
StringifierState state =
FFlag::LuauFixNameMaps ? StringifierState{opts, result, opts.nameMap} : StringifierState{opts, result, opts.DEPRECATED_nameMap};
StringifierState state{opts, result};
TypeVarStringifier tvs{state};
state.emit(funcName);
@ -1445,90 +1413,86 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
auto go = [&opts](auto&& c) -> std::string {
using T = std::decay_t<decltype(c)>;
// TODO: Inline and delete this function when clipping FFlag::LuauFixNameMaps
auto tos = [](auto&& a, ToStringOptions& opts) {
if (FFlag::LuauFixNameMaps)
return toString(a, opts);
else
{
ToStringResult tsr = toStringDetailed(a, opts);
opts.DEPRECATED_nameMap = std::move(tsr.DEPRECATED_nameMap);
return tsr.name;
}
auto tos = [&opts](auto&& a)
{
return toString(a, opts);
};
if constexpr (std::is_same_v<T, SubtypeConstraint>)
{
std::string subStr = tos(c.subType, opts);
std::string superStr = tos(c.superType, opts);
std::string subStr = tos(c.subType);
std::string superStr = tos(c.superType);
return subStr + " <: " + superStr;
}
else if constexpr (std::is_same_v<T, PackSubtypeConstraint>)
{
std::string subStr = tos(c.subPack, opts);
std::string superStr = tos(c.superPack, opts);
std::string subStr = tos(c.subPack);
std::string superStr = tos(c.superPack);
return subStr + " <: " + superStr;
}
else if constexpr (std::is_same_v<T, GeneralizationConstraint>)
{
std::string subStr = tos(c.generalizedType, opts);
std::string superStr = tos(c.sourceType, opts);
std::string subStr = tos(c.generalizedType);
std::string superStr = tos(c.sourceType);
return subStr + " ~ gen " + superStr;
}
else if constexpr (std::is_same_v<T, InstantiationConstraint>)
{
std::string subStr = tos(c.subType, opts);
std::string superStr = tos(c.superType, opts);
std::string subStr = tos(c.subType);
std::string superStr = tos(c.superType);
return subStr + " ~ inst " + superStr;
}
else if constexpr (std::is_same_v<T, UnaryConstraint>)
{
std::string resultStr = tos(c.resultType, opts);
std::string operandStr = tos(c.operandType, opts);
std::string resultStr = tos(c.resultType);
std::string operandStr = tos(c.operandType);
return resultStr + " ~ Unary<" + toString(c.op) + ", " + operandStr + ">";
}
else if constexpr (std::is_same_v<T, BinaryConstraint>)
{
std::string resultStr = tos(c.resultType, opts);
std::string leftStr = tos(c.leftType, opts);
std::string rightStr = tos(c.rightType, opts);
std::string resultStr = tos(c.resultType);
std::string leftStr = tos(c.leftType);
std::string rightStr = tos(c.rightType);
return resultStr + " ~ Binary<" + toString(c.op) + ", " + leftStr + ", " + rightStr + ">";
}
else if constexpr (std::is_same_v<T, IterableConstraint>)
{
std::string iteratorStr = tos(c.iterator, opts);
std::string variableStr = tos(c.variables, opts);
std::string iteratorStr = tos(c.iterator);
std::string variableStr = tos(c.variables);
return variableStr + " ~ Iterate<" + iteratorStr + ">";
}
else if constexpr (std::is_same_v<T, NameConstraint>)
{
std::string namedStr = tos(c.namedType, opts);
std::string namedStr = tos(c.namedType);
return "@name(" + namedStr + ") = " + c.name;
}
else if constexpr (std::is_same_v<T, TypeAliasExpansionConstraint>)
{
std::string targetStr = tos(c.target, opts);
std::string targetStr = tos(c.target);
return "expand " + targetStr;
}
else if constexpr (std::is_same_v<T, FunctionCallConstraint>)
{
return "call " + tos(c.fn, opts) + " with { result = " + tos(c.result, opts) + " }";
return "call " + tos(c.fn) + " with { result = " + tos(c.result) + " }";
}
else if constexpr (std::is_same_v<T, PrimitiveTypeConstraint>)
{
return tos(c.resultType, opts) + " ~ prim " + tos(c.expectedType, opts) + ", " + tos(c.singletonType, opts) + ", " +
tos(c.multitonType, opts);
return tos(c.resultType) + " ~ prim " + tos(c.expectedType) + ", " + tos(c.singletonType) + ", " +
tos(c.multitonType);
}
else if constexpr (std::is_same_v<T, HasPropConstraint>)
{
return tos(c.resultType, opts) + " ~ hasProp " + tos(c.subjectType, opts) + ", \"" + c.prop + "\"";
return tos(c.resultType) + " ~ hasProp " + tos(c.subjectType) + ", \"" + c.prop + "\"";
}
else if constexpr (std::is_same_v<T, RefinementConstraint>)
else if constexpr (std::is_same_v<T, SingletonOrTopTypeConstraint>)
{
return "TODO";
std::string result = tos(c.resultType);
std::string discriminant = tos(c.discriminantType);
return result + " ~ if isSingleton D then ~D else unknown where D = " + discriminant;
}
else
static_assert(always_false_v<T>, "Non-exhaustive constraint switch");

View file

@ -338,12 +338,6 @@ public:
{
return allocator->alloc<AstTypeReference>(Location(), std::nullopt, AstName{"never"});
}
AstType* operator()(const UseTypeVar& utv)
{
std::optional<TypeId> ty = utv.scope->lookup(utv.def);
LUAU_ASSERT(ty);
return Luau::visit(*this, (*ty)->ty);
}
AstType* operator()(const NegationTypeVar& ntv)
{
// FIXME: do the same thing we do with ErrorTypeVar

View file

@ -301,7 +301,6 @@ struct TypeChecker2
UnifierSharedState sharedState{&ice};
Normalizer normalizer{&arena, singletonTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, stack.back(), ret->location, Covariant};
u.anyIsTop = true;
u.tryUnify(actualRetType, expectedRetType);
const bool ok = u.errors.empty() && u.log.empty();
@ -331,16 +330,21 @@ struct TypeChecker2
if (value)
visit(value);
if (i != local->values.size - 1)
TypeId* maybeValueType = value ? module->astTypes.find(value) : nullptr;
if (i != local->values.size - 1 || maybeValueType)
{
AstLocal* var = i < local->vars.size ? local->vars.data[i] : nullptr;
if (var && var->annotation)
{
TypeId varType = lookupAnnotation(var->annotation);
TypeId annotationType = lookupAnnotation(var->annotation);
TypeId valueType = value ? lookupType(value) : nullptr;
if (valueType && !isSubtype(varType, valueType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
reportError(TypeMismatch{varType, valueType}, value->location);
if (valueType)
{
ErrorVec errors = tryUnify(stack.back(), value->location, valueType, annotationType);
if (!errors.empty())
reportErrors(std::move(errors));
}
}
}
else
@ -606,7 +610,7 @@ struct TypeChecker2
visit(rhs);
TypeId rhsType = lookupType(rhs);
if (!isSubtype(rhsType, lhsType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(rhsType, lhsType, stack.back()))
{
reportError(TypeMismatch{lhsType, rhsType}, rhs->location);
}
@ -757,7 +761,7 @@ struct TypeChecker2
TypeId actualType = lookupType(number);
TypeId numberType = singletonTypes->numberType;
if (!isSubtype(numberType, actualType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(numberType, actualType, stack.back()))
{
reportError(TypeMismatch{actualType, numberType}, number->location);
}
@ -768,7 +772,7 @@ struct TypeChecker2
TypeId actualType = lookupType(string);
TypeId stringType = singletonTypes->stringType;
if (!isSubtype(actualType, stringType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(actualType, stringType, stack.back()))
{
reportError(TypeMismatch{actualType, stringType}, string->location);
}
@ -857,7 +861,7 @@ struct TypeChecker2
FunctionTypeVar ftv{argsTp, expectedRetType};
TypeId expectedType = arena.addType(ftv);
if (!isSubtype(testFunctionType, expectedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(testFunctionType, expectedType, stack.back()))
{
CloneState cloneState;
expectedType = clone(expectedType, module->internalTypes, cloneState);
@ -876,7 +880,7 @@ struct TypeChecker2
getIndexTypeFromType(module->getModuleScope(), leftType, indexName->index.value, indexName->location, /* addErrors */ true);
if (ty)
{
if (!isSubtype(resultType, *ty, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(resultType, *ty, stack.back()))
{
reportError(TypeMismatch{resultType, *ty}, indexName->location);
}
@ -909,7 +913,7 @@ struct TypeChecker2
TypeId inferredArgTy = *argIt;
TypeId annotatedArgTy = lookupAnnotation(arg->annotation);
if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back()))
{
reportError(TypeMismatch{annotatedArgTy, inferredArgTy}, arg->location);
}
@ -950,7 +954,7 @@ struct TypeChecker2
if (const FunctionTypeVar* ftv = get<FunctionTypeVar>(follow(*mm)))
{
TypePackId expectedArgs = module->internalTypes.addTypePack({operandType});
reportErrors(tryUnify(scope, expr->location, ftv->argTypes, expectedArgs));
reportErrors(tryUnify(scope, expr->location, expectedArgs, ftv->argTypes));
if (std::optional<TypeId> ret = first(ftv->retTypes))
{
@ -1092,7 +1096,7 @@ struct TypeChecker2
expr->op == AstExprBinary::CompareGt || expr->op == AstExprBinary::Op::CompareLe || expr->op == AstExprBinary::Op::CompareLt)
{
TypePackId expectedRets = module->internalTypes.addTypePack({singletonTypes->booleanType});
if (!isSubtype(ftv->retTypes, expectedRets, scope, singletonTypes, ice))
if (!isSubtype(ftv->retTypes, expectedRets, scope))
{
reportError(GenericError{format("Metamethod '%s' must return type 'boolean'", it->second)}, expr->location);
}
@ -1203,10 +1207,10 @@ struct TypeChecker2
TypeId computedType = lookupType(expr->expr);
// Note: As an optimization, we try 'number <: number | string' first, as that is the more likely case.
if (isSubtype(annotationType, computedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (isSubtype(annotationType, computedType, stack.back()))
return;
if (isSubtype(computedType, annotationType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
if (isSubtype(computedType, annotationType, stack.back()))
return;
reportError(TypesAreUnrelated{computedType, annotationType}, expr->location);
@ -1501,13 +1505,27 @@ struct TypeChecker2
}
}
template<typename TID>
bool isSubtype(TID subTy, TID superTy, NotNull<Scope> scope)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
Normalizer normalizer{&arena, singletonTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
u.useScopes = true;
u.tryUnify(subTy, superTy);
const bool ok = u.errors.empty() && u.log.empty();
return ok;
}
template<typename TID>
ErrorVec tryUnify(NotNull<Scope> scope, const Location& location, TID subTy, TID superTy)
{
UnifierSharedState sharedState{&ice};
Normalizer normalizer{&module->internalTypes, singletonTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, location, Covariant};
u.anyIsTop = true;
u.useScopes = true;
u.tryUnify(subTy, superTy);
return std::move(u.errors);

View file

@ -35,18 +35,20 @@ LUAU_FASTFLAG(LuauTypeNormalization2)
LUAU_FASTFLAGVARIABLE(DebugLuauFreezeDuringUnification, false)
LUAU_FASTFLAGVARIABLE(LuauReturnAnyInsteadOfICE, false) // Eventually removed as false.
LUAU_FASTFLAGVARIABLE(DebugLuauSharedSelf, false)
LUAU_FASTFLAGVARIABLE(LuauAnyifyModuleReturnGenerics, false)
LUAU_FASTFLAGVARIABLE(LuauLvaluelessPath, false)
LUAU_FASTFLAGVARIABLE(LuauNilIterator, false)
LUAU_FASTFLAGVARIABLE(LuauUnknownAndNeverType, false)
LUAU_FASTFLAGVARIABLE(LuauBinaryNeedsExpectedTypesToo, false)
LUAU_FASTFLAGVARIABLE(LuauFixVarargExprHeadType, false)
LUAU_FASTFLAGVARIABLE(LuauNeverTypesAndOperatorsInference, false)
LUAU_FASTFLAGVARIABLE(LuauReturnsFromCallsitesAreNotWidened, false)
LUAU_FASTFLAGVARIABLE(LuauTryhardAnd, false)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAGVARIABLE(LuauCompleteVisitor, false)
LUAU_FASTFLAGVARIABLE(LuauOptionalNextKey, false)
LUAU_FASTFLAGVARIABLE(LuauReportShadowedTypeAlias, false)
LUAU_FASTFLAGVARIABLE(LuauBetterMessagingOnCountMismatch, false)
LUAU_FASTFLAGVARIABLE(LuauArgMismatchReportFunctionLocation, false)
LUAU_FASTFLAGVARIABLE(LuauImplicitElseRefinement, false)
LUAU_FASTFLAGVARIABLE(LuauDeclareClassPrototype, false)
namespace Luau
@ -332,8 +334,7 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
else
moduleScope->returnType = anyify(moduleScope, moduleScope->returnType, Location{});
if (FFlag::LuauAnyifyModuleReturnGenerics)
moduleScope->returnType = anyifyModuleReturnTypePackGenerics(moduleScope->returnType);
moduleScope->returnType = anyifyModuleReturnTypePackGenerics(moduleScope->returnType);
for (auto& [_, typeFun] : moduleScope->exportedTypeBindings)
typeFun.type = anyify(moduleScope, typeFun.type, Location{});
@ -1215,10 +1216,10 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatForIn& forin)
AstExpr* firstValue = forin.values.data[0];
// next is a function that takes Table<K, V> and an optional index of type K
// next<K, V>(t: Table<K, V>, index: K | nil) -> (K, V)
// next<K, V>(t: Table<K, V>, index: K | nil) -> (K?, V)
// pairs and ipairs are quite messy, but they both share the same types
// pairs returns 'next, t, nil', thus the type would be
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K | nil) -> (K, V), Table<K, V>, K | nil)
// pairs<K, V>(t: Table<K, V>) -> ((Table<K, V>, K | nil) -> (K?, V), Table<K, V>, K | nil)
// ipairs returns 'next, t, 0', thus ipairs will also share the same type as pairs, except K = number
//
// we can also define our own custom iterators by returning a wrapped coroutine that calls coroutine.yield
@ -1261,6 +1262,9 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatForIn& forin)
iterTy = instantiate(scope, checkExpr(scope, *firstValue).type, firstValue->location);
}
if (FFlag::LuauNilIterator)
iterTy = stripFromNilAndReport(iterTy, firstValue->location);
if (std::optional<TypeId> iterMM = findMetatableEntry(iterTy, "__iter", firstValue->location, /* addErrors= */ true))
{
// if __iter metamethod is present, it will be called and the results are going to be called as if they are functions
@ -1344,21 +1348,61 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatForIn& forin)
reportErrors(state.errors);
}
TypePackId varPack = addTypePack(TypePackVar{TypePack{varTypes, freshTypePack(scope)}});
if (forin.values.size >= 2)
if (FFlag::LuauOptionalNextKey)
{
AstArray<AstExpr*> arguments{forin.values.data + 1, forin.values.size - 1};
TypePackId retPack = iterFunc->retTypes;
Position start = firstValue->location.begin;
Position end = values[forin.values.size - 1]->location.end;
AstExprCall exprCall{Location(start, end), firstValue, arguments, /* self= */ false, Location()};
if (forin.values.size >= 2)
{
AstArray<AstExpr*> arguments{forin.values.data + 1, forin.values.size - 1};
Position start = firstValue->location.begin;
Position end = values[forin.values.size - 1]->location.end;
AstExprCall exprCall{Location(start, end), firstValue, arguments, /* self= */ false, Location()};
retPack = checkExprPack(scope, exprCall).type;
}
// We need to remove 'nil' from the set of options of the first return value:
// because the for loop stops when it gets 'nil', that result is never actually assigned to the first variable
if (std::optional<TypeId> fty = first(retPack); fty && !varTypes.empty())
{
TypeId keyTy = follow(*fty);
if (get<UnionTypeVar>(keyTy))
{
if (std::optional<TypeId> ty = tryStripUnionFromNil(keyTy))
keyTy = *ty;
}
unify(keyTy, varTypes.front(), scope, forin.location);
// We have already handled the first variable type, make it match in the pack check
varTypes.front() = *fty;
}
TypePackId varPack = addTypePack(TypePackVar{TypePack{varTypes, freshTypePack(scope)}});
TypePackId retPack = checkExprPack(scope, exprCall).type;
unify(retPack, varPack, scope, forin.location);
}
else
unify(iterFunc->retTypes, varPack, scope, forin.location);
{
TypePackId varPack = addTypePack(TypePackVar{TypePack{varTypes, freshTypePack(scope)}});
if (forin.values.size >= 2)
{
AstArray<AstExpr*> arguments{forin.values.data + 1, forin.values.size - 1};
Position start = firstValue->location.begin;
Position end = values[forin.values.size - 1]->location.end;
AstExprCall exprCall{Location(start, end), firstValue, arguments, /* self= */ false, Location()};
TypePackId retPack = checkExprPack(scope, exprCall).type;
unify(retPack, varPack, scope, forin.location);
}
else
unify(iterFunc->retTypes, varPack, scope, forin.location);
}
check(loopScope, *forin.body);
}
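A hypothetical Luau sketch of what the optional-next-key handling above means for users:

for k, v in pairs({ apple = 1, banana = 2 }) do
    -- `next` may return (nil, nil) to terminate iteration, but the body only runs
    -- for non-nil keys, so k is typed as string here rather than string?
    print(k, v)
end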
@ -1977,18 +2021,10 @@ WithPredicate<TypeId> TypeChecker::checkExpr(const ScopePtr& scope, const AstExp
if (get<TypePack>(varargPack))
{
if (FFlag::LuauFixVarargExprHeadType)
{
if (std::optional<TypeId> ty = first(varargPack))
return {*ty};
if (std::optional<TypeId> ty = first(varargPack))
return {*ty};
return {nilType};
}
else
{
std::vector<TypeId> types = flatten(varargPack).first;
return {!types.empty() ? types[0] : nilType};
}
return {nilType};
}
else if (get<FreeTypePack>(varargPack))
{
@ -2839,12 +2875,54 @@ TypeId TypeChecker::checkRelationalOperation(
case AstExprBinary::And:
if (lhsIsAny)
{
return lhsType;
return unionOfTypes(rhsType, booleanType, scope, expr.location, false);
}
else if (FFlag::LuauTryhardAnd)
{
// If lhs is free, we can't tell which 'falsy' components it has, if any
if (get<FreeTypeVar>(lhsType))
return unionOfTypes(addType(UnionTypeVar{{nilType, singletonType(false)}}), rhsType, scope, expr.location, false);
auto [oty, notNever] = pickTypesFromSense(lhsType, false, neverType); // keep only the falsy parts of the LHS
if (notNever)
{
LUAU_ASSERT(oty);
return unionOfTypes(*oty, rhsType, scope, expr.location, false);
}
else
{
return rhsType;
}
}
else
{
return unionOfTypes(rhsType, booleanType, scope, expr.location, false);
}
case AstExprBinary::Or:
if (lhsIsAny)
{
return lhsType;
return unionOfTypes(lhsType, rhsType, scope, expr.location);
}
else if (FFlag::LuauTryhardAnd)
{
auto [oty, notNever] = pickTypesFromSense(lhsType, true, neverType); // keep only the truthy parts of the LHS
if (notNever)
{
LUAU_ASSERT(oty);
return unionOfTypes(*oty, rhsType, scope, expr.location);
}
else
{
return rhsType;
}
}
else
{
return unionOfTypes(lhsType, rhsType, scope, expr.location);
}
default:
LUAU_ASSERT(0);
ice(format("checkRelationalOperation called with incorrect binary expression '%s'", toString(expr.op).c_str()), expr.location);
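Under LuauTryhardAnd the old solver reaches analogous result types; a minimal Luau sketch (hypothetical code):

local function pick(n: number?)
    local a = n and n + 1 -- falsy part of number? (nil) unioned with number, i.e. number?
    local b = n or 0      -- truthy part of number? (number) unioned with number, i.e. number
    return a, b
end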
@ -4962,9 +5040,9 @@ TypePackId TypeChecker::errorRecoveryTypePack(TypePackId guess)
return singletonTypes->errorRecoveryTypePack(guess);
}
TypeIdPredicate TypeChecker::mkTruthyPredicate(bool sense)
TypeIdPredicate TypeChecker::mkTruthyPredicate(bool sense, TypeId emptySetTy)
{
return [this, sense](TypeId ty) -> std::optional<TypeId> {
return [this, sense, emptySetTy](TypeId ty) -> std::optional<TypeId> {
// any/error/free gets a special pass unconditionally because they can't be decided.
if (get<AnyTypeVar>(ty) || get<ErrorTypeVar>(ty) || get<FreeTypeVar>(ty))
return ty;
@ -4982,7 +5060,7 @@ TypeIdPredicate TypeChecker::mkTruthyPredicate(bool sense)
return sense ? std::nullopt : std::optional<TypeId>(ty);
// at this point, anything else is kept if sense is true, or replaced by emptySetTy
return sense ? ty : nilType;
return sense ? ty : emptySetTy;
};
}
@ -5008,9 +5086,9 @@ std::pair<std::optional<TypeId>, bool> TypeChecker::filterMap(TypeId type, TypeI
}
}
std::pair<std::optional<TypeId>, bool> TypeChecker::pickTypesFromSense(TypeId type, bool sense)
std::pair<std::optional<TypeId>, bool> TypeChecker::pickTypesFromSense(TypeId type, bool sense, TypeId emptySetTy)
{
return filterMap(type, mkTruthyPredicate(sense));
return filterMap(type, mkTruthyPredicate(sense, emptySetTy));
}
TypeId TypeChecker::addTV(TypeVar&& tv)
@ -5779,7 +5857,7 @@ void TypeChecker::resolve(const TruthyPredicate& truthyP, RefinementMap& refis,
if (ty && fromOr)
return addRefinement(refis, truthyP.lvalue, *ty);
refineLValue(truthyP.lvalue, refis, scope, mkTruthyPredicate(sense));
refineLValue(truthyP.lvalue, refis, scope, mkTruthyPredicate(sense, nilType));
}
void TypeChecker::resolve(const AndPredicate& andP, RefinementMap& refis, const ScopePtr& scope, bool sense)
@ -5972,13 +6050,57 @@ void TypeChecker::resolve(const EqPredicate& eqP, RefinementMap& refis, const Sc
if (maybeSingleton(eqP.type))
{
// Normally we'd write option <: eqP.type, but singletons are always the subtype, so we flip this.
if (!sense || canUnify(eqP.type, option, scope, eqP.location).empty())
return sense ? eqP.type : option;
if (FFlag::LuauImplicitElseRefinement)
{
bool optionIsSubtype = canUnify(option, eqP.type, scope, eqP.location).empty();
bool targetIsSubtype = canUnify(eqP.type, option, scope, eqP.location).empty();
// local variable works around an odd gcc 9.3 warning: <anonymous> may be used uninitialized
std::optional<TypeId> res = std::nullopt;
return res;
// terminology refresher:
// - option is the type of the expression `x`, and
// - eqP.type is the type of the expression `"hello"`
//
// "hello" == x where
// x : "hello" | "world" -> x : "hello"
// x : number | string -> x : "hello"
// x : number -> x : never
//
// "hello" ~= x where
// x : "hello" | "world" -> x : "world"
// x : number | string -> x : number | string
// x : number -> x : number
// local variable works around an odd gcc 9.3 warning: <anonymous> may be used uninitialized
std::optional<TypeId> nope = std::nullopt;
if (sense)
{
if (optionIsSubtype && !targetIsSubtype)
return option;
else if (!optionIsSubtype && targetIsSubtype)
return eqP.type;
else if (!optionIsSubtype && !targetIsSubtype)
return nope;
else if (optionIsSubtype && targetIsSubtype)
return eqP.type;
}
else
{
bool isOptionSingleton = get<SingletonTypeVar>(option);
if (!isOptionSingleton)
return option;
else if (optionIsSubtype && targetIsSubtype)
return nope;
}
}
else
{
if (!sense || canUnify(eqP.type, option, scope, eqP.location).empty())
return sense ? eqP.type : option;
// local variable works around an odd gcc 9.3 warning: <anonymous> may be used uninitialized
std::optional<TypeId> res = std::nullopt;
return res;
}
}
return option;
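A hypothetical Luau sketch of the implicit-else refinement described in the comment above:

local function f(x: "hello" | "world")
    if x == "hello" then
        print(x) -- x : "hello"
    else
        print(x) -- with LuauImplicitElseRefinement, x : "world"
    end
end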

View file

@ -3,6 +3,7 @@
#include "Luau/BuiltinDefinitions.h"
#include "Luau/Common.h"
#include "Luau/ConstraintSolver.h"
#include "Luau/DenseHash.h"
#include "Luau/Error.h"
#include "Luau/RecursionCounter.h"
@ -26,6 +27,7 @@ LUAU_FASTINT(LuauTypeInferRecursionLimit)
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauMaybeGenericIntersectionTypes, false)
LUAU_FASTFLAGVARIABLE(LuauNoMoreGlobalSingletonTypes, false)
LUAU_FASTFLAGVARIABLE(LuauNewLibraryTypeNames, false)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
namespace Luau
@ -33,15 +35,19 @@ namespace Luau
std::optional<WithPredicate<TypePackId>> magicFunctionFormat(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate);
static bool dcrMagicFunctionFormat(MagicFunctionCallContext context);
static std::optional<WithPredicate<TypePackId>> magicFunctionGmatch(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate);
static bool dcrMagicFunctionGmatch(MagicFunctionCallContext context);
static std::optional<WithPredicate<TypePackId>> magicFunctionMatch(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate);
static bool dcrMagicFunctionMatch(MagicFunctionCallContext context);
static std::optional<WithPredicate<TypePackId>> magicFunctionFind(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate);
static bool dcrMagicFunctionFind(MagicFunctionCallContext context);
TypeId follow(TypeId t)
{
@ -57,13 +63,6 @@ TypeId follow(TypeId t, std::function<TypeId(TypeId)> mapper)
return btv->boundTo;
else if (auto ttv = get<TableTypeVar>(mapper(ty)))
return ttv->boundTo;
else if (auto utv = get<UseTypeVar>(mapper(ty)))
{
std::optional<TypeId> ty = utv->scope->lookup(utv->def);
if (!ty)
throwRuntimeError("UseTypeVar must map to another TypeId");
return *ty;
}
else
return std::nullopt;
};
@ -761,6 +760,7 @@ SingletonTypes::SingletonTypes()
, stringType(arena->addType(TypeVar{PrimitiveTypeVar{PrimitiveTypeVar::String}, /*persistent*/ true}))
, booleanType(arena->addType(TypeVar{PrimitiveTypeVar{PrimitiveTypeVar::Boolean}, /*persistent*/ true}))
, threadType(arena->addType(TypeVar{PrimitiveTypeVar{PrimitiveTypeVar::Thread}, /*persistent*/ true}))
, functionType(arena->addType(TypeVar{PrimitiveTypeVar{PrimitiveTypeVar::Function}, /*persistent*/ true}))
, trueType(arena->addType(TypeVar{SingletonTypeVar{BooleanSingleton{true}}, /*persistent*/ true}))
, falseType(arena->addType(TypeVar{SingletonTypeVar{BooleanSingleton{false}}, /*persistent*/ true}))
, anyType(arena->addType(TypeVar{AnyTypeVar{}, /*persistent*/ true}))
@ -806,6 +806,7 @@ TypeId SingletonTypes::makeStringMetatable()
FunctionTypeVar formatFTV{arena->addTypePack(TypePack{{stringType}, anyTypePack}), oneStringPack};
formatFTV.magicFunction = &magicFunctionFormat;
const TypeId formatFn = arena->addType(formatFTV);
attachDcrMagicFunction(formatFn, dcrMagicFunctionFormat);
const TypePackId emptyPack = arena->addTypePack({});
const TypePackId stringVariadicList = arena->addTypePack(TypePackVar{VariadicTypePack{stringType}});
@ -820,14 +821,17 @@ TypeId SingletonTypes::makeStringMetatable()
const TypeId gmatchFunc =
makeFunction(*arena, stringType, {}, {}, {stringType}, {}, {arena->addType(FunctionTypeVar{emptyPack, stringVariadicList})});
attachMagicFunction(gmatchFunc, magicFunctionGmatch);
attachDcrMagicFunction(gmatchFunc, dcrMagicFunctionGmatch);
const TypeId matchFunc = arena->addType(
FunctionTypeVar{arena->addTypePack({stringType, stringType, optionalNumber}), arena->addTypePack(TypePackVar{VariadicTypePack{stringType}})});
attachMagicFunction(matchFunc, magicFunctionMatch);
attachDcrMagicFunction(matchFunc, dcrMagicFunctionMatch);
const TypeId findFunc = arena->addType(FunctionTypeVar{arena->addTypePack({stringType, stringType, optionalNumber, optionalBoolean}),
arena->addTypePack(TypePack{{optionalNumber, optionalNumber}, stringVariadicList})});
attachMagicFunction(findFunc, magicFunctionFind);
attachDcrMagicFunction(findFunc, dcrMagicFunctionFind);
TableTypeVar::Props stringLib = {
{"byte", {arena->addType(FunctionTypeVar{arena->addTypePack({stringType, optionalNumber, optionalNumber}), numberVariadicList})}},
@ -861,7 +865,7 @@ TypeId SingletonTypes::makeStringMetatable()
TypeId tableType = arena->addType(TableTypeVar{std::move(stringLib), std::nullopt, TypeLevel{}, TableState::Sealed});
if (TableTypeVar* ttv = getMutable<TableTypeVar>(tableType))
ttv->name = "string";
ttv->name = FFlag::LuauNewLibraryTypeNames ? "typeof(string)" : "string";
return arena->addType(TableTypeVar{{{{"__index", {tableType}}}}, std::nullopt, TypeLevel{}, TableState::Sealed});
}
@ -946,7 +950,8 @@ void persist(TypeId ty)
queue.push_back(mtv->table);
queue.push_back(mtv->metatable);
}
else if (get<GenericTypeVar>(t) || get<AnyTypeVar>(t) || get<FreeTypeVar>(t) || get<SingletonTypeVar>(t) || get<PrimitiveTypeVar>(t) || get<NegationTypeVar>(t))
else if (get<GenericTypeVar>(t) || get<AnyTypeVar>(t) || get<FreeTypeVar>(t) || get<SingletonTypeVar>(t) || get<PrimitiveTypeVar>(t) ||
get<NegationTypeVar>(t))
{
}
else
@ -1077,7 +1082,7 @@ IntersectionTypeVarIterator end(const IntersectionTypeVar* itv)
return IntersectionTypeVarIterator{};
}
static std::vector<TypeId> parseFormatString(TypeChecker& typechecker, const char* data, size_t size)
static std::vector<TypeId> parseFormatString(NotNull<SingletonTypes> singletonTypes, const char* data, size_t size)
{
const char* options = "cdiouxXeEfgGqs*";
@ -1100,13 +1105,13 @@ static std::vector<TypeId> parseFormatString(TypeChecker& typechecker, const cha
break;
if (data[i] == 'q' || data[i] == 's')
result.push_back(typechecker.stringType);
result.push_back(singletonTypes->stringType);
else if (data[i] == '*')
result.push_back(typechecker.unknownType);
result.push_back(singletonTypes->unknownType);
else if (strchr(options, data[i]))
result.push_back(typechecker.numberType);
result.push_back(singletonTypes->numberType);
else
result.push_back(typechecker.errorRecoveryType(typechecker.anyType));
result.push_back(singletonTypes->errorRecoveryType(singletonTypes->anyType));
}
}
@ -1135,7 +1140,7 @@ std::optional<WithPredicate<TypePackId>> magicFunctionFormat(
if (!fmt)
return std::nullopt;
std::vector<TypeId> expected = parseFormatString(typechecker, fmt->value.data, fmt->value.size);
std::vector<TypeId> expected = parseFormatString(typechecker.singletonTypes, fmt->value.data, fmt->value.size);
const auto& [params, tail] = flatten(paramPack);
size_t paramOffset = 1;
@ -1159,7 +1164,50 @@ std::optional<WithPredicate<TypePackId>> magicFunctionFormat(
return WithPredicate<TypePackId>{arena.addTypePack({typechecker.stringType})};
}
static std::vector<TypeId> parsePatternString(TypeChecker& typechecker, const char* data, size_t size)
static bool dcrMagicFunctionFormat(MagicFunctionCallContext context)
{
TypeArena* arena = context.solver->arena;
AstExprConstantString* fmt = nullptr;
if (auto index = context.callSite->func->as<AstExprIndexName>(); index && context.callSite->self)
{
if (auto group = index->expr->as<AstExprGroup>())
fmt = group->expr->as<AstExprConstantString>();
else
fmt = index->expr->as<AstExprConstantString>();
}
if (!context.callSite->self && context.callSite->args.size > 0)
fmt = context.callSite->args.data[0]->as<AstExprConstantString>();
if (!fmt)
return false;
std::vector<TypeId> expected = parseFormatString(context.solver->singletonTypes, fmt->value.data, fmt->value.size);
const auto& [params, tail] = flatten(context.arguments);
size_t paramOffset = 1;
// unify the prefix one argument at a time
for (size_t i = 0; i < expected.size() && i + paramOffset < params.size(); ++i)
{
context.solver->unify(params[i + paramOffset], expected[i], context.solver->rootScope);
}
// if we know the argument count or if we have too many arguments for sure, we can issue an error
size_t numActualParams = params.size();
size_t numExpectedParams = expected.size() + 1; // + 1 for the format string
if (numExpectedParams != numActualParams && (!tail || numExpectedParams < numActualParams))
context.solver->reportError(TypeError{context.callSite->location, CountMismatch{numExpectedParams, std::nullopt, numActualParams}});
TypePackId resultPack = arena->addTypePack({context.solver->singletonTypes->stringType});
asMutable(context.result)->ty.emplace<BoundTypePack>(resultPack);
return true;
}
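The count check above boils down to a simple rule: the call is accepted when the argument count matches the directive count plus one (for the format string), or when a variadic tail can still supply the missing arguments; only a definite surplus is reported. A minimal standalone sketch of that rule, with invented names that are not part of the Luau codebase:

#include <cstddef>

// Sketch of the arity rule used above: numDirectives is the number of format
// directives parsed from the format string, numActualParams mirrors params.size(),
// and hasVariadicTail reflects whether the argument pack ends in a tail.
static bool formatCallCountOk(size_t numDirectives, size_t numActualParams, bool hasVariadicTail)
{
    size_t numExpectedParams = numDirectives + 1; // + 1 for the format string
    if (numExpectedParams == numActualParams)
        return true;
    // with a tail, missing arguments may still be provided later; only a surplus is an error
    return hasVariadicTail && numExpectedParams >= numActualParams;
}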
static std::vector<TypeId> parsePatternString(NotNull<SingletonTypes> singletonTypes, const char* data, size_t size)
{
std::vector<TypeId> result;
int depth = 0;
@ -1191,12 +1239,12 @@ static std::vector<TypeId> parsePatternString(TypeChecker& typechecker, const ch
if (i + 1 < size && data[i + 1] == ')')
{
i++;
result.push_back(typechecker.numberType);
result.push_back(singletonTypes->numberType);
continue;
}
++depth;
result.push_back(typechecker.stringType);
result.push_back(singletonTypes->stringType);
}
else if (data[i] == ')')
{
@ -1214,7 +1262,7 @@ static std::vector<TypeId> parsePatternString(TypeChecker& typechecker, const ch
return std::vector<TypeId>();
if (result.empty())
result.push_back(typechecker.stringType);
result.push_back(singletonTypes->stringType);
return result;
}
@ -1238,7 +1286,7 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionGmatch(
if (!pattern)
return std::nullopt;
std::vector<TypeId> returnTypes = parsePatternString(typechecker, pattern->value.data, pattern->value.size);
std::vector<TypeId> returnTypes = parsePatternString(typechecker.singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return std::nullopt;
@ -1251,6 +1299,39 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionGmatch(
return WithPredicate<TypePackId>{arena.addTypePack({iteratorType})};
}
static bool dcrMagicFunctionGmatch(MagicFunctionCallContext context)
{
const auto& [params, tail] = flatten(context.arguments);
if (params.size() != 2)
return false;
TypeArena* arena = context.solver->arena;
AstExprConstantString* pattern = nullptr;
size_t index = context.callSite->self ? 0 : 1;
if (context.callSite->args.size > index)
pattern = context.callSite->args.data[index]->as<AstExprConstantString>();
if (!pattern)
return false;
std::vector<TypeId> returnTypes = parsePatternString(context.solver->singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return false;
context.solver->unify(params[0], context.solver->singletonTypes->stringType, context.solver->rootScope);
const TypePackId emptyPack = arena->addTypePack({});
const TypePackId returnList = arena->addTypePack(returnTypes);
const TypeId iteratorType = arena->addType(FunctionTypeVar{emptyPack, returnList});
const TypePackId resTypePack = arena->addTypePack({iteratorType});
asMutable(context.result)->ty.emplace<BoundTypePack>(resTypePack);
return true;
}
static std::optional<WithPredicate<TypePackId>> magicFunctionMatch(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate)
{
@ -1270,7 +1351,7 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionMatch(
if (!pattern)
return std::nullopt;
std::vector<TypeId> returnTypes = parsePatternString(typechecker, pattern->value.data, pattern->value.size);
std::vector<TypeId> returnTypes = parsePatternString(typechecker.singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return std::nullopt;
@ -1287,6 +1368,42 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionMatch(
return WithPredicate<TypePackId>{returnList};
}
static bool dcrMagicFunctionMatch(MagicFunctionCallContext context)
{
const auto& [params, tail] = flatten(context.arguments);
if (params.size() < 2 || params.size() > 3)
return false;
TypeArena* arena = context.solver->arena;
AstExprConstantString* pattern = nullptr;
size_t patternIndex = context.callSite->self ? 0 : 1;
if (context.callSite->args.size > patternIndex)
pattern = context.callSite->args.data[patternIndex]->as<AstExprConstantString>();
if (!pattern)
return false;
std::vector<TypeId> returnTypes = parsePatternString(context.solver->singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return false;
context.solver->unify(params[0], context.solver->singletonTypes->stringType, context.solver->rootScope);
const TypeId optionalNumber = arena->addType(UnionTypeVar{{context.solver->singletonTypes->nilType, context.solver->singletonTypes->numberType}});
size_t initIndex = context.callSite->self ? 1 : 2;
if (params.size() == 3 && context.callSite->args.size > initIndex)
context.solver->unify(params[2], optionalNumber, context.solver->rootScope);
const TypePackId returnList = arena->addTypePack(returnTypes);
asMutable(context.result)->ty.emplace<BoundTypePack>(returnList);
return true;
}
static std::optional<WithPredicate<TypePackId>> magicFunctionFind(
TypeChecker& typechecker, const ScopePtr& scope, const AstExprCall& expr, WithPredicate<TypePackId> withPredicate)
{
@ -1317,7 +1434,7 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionFind(
std::vector<TypeId> returnTypes;
if (!plain)
{
returnTypes = parsePatternString(typechecker, pattern->value.data, pattern->value.size);
returnTypes = parsePatternString(typechecker.singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return std::nullopt;
@ -1341,6 +1458,60 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionFind(
return WithPredicate<TypePackId>{returnList};
}
static bool dcrMagicFunctionFind(MagicFunctionCallContext context)
{
const auto& [params, tail] = flatten(context.arguments);
if (params.size() < 2 || params.size() > 4)
return false;
TypeArena* arena = context.solver->arena;
NotNull<SingletonTypes> singletonTypes = context.solver->singletonTypes;
AstExprConstantString* pattern = nullptr;
size_t patternIndex = context.callSite->self ? 0 : 1;
if (context.callSite->args.size > patternIndex)
pattern = context.callSite->args.data[patternIndex]->as<AstExprConstantString>();
if (!pattern)
return false;
bool plain = false;
size_t plainIndex = context.callSite->self ? 2 : 3;
if (context.callSite->args.size > plainIndex)
{
AstExprConstantBool* p = context.callSite->args.data[plainIndex]->as<AstExprConstantBool>();
plain = p && p->value;
}
std::vector<TypeId> returnTypes;
if (!plain)
{
returnTypes = parsePatternString(singletonTypes, pattern->value.data, pattern->value.size);
if (returnTypes.empty())
return false;
}
context.solver->unify(params[0], singletonTypes->stringType, context.solver->rootScope);
const TypeId optionalNumber = arena->addType(UnionTypeVar{{singletonTypes->nilType, singletonTypes->numberType}});
const TypeId optionalBoolean = arena->addType(UnionTypeVar{{singletonTypes->nilType, singletonTypes->booleanType}});
size_t initIndex = context.callSite->self ? 1 : 2;
if (params.size() >= 3 && context.callSite->args.size > initIndex)
context.solver->unify(params[2], optionalNumber, context.solver->rootScope);
if (params.size() == 4 && context.callSite->args.size > plainIndex)
context.solver->unify(params[3], optionalBoolean, context.solver->rootScope);
returnTypes.insert(returnTypes.begin(), {optionalNumber, optionalNumber});
const TypePackId returnList = arena->addTypePack(returnTypes);
asMutable(context.result)->ty.emplace<BoundTypePack>(returnList);
return true;
}
std::vector<TypeId> filterMap(TypeId type, TypeIdPredicate predicate)
{
type = follow(type);

View file

@ -8,6 +8,7 @@
#include "Luau/TypePack.h"
#include "Luau/TypeUtils.h"
#include "Luau/TimeTrace.h"
#include "Luau/TypeVar.h"
#include "Luau/VisitTypeVar.h"
#include "Luau/ToString.h"
@ -21,8 +22,10 @@ LUAU_FASTFLAGVARIABLE(LuauSubtypeNormalizer, false);
LUAU_FASTFLAGVARIABLE(LuauScalarShapeSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauOverloadedFunctionSubtypingPerf, false);
LUAU_FASTFLAGVARIABLE(LuauScalarShapeUnifyToMtOwner, false)
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauNegatedFunctionTypes)
namespace Luau
{
@ -363,7 +366,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (sharedState.counters.iterationLimit > 0 && sharedState.counters.iterationLimit < sharedState.counters.iterationCount)
{
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
return;
}
@ -404,7 +407,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (subGeneric && !subsumes(useScopes, subGeneric, superFree))
{
// TODO: a more informative error message? CLI-39912
reportError(TypeError{location, GenericError{"Generic subtype escaping scope"}});
reportError(location, GenericError{"Generic subtype escaping scope"});
return;
}
@ -433,7 +436,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (superGeneric && !subsumes(useScopes, superGeneric, subFree))
{
// TODO: a more informative error message? CLI-39912
reportError(TypeError{location, GenericError{"Generic supertype escaping scope"}});
reportError(location, GenericError{"Generic supertype escaping scope"});
return;
}
@ -450,15 +453,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
return tryUnifyWithAny(subTy, superTy);
if (get<AnyTypeVar>(subTy))
{
if (anyIsTop)
{
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
return;
}
else
return tryUnifyWithAny(superTy, subTy);
}
return tryUnifyWithAny(superTy, subTy);
if (log.get<ErrorTypeVar>(subTy))
return tryUnifyWithAny(superTy, subTy);
@ -478,7 +473,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (auto error = sharedState.cachedUnifyError.find({subTy, superTy}))
{
reportError(TypeError{location, *error});
reportError(location, *error);
return;
}
}
@ -520,6 +515,12 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
else if ((log.getMutable<PrimitiveTypeVar>(superTy) || log.getMutable<SingletonTypeVar>(superTy)) && log.getMutable<SingletonTypeVar>(subTy))
tryUnifySingletons(subTy, superTy);
else if (auto ptv = get<PrimitiveTypeVar>(superTy);
FFlag::LuauNegatedFunctionTypes && ptv && ptv->type == PrimitiveTypeVar::Function && get<FunctionTypeVar>(subTy))
{
// Ok. Do nothing. forall functions F, F <: function
}
else if (log.getMutable<FunctionTypeVar>(superTy) && log.getMutable<FunctionTypeVar>(subTy))
tryUnifyFunctions(subTy, superTy, isFunctionCall);
@ -559,7 +560,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
tryUnifyNegationWithType(subTy, superTy);
else
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
if (cacheEnabled)
cacheResult(subTy, superTy, errorCount);
@ -633,9 +634,9 @@ void Unifier::tryUnifyUnionWithType(TypeId subTy, const UnionTypeVar* subUnion,
else if (failed)
{
if (firstFailedOption)
reportError(TypeError{location, TypeMismatch{superTy, subTy, "Not all union options are compatible.", *firstFailedOption}});
reportError(location, TypeMismatch{superTy, subTy, "Not all union options are compatible.", *firstFailedOption});
else
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
}
}
@ -734,7 +735,7 @@ void Unifier::tryUnifyTypeWithUnion(TypeId subTy, TypeId superTy, const UnionTyp
const NormalizedType* subNorm = normalizer->normalize(subTy);
const NormalizedType* superNorm = normalizer->normalize(superTy);
if (!subNorm || !superNorm)
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
else if ((failedOptionCount == 1 || foundHeuristic) && failedOption)
tryUnifyNormalizedTypes(subTy, superTy, *subNorm, *superNorm, "None of the union options are compatible. For example:", *failedOption);
else
@ -743,9 +744,9 @@ void Unifier::tryUnifyTypeWithUnion(TypeId subTy, TypeId superTy, const UnionTyp
else if (!found)
{
if ((failedOptionCount == 1 || foundHeuristic) && failedOption)
reportError(TypeError{location, TypeMismatch{superTy, subTy, "None of the union options are compatible. For example:", *failedOption}});
reportError(location, TypeMismatch{superTy, subTy, "None of the union options are compatible. For example:", *failedOption});
else
reportError(TypeError{location, TypeMismatch{superTy, subTy, "none of the union options are compatible"}});
reportError(location, TypeMismatch{superTy, subTy, "none of the union options are compatible"});
}
}
@ -774,7 +775,7 @@ void Unifier::tryUnifyTypeWithIntersection(TypeId subTy, TypeId superTy, const I
if (unificationTooComplex)
reportError(*unificationTooComplex);
else if (firstFailedOption)
reportError(TypeError{location, TypeMismatch{superTy, subTy, "Not all intersection parts are compatible.", *firstFailedOption}});
reportError(location, TypeMismatch{superTy, subTy, "Not all intersection parts are compatible.", *firstFailedOption});
}
void Unifier::tryUnifyIntersectionWithType(TypeId subTy, const IntersectionTypeVar* uv, TypeId superTy, bool cacheEnabled, bool isFunctionCall)
@ -832,11 +833,11 @@ void Unifier::tryUnifyIntersectionWithType(TypeId subTy, const IntersectionTypeV
if (subNorm && superNorm)
tryUnifyNormalizedTypes(subTy, superTy, *subNorm, *superNorm, "none of the intersection parts are compatible");
else
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
}
else if (!found)
{
reportError(TypeError{location, TypeMismatch{superTy, subTy, "none of the intersection parts are compatible"}});
reportError(location, TypeMismatch{superTy, subTy, "none of the intersection parts are compatible"});
}
}
@ -848,37 +849,37 @@ void Unifier::tryUnifyNormalizedTypes(
if (get<UnknownTypeVar>(superNorm.tops) || get<AnyTypeVar>(superNorm.tops) || get<AnyTypeVar>(subNorm.tops))
return;
else if (get<UnknownTypeVar>(subNorm.tops))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
if (get<ErrorTypeVar>(subNorm.errors))
if (!get<ErrorTypeVar>(superNorm.errors))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
if (get<PrimitiveTypeVar>(subNorm.booleans))
{
if (!get<PrimitiveTypeVar>(superNorm.booleans))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
}
else if (const SingletonTypeVar* stv = get<SingletonTypeVar>(subNorm.booleans))
{
if (!get<PrimitiveTypeVar>(superNorm.booleans) && stv != get<SingletonTypeVar>(superNorm.booleans))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
}
if (get<PrimitiveTypeVar>(subNorm.nils))
if (!get<PrimitiveTypeVar>(superNorm.nils))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
if (get<PrimitiveTypeVar>(subNorm.numbers))
if (!get<PrimitiveTypeVar>(superNorm.numbers))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
if (!isSubtype(subNorm.strings, superNorm.strings))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
if (get<PrimitiveTypeVar>(subNorm.threads))
if (!get<PrimitiveTypeVar>(superNorm.errors))
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
for (TypeId subClass : subNorm.classes)
{
@ -894,7 +895,7 @@ void Unifier::tryUnifyNormalizedTypes(
}
}
if (!found)
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
}
for (TypeId subTable : subNorm.tables)
@ -919,21 +920,19 @@ void Unifier::tryUnifyNormalizedTypes(
return reportError(*e);
}
if (!found)
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
}
if (subNorm.functions)
if (!subNorm.functions.isNever())
{
if (!superNorm.functions)
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
if (superNorm.functions->empty())
return;
for (TypeId superFun : *superNorm.functions)
if (superNorm.functions.isNever())
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
for (TypeId superFun : *superNorm.functions.parts)
{
Unifier innerState = makeChildUnifier();
const FunctionTypeVar* superFtv = get<FunctionTypeVar>(superFun);
if (!superFtv)
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
TypePackId tgt = innerState.tryApplyOverloadedFunction(subTy, subNorm.functions, superFtv->argTypes);
innerState.tryUnify_(tgt, superFtv->retTypes);
if (innerState.errors.empty())
@ -941,7 +940,7 @@ void Unifier::tryUnifyNormalizedTypes(
else if (auto e = hasUnificationTooComplex(innerState.errors))
return reportError(*e);
else
return reportError(TypeError{location, TypeMismatch{superTy, subTy, reason, error}});
return reportError(location, TypeMismatch{superTy, subTy, reason, error});
}
}
@ -959,15 +958,15 @@ void Unifier::tryUnifyNormalizedTypes(
TypePackId Unifier::tryApplyOverloadedFunction(TypeId function, const NormalizedFunctionType& overloads, TypePackId args)
{
if (!overloads || overloads->empty())
if (overloads.isNever())
{
reportError(TypeError{location, CannotCallNonFunction{function}});
reportError(location, CannotCallNonFunction{function});
return singletonTypes->errorRecoveryTypePack();
}
std::optional<TypePackId> result;
const FunctionTypeVar* firstFun = nullptr;
for (TypeId overload : *overloads)
for (TypeId overload : *overloads.parts)
{
if (const FunctionTypeVar* ftv = get<FunctionTypeVar>(overload))
{
@ -1015,12 +1014,12 @@ TypePackId Unifier::tryApplyOverloadedFunction(TypeId function, const Normalized
// TODO: better error reporting?
// The logic for error reporting overload resolution
// is currently over in TypeInfer.cpp, should we move it?
reportError(TypeError{location, GenericError{"No matching overload."}});
reportError(location, GenericError{"No matching overload."});
return singletonTypes->errorRecoveryTypePack(firstFun->retTypes);
}
else
{
reportError(TypeError{location, CannotCallNonFunction{function}});
reportError(location, CannotCallNonFunction{function});
return singletonTypes->errorRecoveryTypePack();
}
}
@ -1199,7 +1198,7 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
if (sharedState.counters.iterationLimit > 0 && sharedState.counters.iterationLimit < sharedState.counters.iterationCount)
{
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
return;
}
@ -1372,7 +1371,7 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
size_t actualSize = size(subTp);
if (ctx == CountMismatch::FunctionResult || ctx == CountMismatch::ExprListResult)
std::swap(expectedSize, actualSize);
reportError(TypeError{location, CountMismatch{expectedSize, std::nullopt, actualSize, ctx}});
reportError(location, CountMismatch{expectedSize, std::nullopt, actualSize, ctx});
while (superIter.good())
{
@ -1394,9 +1393,9 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
else
{
if (FFlag::LuauReportTypeMismatchForTypePackUnificationFailure)
reportError(TypeError{location, TypePackMismatch{subTp, superTp}});
reportError(location, TypePackMismatch{subTp, superTp});
else
reportError(TypeError{location, GenericError{"Failed to unify type packs"}});
reportError(location, GenericError{"Failed to unify type packs"});
}
}
@ -1408,7 +1407,7 @@ void Unifier::tryUnifyPrimitives(TypeId subTy, TypeId superTy)
ice("passed non primitive types to unifyPrimitives");
if (superPrim->type != subPrim->type)
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
}
void Unifier::tryUnifySingletons(TypeId subTy, TypeId superTy)
@ -1429,7 +1428,7 @@ void Unifier::tryUnifySingletons(TypeId subTy, TypeId superTy)
if (superPrim && superPrim->type == PrimitiveTypeVar::String && get<StringSingleton>(subSingleton) && variance == Covariant)
return;
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
}
void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCall)
@ -1465,21 +1464,21 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
}
else
{
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
}
}
else if (numGenerics != subFunction->generics.size())
{
numGenerics = std::min(superFunction->generics.size(), subFunction->generics.size());
reportError(TypeError{location, TypeMismatch{superTy, subTy, "different number of generic type parameters"}});
reportError(location, TypeMismatch{superTy, subTy, "different number of generic type parameters"});
}
if (numGenericPacks != subFunction->genericPacks.size())
{
numGenericPacks = std::min(superFunction->genericPacks.size(), subFunction->genericPacks.size());
reportError(TypeError{location, TypeMismatch{superTy, subTy, "different number of generic type pack parameters"}});
reportError(location, TypeMismatch{superTy, subTy, "different number of generic type pack parameters"});
}
for (size_t i = 0; i < numGenerics; i++)
@ -1506,11 +1505,10 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
if (auto e = hasUnificationTooComplex(innerState.errors))
reportError(*e);
else if (!innerState.errors.empty() && innerState.firstPackErrorPos)
reportError(
TypeError{location, TypeMismatch{superTy, subTy, format("Argument #%d type is not compatible.", *innerState.firstPackErrorPos),
innerState.errors.front()}});
reportError(location, TypeMismatch{superTy, subTy, format("Argument #%d type is not compatible.", *innerState.firstPackErrorPos),
innerState.errors.front()});
else if (!innerState.errors.empty())
reportError(TypeError{location, TypeMismatch{superTy, subTy, "", innerState.errors.front()}});
reportError(location, TypeMismatch{superTy, subTy, "", innerState.errors.front()});
innerState.ctx = CountMismatch::FunctionResult;
innerState.tryUnify_(subFunction->retTypes, superFunction->retTypes);
@ -1520,13 +1518,12 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
if (auto e = hasUnificationTooComplex(innerState.errors))
reportError(*e);
else if (!innerState.errors.empty() && size(superFunction->retTypes) == 1 && finite(superFunction->retTypes))
reportError(TypeError{location, TypeMismatch{superTy, subTy, "Return type is not compatible.", innerState.errors.front()}});
reportError(location, TypeMismatch{superTy, subTy, "Return type is not compatible.", innerState.errors.front()});
else if (!innerState.errors.empty() && innerState.firstPackErrorPos)
reportError(
TypeError{location, TypeMismatch{superTy, subTy, format("Return #%d type is not compatible.", *innerState.firstPackErrorPos),
innerState.errors.front()}});
reportError(location, TypeMismatch{superTy, subTy, format("Return #%d type is not compatible.", *innerState.firstPackErrorPos),
innerState.errors.front()});
else if (!innerState.errors.empty())
reportError(TypeError{location, TypeMismatch{superTy, subTy, "", innerState.errors.front()}});
reportError(location, TypeMismatch{superTy, subTy, "", innerState.errors.front()});
}
log.concat(std::move(innerState.log));
@ -1608,7 +1605,7 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
}
else
{
reportError(TypeError{location, UnificationTooComplex{}});
reportError(location, UnificationTooComplex{});
}
}
}
@ -1626,7 +1623,7 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
if (!missingProperties.empty())
{
reportError(TypeError{location, MissingProperties{superTy, subTy, std::move(missingProperties)}});
reportError(location, MissingProperties{superTy, subTy, std::move(missingProperties)});
return;
}
}
@ -1644,7 +1641,7 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
if (!extraProperties.empty())
{
reportError(TypeError{location, MissingProperties{superTy, subTy, std::move(extraProperties), MissingProperties::Extra}});
reportError(location, MissingProperties{superTy, subTy, std::move(extraProperties), MissingProperties::Extra});
return;
}
}
@ -1703,8 +1700,20 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
TypeId superTyNew = FFlag::LuauScalarShapeUnifyToMtOwner ? log.follow(superTy) : superTy;
TypeId subTyNew = FFlag::LuauScalarShapeUnifyToMtOwner ? log.follow(subTy) : subTy;
if (FFlag::LuauScalarShapeUnifyToMtOwner)
{
// If one of the types stopped being a table altogether, we need to restart from the top
if ((superTy != superTyNew || subTy != subTyNew) && errors.empty())
return tryUnify(subTy, superTy, false, isIntersection);
}
// Otherwise, restart only the table unification
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTyNew);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTyNew);
if (superTable != newSuperTable || (subTable != newSubTable && subTable != instantiatedSubTable))
{
if (errors.empty())
@ -1825,13 +1834,13 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
if (!missingProperties.empty())
{
reportError(TypeError{location, MissingProperties{superTy, subTy, std::move(missingProperties)}});
reportError(location, MissingProperties{superTy, subTy, std::move(missingProperties)});
return;
}
if (!extraProperties.empty())
{
reportError(TypeError{location, MissingProperties{superTy, subTy, std::move(extraProperties), MissingProperties::Extra}});
reportError(location, MissingProperties{superTy, subTy, std::move(extraProperties), MissingProperties::Extra});
return;
}
@ -1866,15 +1875,17 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
if (reversed)
std::swap(subTy, superTy);
if (auto ttv = log.get<TableTypeVar>(superTy); !ttv || ttv->state != TableState::Free)
return reportError(TypeError{location, TypeMismatch{osuperTy, osubTy}});
TableTypeVar* superTable = log.getMutable<TableTypeVar>(superTy);
if (!superTable || superTable->state != TableState::Free)
return reportError(location, TypeMismatch{osuperTy, osubTy});
auto fail = [&](std::optional<TypeError> e) {
std::string reason = "The former's metatable does not satisfy the requirements.";
if (e)
reportError(TypeError{location, TypeMismatch{osuperTy, osubTy, reason, *e}});
reportError(location, TypeMismatch{osuperTy, osubTy, reason, *e});
else
reportError(TypeError{location, TypeMismatch{osuperTy, osubTy, reason}});
reportError(location, TypeMismatch{osuperTy, osubTy, reason});
};
// Given t1 where t1 = { lower: (t1) -> (a, b...) }
@ -1891,6 +1902,20 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
Unifier child = makeChildUnifier();
child.tryUnify_(ty, superTy);
if (FFlag::LuauScalarShapeUnifyToMtOwner)
{
// To perform subtype <: free table unification, we have tried to unify (subtype's metatable) <: free table
// There is a chance that it was unified with the original subtype, but then (subtype's metatable) <: subtype could've failed
// Here we check whether we now have a new supertype instead of the original free table, and if so, try the original subtype <: new supertype check
TypeId newSuperTy = child.log.follow(superTy);
if (superTy != newSuperTy && canUnify(subTy, newSuperTy).empty())
{
log.replace(superTy, BoundTypeVar{subTy});
return;
}
}
if (auto e = hasUnificationTooComplex(child.errors))
reportError(*e);
else if (!child.errors.empty())
@ -1898,6 +1923,14 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
log.concat(std::move(child.log));
if (FFlag::LuauScalarShapeUnifyToMtOwner)
{
// To perform subtype <: free table unification, we have tried to unify (subtype's metatable) <: free table
// We return success because subtype <: free table, which means that the correct unification is to replace the free table with the subtype
if (child.errors.empty())
log.replace(superTy, BoundTypeVar{subTy});
}
return;
}
else
@ -1906,7 +1939,7 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
}
}
reportError(TypeError{location, TypeMismatch{osuperTy, osubTy}});
reportError(location, TypeMismatch{osuperTy, osubTy});
return;
}
@ -1947,7 +1980,7 @@ void Unifier::tryUnifyWithMetatable(TypeId subTy, TypeId superTy, bool reversed)
if (auto e = hasUnificationTooComplex(innerState.errors))
reportError(*e);
else if (!innerState.errors.empty())
reportError(TypeError{location, TypeMismatch{reversed ? subTy : superTy, reversed ? superTy : subTy, "", innerState.errors.front()}});
reportError(location, TypeMismatch{reversed ? subTy : superTy, reversed ? superTy : subTy, "", innerState.errors.front()});
log.concat(std::move(innerState.log));
}
@ -2024,9 +2057,9 @@ void Unifier::tryUnifyWithClass(TypeId subTy, TypeId superTy, bool reversed)
auto fail = [&]() {
if (!reversed)
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
else
reportError(TypeError{location, TypeMismatch{subTy, superTy}});
reportError(location, TypeMismatch{subTy, superTy});
};
const ClassTypeVar* superClass = get<ClassTypeVar>(superTy);
@ -2071,7 +2104,7 @@ void Unifier::tryUnifyWithClass(TypeId subTy, TypeId superTy, bool reversed)
if (!classProp)
{
ok = false;
reportError(TypeError{location, UnknownProperty{superTy, propName}});
reportError(location, UnknownProperty{superTy, propName});
}
else
{
@ -2095,7 +2128,7 @@ void Unifier::tryUnifyWithClass(TypeId subTy, TypeId superTy, bool reversed)
{
ok = false;
std::string msg = "Class " + superClass->name + " does not have an indexer";
reportError(TypeError{location, GenericError{msg}});
reportError(location, GenericError{msg});
}
if (!ok)
@ -2116,13 +2149,13 @@ void Unifier::tryUnifyTypeWithNegation(TypeId subTy, TypeId superTy)
const NormalizedType* subNorm = normalizer->normalize(subTy);
const NormalizedType* superNorm = normalizer->normalize(superTy);
if (!subNorm || !superNorm)
return reportError(TypeError{location, UnificationTooComplex{}});
return reportError(location, UnificationTooComplex{});
// T </: ~U iff T <: U
Unifier state = makeChildUnifier();
state.tryUnifyNormalizedTypes(subTy, superTy, *subNorm, *superNorm, "");
if (state.errors.empty())
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
}
void Unifier::tryUnifyNegationWithType(TypeId subTy, TypeId superTy)
@ -2132,7 +2165,7 @@ void Unifier::tryUnifyNegationWithType(TypeId subTy, TypeId superTy)
ice("tryUnifyNegationWithType subTy must be a negation type");
// TODO: ~T </: U iff T <: U
reportError(TypeError{location, TypeMismatch{superTy, subTy}});
reportError(location, TypeMismatch{superTy, subTy});
}
static void queueTypePack(std::vector<TypeId>& queue, DenseHashSet<TypePackId>& seenTypePacks, Unifier& state, TypePackId a, TypePackId anyTypePack)
@ -2195,7 +2228,7 @@ void Unifier::tryUnifyVariadics(TypePackId subTp, TypePackId superTp, bool rever
}
else if (get<Unifiable::Generic>(tail))
{
reportError(TypeError{location, GenericError{"Cannot unify variadic and generic packs"}});
reportError(location, GenericError{"Cannot unify variadic and generic packs"});
}
else if (get<Unifiable::Error>(tail))
{
@ -2209,7 +2242,7 @@ void Unifier::tryUnifyVariadics(TypePackId subTp, TypePackId superTp, bool rever
}
else
{
reportError(TypeError{location, GenericError{"Failed to unify variadic packs"}});
reportError(location, GenericError{"Failed to unify variadic packs"});
}
}
@ -2351,7 +2384,7 @@ bool Unifier::occursCheck(DenseHashSet<TypeId>& seen, TypeId needle, TypeId hays
if (needle == haystack)
{
reportError(TypeError{location, OccursCheckFailed{}});
reportError(location, OccursCheckFailed{});
log.replace(needle, *singletonTypes->errorRecoveryType());
return true;
@ -2402,7 +2435,7 @@ bool Unifier::occursCheck(DenseHashSet<TypePackId>& seen, TypePackId needle, Typ
{
if (needle == haystack)
{
reportError(TypeError{location, OccursCheckFailed{}});
reportError(location, OccursCheckFailed{});
log.replace(needle, *singletonTypes->errorRecoveryTypePack());
return true;
@ -2423,18 +2456,31 @@ bool Unifier::occursCheck(DenseHashSet<TypePackId>& seen, TypePackId needle, Typ
Unifier Unifier::makeChildUnifier()
{
Unifier u = Unifier{normalizer, mode, scope, location, variance, &log};
u.anyIsTop = anyIsTop;
u.normalize = normalize;
u.useScopes = useScopes;
return u;
}
// A utility function that appends the given error to the unifier's error log.
// This allows setting a breakpoint wherever the unifier reports an error.
//
// Note: reportError accepts its arguments by value intentionally to reduce the stack usage of functions that call `reportError`.
void Unifier::reportError(Location location, TypeErrorData data)
{
errors.emplace_back(std::move(location), std::move(data));
}
// A utility function that appends the given error to the unifier's error log.
// This allows setting a breakpoint wherever the unifier reports an error.
//
// Note: to conserve stack space in calling functions it is generally preferred to call `Unifier::reportError(Location location, TypeErrorData data)`
// instead of this method.
void Unifier::reportError(TypeError err)
{
errors.push_back(std::move(err));
}
bool Unifier::isNonstrictMode() const
{
return (mode == Mode::Nonstrict) || (mode == Mode::NoCheck);
@ -2445,7 +2491,7 @@ void Unifier::checkChildUnifierTypeMismatch(const ErrorVec& innerErrors, TypeId
if (auto e = hasUnificationTooComplex(innerErrors))
reportError(*e);
else if (!innerErrors.empty())
reportError(TypeError{location, TypeMismatch{wantedType, givenType}});
reportError(location, TypeMismatch{wantedType, givenType});
}
void Unifier::checkChildUnifierTypeMismatch(const ErrorVec& innerErrors, const std::string& prop, TypeId wantedType, TypeId givenType)

View file

@ -641,8 +641,8 @@ Lexeme Lexer::readInterpolatedStringSection(Position start, Lexeme::Type formatT
return brokenDoubleBrace;
}
Lexeme lexemeOutput(Location(start, position()), Lexeme::InterpStringBegin, &buffer[startOffset], offset - startOffset);
consume();
Lexeme lexemeOutput(Location(start, position()), Lexeme::InterpStringBegin, &buffer[startOffset], offset - startOffset - 1);
return lexemeOutput;
}

View file

@ -25,6 +25,9 @@ LUAU_DYNAMIC_FASTFLAGVARIABLE(LuaReportParseIntegerIssues, false)
LUAU_FASTFLAGVARIABLE(LuauInterpolatedStringBaseSupport, false)
LUAU_FASTFLAGVARIABLE(LuauCommaParenWarnings, false)
LUAU_FASTFLAGVARIABLE(LuauTableConstructorRecovery, false)
LUAU_FASTFLAGVARIABLE(LuauParserErrorsOnMissingDefaultTypePackArgument, false)
bool lua_telemetry_parsed_out_of_range_bin_integer = false;
bool lua_telemetry_parsed_out_of_range_hex_integer = false;
@ -2310,9 +2313,13 @@ AstExpr* Parser::parseTableConstructor()
MatchLexeme matchBrace = lexer.current();
expectAndConsume('{', "table literal");
unsigned lastElementIndent = 0;
while (lexer.current().type != '}')
{
if (FFlag::LuauTableConstructorRecovery)
lastElementIndent = lexer.current().location.begin.column;
if (lexer.current().type == '[')
{
MatchLexeme matchLocationBracket = lexer.current();
@ -2357,10 +2364,14 @@ AstExpr* Parser::parseTableConstructor()
{
nextLexeme();
}
else
else if (FFlag::LuauTableConstructorRecovery && (lexer.current().type == '[' || lexer.current().type == Lexeme::Name) &&
lexer.current().location.begin.column == lastElementIndent)
{
if (lexer.current().type != '}')
break;
report(lexer.current().location, "Expected ',' after table constructor element");
}
else if (lexer.current().type != '}')
{
break;
}
}
@ -2494,7 +2505,7 @@ std::pair<AstArray<AstGenericType>, AstArray<AstGenericTypePack>> Parser::parseG
namePacks.push_back({name, nameLocation, typePack});
}
else if (lexer.current().type == '(')
else if (!FFlag::LuauParserErrorsOnMissingDefaultTypePackArgument && lexer.current().type == '(')
{
auto [type, typePack] = parseTypeOrPackAnnotation();
@ -2503,6 +2514,15 @@ std::pair<AstArray<AstGenericType>, AstArray<AstGenericTypePack>> Parser::parseG
namePacks.push_back({name, nameLocation, typePack});
}
else if (FFlag::LuauParserErrorsOnMissingDefaultTypePackArgument)
{
auto [type, typePack] = parseTypeOrPackAnnotation();
if (type)
report(type->location, "Expected type pack after '=', got type");
namePacks.push_back({name, nameLocation, typePack});
}
}
else
{

View file

@ -978,7 +978,8 @@ int replMain(int argc, char** argv)
if (compileFormat == CompileFormat::Null)
printf("Compiled %d KLOC into %d KB bytecode\n", int(stats.lines / 1000), int(stats.bytecode / 1024));
else if (compileFormat == CompileFormat::CodegenNull)
printf("Compiled %d KLOC into %d KB bytecode => %d KB native code\n", int(stats.lines / 1000), int(stats.bytecode / 1024), int(stats.codegen / 1024));
printf("Compiled %d KLOC into %d KB bytecode => %d KB native code\n", int(stats.lines / 1000), int(stats.bytecode / 1024),
int(stats.codegen / 1024));
return failed ? 1 : 0;
}

View file

@ -0,0 +1,53 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterA64.h"
namespace Luau
{
namespace CodeGen
{
enum class AddressKindA64 : uint8_t
{
imm, // reg + imm
reg, // reg + reg
// TODO:
// reg + reg << shift
// reg + sext(reg) << shift
// reg + uext(reg) << shift
};
struct AddressA64
{
AddressA64(RegisterA64 base, int off = 0)
: kind(AddressKindA64::imm)
, base(base)
, offset(xzr)
, data(off)
{
LUAU_ASSERT(base.kind == KindA64::x || base == sp);
LUAU_ASSERT(off >= -256 && off < 4096);
}
AddressA64(RegisterA64 base, RegisterA64 offset)
: kind(AddressKindA64::reg)
, base(base)
, offset(offset)
, data(0)
{
LUAU_ASSERT(base.kind == KindA64::x);
LUAU_ASSERT(offset.kind == KindA64::x);
}
AddressKindA64 kind;
RegisterA64 base;
RegisterA64 offset;
int data;
};
using mem = AddressA64;
} // namespace CodeGen
} // namespace Luau
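The two addressing kinds above map directly onto the constructors; a brief illustrative sketch (register picks are arbitrary, taken from RegisterA64.h, and the function name is invented for this example):

#include "Luau/AddressA64.h"

using namespace Luau::CodeGen;

void addressingSketch()
{
    AddressA64 fieldAt8{x0, 8}; // AddressKindA64::imm -> [x0 + #8]
    AddressA64 indexed{x0, x1}; // AddressKindA64::reg -> [x0 + x1]
    // 'mem' is an alias for AddressA64, so call sites can also write mem(x0, 8) or mem(x0, x1)
    (void)fieldAt8;
    (void)indexed;
}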

View file

@ -0,0 +1,161 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterA64.h"
#include "Luau/AddressA64.h"
#include "Luau/ConditionA64.h"
#include "Luau/Label.h"
#include <string>
#include <vector>
namespace Luau
{
namespace CodeGen
{
class AssemblyBuilderA64
{
public:
explicit AssemblyBuilderA64(bool logText);
~AssemblyBuilderA64();
// Moves
void mov(RegisterA64 dst, RegisterA64 src);
void mov(RegisterA64 dst, uint16_t src, int shift = 0);
void movk(RegisterA64 dst, uint16_t src, int shift = 0);
// Arithmetics
void add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void add(RegisterA64 dst, RegisterA64 src1, int src2);
void sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void sub(RegisterA64 dst, RegisterA64 src1, int src2);
void neg(RegisterA64 dst, RegisterA64 src);
// Comparisons
// Note: some arithmetic instructions also have versions that update flags (ADDS etc.), but we aren't using them at the moment
void cmp(RegisterA64 src1, RegisterA64 src2);
void cmp(RegisterA64 src1, int src2);
// Bitwise
// Note: shifted-register support and bitfield operations are omitted for simplicity
// TODO: support immediate arguments (they have odd encoding and forbid many values)
void and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void mvn(RegisterA64 dst, RegisterA64 src);
// Shifts
void lsl(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void lsr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void asr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void ror(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void clz(RegisterA64 dst, RegisterA64 src);
void rbit(RegisterA64 dst, RegisterA64 src);
// Load
// Note: paired loads are currently omitted for simplicity
void ldr(RegisterA64 dst, AddressA64 src);
void ldrb(RegisterA64 dst, AddressA64 src);
void ldrh(RegisterA64 dst, AddressA64 src);
void ldrsb(RegisterA64 dst, AddressA64 src);
void ldrsh(RegisterA64 dst, AddressA64 src);
void ldrsw(RegisterA64 dst, AddressA64 src);
// Store
void str(RegisterA64 src, AddressA64 dst);
void strb(RegisterA64 src, AddressA64 dst);
void strh(RegisterA64 src, AddressA64 dst);
// Control flow
// Note: tbz/tbnz are currently not supported because they have 15-bit offsets and we don't support branch thunks
void b(Label& label);
void b(ConditionA64 cond, Label& label);
void cbz(RegisterA64 src, Label& label);
void cbnz(RegisterA64 src, Label& label);
void br(RegisterA64 src);
void blr(RegisterA64 src);
void ret();
// Address of embedded data
void adr(RegisterA64 dst, const void* ptr, size_t size);
void adr(RegisterA64 dst, uint64_t value);
void adr(RegisterA64 dst, double value);
// Run final checks
bool finalize();
// Places a label at current location and returns it
Label setLabel();
// Assigns label position to the current location
void setLabel(Label& label);
void logAppend(const char* fmt, ...) LUAU_PRINTF_ATTR(2, 3);
uint32_t getCodeSize() const;
// Resulting data and code that need to be copied over one after the other
// The *end* of 'data' has to be aligned to 16 bytes; this will also align 'code'
std::vector<uint8_t> data;
std::vector<uint32_t> code;
std::string text;
const bool logText = false;
private:
// Instruction archetypes
void place0(const char* name, uint32_t word);
void placeSR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift = 0);
void placeSR2(const char* name, RegisterA64 dst, RegisterA64 src, uint8_t op, uint8_t op2 = 0);
void placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t op2);
void placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op);
void placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op);
void placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift = 0);
void placeA(const char* name, RegisterA64 dst, AddressA64 src, uint8_t op, uint8_t size);
void placeBC(const char* name, Label& label, uint8_t op, uint8_t cond);
void placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond);
void placeBR(const char* name, RegisterA64 src, uint32_t op);
void placeADR(const char* name, RegisterA64 src, uint8_t op);
void place(uint32_t word);
void patchLabel(Label& label);
void patchImm19(uint32_t location, int value);
void commit();
LUAU_NOINLINE void extend();
// Data
size_t allocateData(size_t size, size_t align);
// Logging of assembly in text form
LUAU_NOINLINE void log(const char* opcode);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, int src2);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, int src, int shift = 0);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, AddressA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src, Label label);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, Label label);
LUAU_NOINLINE void log(Label label);
LUAU_NOINLINE void log(RegisterA64 reg);
LUAU_NOINLINE void log(AddressA64 addr);
uint32_t nextLabel = 1;
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
bool finalized = false;
bool overflowed = false;
size_t dataPos = 0;
uint32_t* codePos = nullptr;
uint32_t* codeEnd = nullptr;
};
} // namespace CodeGen
} // namespace Luau
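For context, a small hypothetical usage sketch of this builder; the register choices, label name, and logText setting are arbitrary and only meant to show how the calls compose:

#include "Luau/AssemblyBuilderA64.h"

using namespace Luau::CodeGen;

void clampToZeroSketch()
{
    AssemblyBuilderA64 build(/* logText= */ true);

    Label done;

    // if (x0 >= 0) skip ahead, otherwise zero it out
    build.cmp(x0, 0);
    build.b(ConditionA64::GreaterEqual, done);
    build.mov(x0, 0);
    build.setLabel(done);
    build.ret();

    build.finalize();
    // build.data and build.code now hold the emitted constants and instructions,
    // and build.text holds the logged assembly because logText was set
}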

View file

@ -2,8 +2,8 @@
#pragma once
#include "Luau/Common.h"
#include "Luau/Condition.h"
#include "Luau/Label.h"
#include "Luau/ConditionX64.h"
#include "Luau/OperandX64.h"
#include "Luau/RegisterX64.h"
@ -84,7 +84,7 @@ public:
void ret();
// Control flow
void jcc(Condition cond, Label& label);
void jcc(ConditionX64 cond, Label& label);
void jmp(Label& label);
void jmp(OperandX64 op);

View file

@ -0,0 +1,37 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
enum class ConditionA64
{
Equal,
NotEqual,
CarrySet,
CarryClear,
Minus,
Plus,
Overflow,
NoOverflow,
UnsignedGreater,
UnsignedLessEqual,
GreaterEqual,
Less,
Greater,
LessEqual,
Always,
Count
};
} // namespace CodeGen
} // namespace Luau

View file

@ -6,7 +6,7 @@ namespace Luau
namespace CodeGen
{
enum class Condition
enum class ConditionX64 : uint8_t
{
Overflow,
NoOverflow,

View file

@ -61,7 +61,7 @@ struct OperandX64
constexpr OperandX64 operator[](OperandX64&& addr) const
{
LUAU_ASSERT(cat == CategoryX64::mem);
LUAU_ASSERT(memSize != SizeX64::none && index == noreg && scale == 1 && base == noreg && imm == 0);
LUAU_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0);
LUAU_ASSERT(addr.memSize == SizeX64::none);
addr.cat = CategoryX64::mem;
@ -70,13 +70,13 @@ struct OperandX64
}
};
constexpr OperandX64 addr{SizeX64::none, noreg, 1, noreg, 0};
constexpr OperandX64 byte{SizeX64::byte, noreg, 1, noreg, 0};
constexpr OperandX64 word{SizeX64::word, noreg, 1, noreg, 0};
constexpr OperandX64 dword{SizeX64::dword, noreg, 1, noreg, 0};
constexpr OperandX64 qword{SizeX64::qword, noreg, 1, noreg, 0};
constexpr OperandX64 xmmword{SizeX64::xmmword, noreg, 1, noreg, 0};
constexpr OperandX64 ymmword{SizeX64::ymmword, noreg, 1, noreg, 0};
constexpr OperandX64 ptr{sizeof(void*) == 4 ? SizeX64::dword : SizeX64::qword, noreg, 1, noreg, 0};
constexpr OperandX64 operator*(RegisterX64 reg, uint8_t scale)
{

View file

@ -0,0 +1,105 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
enum class KindA64 : uint8_t
{
none,
w, // 32-bit GPR
x, // 64-bit GPR
};
struct RegisterA64
{
KindA64 kind : 3;
uint8_t index : 5;
constexpr bool operator==(RegisterA64 rhs) const
{
return kind == rhs.kind && index == rhs.index;
}
constexpr bool operator!=(RegisterA64 rhs) const
{
return !(*this == rhs);
}
};
constexpr RegisterA64 w0{KindA64::w, 0};
constexpr RegisterA64 w1{KindA64::w, 1};
constexpr RegisterA64 w2{KindA64::w, 2};
constexpr RegisterA64 w3{KindA64::w, 3};
constexpr RegisterA64 w4{KindA64::w, 4};
constexpr RegisterA64 w5{KindA64::w, 5};
constexpr RegisterA64 w6{KindA64::w, 6};
constexpr RegisterA64 w7{KindA64::w, 7};
constexpr RegisterA64 w8{KindA64::w, 8};
constexpr RegisterA64 w9{KindA64::w, 9};
constexpr RegisterA64 w10{KindA64::w, 10};
constexpr RegisterA64 w11{KindA64::w, 11};
constexpr RegisterA64 w12{KindA64::w, 12};
constexpr RegisterA64 w13{KindA64::w, 13};
constexpr RegisterA64 w14{KindA64::w, 14};
constexpr RegisterA64 w15{KindA64::w, 15};
constexpr RegisterA64 w16{KindA64::w, 16};
constexpr RegisterA64 w17{KindA64::w, 17};
constexpr RegisterA64 w18{KindA64::w, 18};
constexpr RegisterA64 w19{KindA64::w, 19};
constexpr RegisterA64 w20{KindA64::w, 20};
constexpr RegisterA64 w21{KindA64::w, 21};
constexpr RegisterA64 w22{KindA64::w, 22};
constexpr RegisterA64 w23{KindA64::w, 23};
constexpr RegisterA64 w24{KindA64::w, 24};
constexpr RegisterA64 w25{KindA64::w, 25};
constexpr RegisterA64 w26{KindA64::w, 26};
constexpr RegisterA64 w27{KindA64::w, 27};
constexpr RegisterA64 w28{KindA64::w, 28};
constexpr RegisterA64 w29{KindA64::w, 29};
constexpr RegisterA64 w30{KindA64::w, 30};
constexpr RegisterA64 wzr{KindA64::w, 31};
constexpr RegisterA64 x0{KindA64::x, 0};
constexpr RegisterA64 x1{KindA64::x, 1};
constexpr RegisterA64 x2{KindA64::x, 2};
constexpr RegisterA64 x3{KindA64::x, 3};
constexpr RegisterA64 x4{KindA64::x, 4};
constexpr RegisterA64 x5{KindA64::x, 5};
constexpr RegisterA64 x6{KindA64::x, 6};
constexpr RegisterA64 x7{KindA64::x, 7};
constexpr RegisterA64 x8{KindA64::x, 8};
constexpr RegisterA64 x9{KindA64::x, 9};
constexpr RegisterA64 x10{KindA64::x, 10};
constexpr RegisterA64 x11{KindA64::x, 11};
constexpr RegisterA64 x12{KindA64::x, 12};
constexpr RegisterA64 x13{KindA64::x, 13};
constexpr RegisterA64 x14{KindA64::x, 14};
constexpr RegisterA64 x15{KindA64::x, 15};
constexpr RegisterA64 x16{KindA64::x, 16};
constexpr RegisterA64 x17{KindA64::x, 17};
constexpr RegisterA64 x18{KindA64::x, 18};
constexpr RegisterA64 x19{KindA64::x, 19};
constexpr RegisterA64 x20{KindA64::x, 20};
constexpr RegisterA64 x21{KindA64::x, 21};
constexpr RegisterA64 x22{KindA64::x, 22};
constexpr RegisterA64 x23{KindA64::x, 23};
constexpr RegisterA64 x24{KindA64::x, 24};
constexpr RegisterA64 x25{KindA64::x, 25};
constexpr RegisterA64 x26{KindA64::x, 26};
constexpr RegisterA64 x27{KindA64::x, 27};
constexpr RegisterA64 x28{KindA64::x, 28};
constexpr RegisterA64 x29{KindA64::x, 29};
constexpr RegisterA64 x30{KindA64::x, 30};
constexpr RegisterA64 xzr{KindA64::x, 31};
constexpr RegisterA64 sp{KindA64::none, 31};
} // namespace CodeGen
} // namespace Luau
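A few compile-time checks that follow from the definitions above (illustrative only):

#include "Luau/RegisterA64.h"

using namespace Luau::CodeGen;

// Registers compare by kind *and* index: the 32-bit and 64-bit views of register 0
// are distinct values, and xzr (x kind, index 31) is not the same register as sp
// (none kind, index 31).
static_assert(!(w0 == x0), "w0 and x0 differ in kind");
static_assert(xzr != sp, "xzr and sp differ in kind");
static_assert(x0.index == w0.index, "same underlying register index");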

View file

@ -128,5 +128,10 @@ constexpr RegisterX64 dwordReg(RegisterX64 reg)
return RegisterX64{SizeX64::dword, reg.index};
}
constexpr RegisterX64 qwordReg(RegisterX64 reg)
{
return RegisterX64{SizeX64::qword, reg.index};
}
} // namespace CodeGen
} // namespace Luau

View file

@ -0,0 +1,723 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderA64.h"
#include "ByteUtils.h"
#include <stdarg.h>
namespace Luau
{
namespace CodeGen
{
static const uint8_t codeForCondition[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14};
static_assert(sizeof(codeForCondition) / sizeof(codeForCondition[0]) == size_t(ConditionA64::Count), "all conditions have to be covered");
static const char* textForCondition[] = {
"b.eq", "b.ne", "b.cs", "b.cc", "b.mi", "b.pl", "b.vs", "b.vc", "b.hi", "b.ls", "b.ge", "b.lt", "b.gt", "b.le", "b.al"};
static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(ConditionA64::Count), "all conditions have to be covered");
const unsigned kMaxAlign = 32;
AssemblyBuilderA64::AssemblyBuilderA64(bool logText)
: logText(logText)
{
data.resize(4096);
dataPos = data.size(); // data is filled backwards
code.resize(1024);
codePos = code.data();
codeEnd = code.data() + code.size();
}
AssemblyBuilderA64::~AssemblyBuilderA64()
{
LUAU_ASSERT(finalized);
}
void AssemblyBuilderA64::mov(RegisterA64 dst, RegisterA64 src)
{
if (dst == sp || src == sp)
placeR1("mov", dst, src, 0b00'100010'0'000000000000);
else
placeSR2("mov", dst, src, 0b01'01010);
}
void AssemblyBuilderA64::mov(RegisterA64 dst, uint16_t src, int shift)
{
placeI16("mov", dst, src, 0b10'100101, shift);
}
void AssemblyBuilderA64::movk(RegisterA64 dst, uint16_t src, int shift)
{
placeI16("movk", dst, src, 0b11'100101, shift);
}
void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("add", dst, src1, src2, 0b00'01011, shift);
}
void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, int src2)
{
placeI12("add", dst, src1, src2, 0b00'10001);
}
void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("sub", dst, src1, src2, 0b10'01011, shift);
}
void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, int src2)
{
placeI12("sub", dst, src1, src2, 0b10'10001);
}
void AssemblyBuilderA64::neg(RegisterA64 dst, RegisterA64 src)
{
placeSR2("neg", dst, src, 0b10'01011);
}
void AssemblyBuilderA64::cmp(RegisterA64 src1, RegisterA64 src2)
{
RegisterA64 dst = src1.kind == KindA64::x ? xzr : wzr;
placeSR3("cmp", dst, src1, src2, 0b11'01011);
}
void AssemblyBuilderA64::cmp(RegisterA64 src1, int src2)
{
RegisterA64 dst = src1.kind == KindA64::x ? xzr : wzr;
placeI12("cmp", dst, src1, src2, 0b11'10001);
}
void AssemblyBuilderA64::and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeSR3("and", dst, src1, src2, 0b00'01010);
}
void AssemblyBuilderA64::orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeSR3("orr", dst, src1, src2, 0b01'01010);
}
void AssemblyBuilderA64::eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeSR3("eor", dst, src1, src2, 0b10'01010);
}
void AssemblyBuilderA64::mvn(RegisterA64 dst, RegisterA64 src)
{
placeSR2("mvn", dst, src, 0b01'01010, 0b1);
}
void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeR3("lsl", dst, src1, src2, 0b11010110, 0b0010'00);
}
void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeR3("lsr", dst, src1, src2, 0b11010110, 0b0010'01);
}
void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeR3("asr", dst, src1, src2, 0b11010110, 0b0010'10);
}
void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
placeR3("ror", dst, src1, src2, 0b11010110, 0b0010'11);
}
void AssemblyBuilderA64::clz(RegisterA64 dst, RegisterA64 src)
{
placeR1("clz", dst, src, 0b10'11010110'00000'00010'0);
}
void AssemblyBuilderA64::rbit(RegisterA64 dst, RegisterA64 src)
{
placeR1("rbit", dst, src, 0b10'11010110'00000'0000'00);
}
void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldr", dst, src, 0b11100001, 0b10 | uint8_t(dst.kind == KindA64::x));
}
void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrb", dst, src, 0b11100001, 0b00);
}
void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrh", dst, src, 0b11100001, 0b01);
}
void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsb", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b00);
}
void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsh", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b01);
}
void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x);
placeA("ldrsw", dst, src, 0b11100010, 0b10);
}
void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w);
placeA("str", src, dst, 0b11100000, 0b10 | uint8_t(src.kind == KindA64::x));
}
void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strb", src, dst, 0b11100000, 0b00);
}
void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strh", src, dst, 0b11100000, 0b01);
}
void AssemblyBuilderA64::b(Label& label)
{
// Note: we aren't using 'b' form since it has a 26-bit immediate which requires custom fixup logic
placeBC("b", label, 0b0101010'0, codeForCondition[int(ConditionA64::Always)]);
}
void AssemblyBuilderA64::b(ConditionA64 cond, Label& label)
{
placeBC(textForCondition[int(cond)], label, 0b0101010'0, codeForCondition[int(cond)]);
}
void AssemblyBuilderA64::cbz(RegisterA64 src, Label& label)
{
placeBCR("cbz", label, 0b011010'0, src);
}
void AssemblyBuilderA64::cbnz(RegisterA64 src, Label& label)
{
placeBCR("cbnz", label, 0b011010'1, src);
}
void AssemblyBuilderA64::br(RegisterA64 src)
{
placeBR("br", src, 0b1101011'0'0'00'11111'0000'0'0);
}
void AssemblyBuilderA64::blr(RegisterA64 src)
{
placeBR("blr", src, 0b1101011'0'0'01'11111'0000'0'0);
}
void AssemblyBuilderA64::ret()
{
place0("ret", 0b1101011'0'0'10'11111'0000'0'0'11110'00000);
}
void AssemblyBuilderA64::adr(RegisterA64 dst, const void* ptr, size_t size)
{
size_t pos = allocateData(size, 4);
uint32_t location = getCodeSize();
memcpy(&data[pos], ptr, size);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
}
void AssemblyBuilderA64::adr(RegisterA64 dst, uint64_t value)
{
size_t pos = allocateData(8, 8);
uint32_t location = getCodeSize();
writeu64(&data[pos], value);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
}
void AssemblyBuilderA64::adr(RegisterA64 dst, double value)
{
size_t pos = allocateData(8, 8);
uint32_t location = getCodeSize();
writef64(&data[pos], value);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
}
bool AssemblyBuilderA64::finalize()
{
code.resize(codePos - code.data());
// Resolve jump targets
for (Label fixup : pendingLabels)
{
// If this assertion fires, a label was used in jmp without calling setLabel
LUAU_ASSERT(labelLocations[fixup.id - 1] != ~0u);
int value = int(labelLocations[fixup.id - 1]) - int(fixup.location);
patchImm19(fixup.location, value);
}
size_t dataSize = data.size() - dataPos;
// Shrink data
if (dataSize > 0)
memmove(&data[0], &data[dataPos], dataSize);
data.resize(dataSize);
finalized = true;
return !overflowed;
}
Label AssemblyBuilderA64::setLabel()
{
Label label{nextLabel++, getCodeSize()};
labelLocations.push_back(~0u);
if (logText)
log(label);
return label;
}
void AssemblyBuilderA64::setLabel(Label& label)
{
if (label.id == 0)
{
label.id = nextLabel++;
labelLocations.push_back(~0u);
}
label.location = getCodeSize();
labelLocations[label.id - 1] = label.location;
if (logText)
log(label);
}
void AssemblyBuilderA64::logAppend(const char* fmt, ...)
{
char buf[256];
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
text.append(buf);
}
uint32_t AssemblyBuilderA64::getCodeSize() const
{
return uint32_t(codePos - code.data());
}
void AssemblyBuilderA64::place0(const char* name, uint32_t op)
{
if (logText)
log(name);
place(op);
commit();
}
void AssemblyBuilderA64::placeSR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift)
{
if (logText)
log(name, dst, src1, src2, shift);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
LUAU_ASSERT(shift >= 0 && shift < 64); // right shift requires changing some encoding bits
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
place(dst.index | (src1.index << 5) | (shift << 10) | (src2.index << 16) | (op << 24) | sf);
commit();
}
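A quick spot check of the encoding formula above, assuming the shifted-register ADD opcode 0b00'01011 used by add() and shift = 0; plugging in x0, x1, x2 reproduces the canonical A64 encoding:
static_assert((0u | (1u << 5) | (0u << 10) | (2u << 16) | (0b0001011u << 24) | 0x80000000u) == 0x8B020020u,
    "placeSR3 for add x0, x1, x2 should match the canonical A64 ADD (shifted register) encoding");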
void AssemblyBuilderA64::placeSR2(const char* name, RegisterA64 dst, RegisterA64 src, uint8_t op, uint8_t op2)
{
if (logText)
log(name, dst, src);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(dst.kind == src.kind);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
place(dst.index | (0x1f << 5) | (src.index << 16) | (op2 << 21) | (op << 24) | sf);
commit();
}
void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t op2)
{
if (logText)
log(name, dst, src1, src2);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
place(dst.index | (src1.index << 5) | (op2 << 10) | (src2.index << 16) | (op << 21) | sf);
commit();
}
void AssemblyBuilderA64::placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op)
{
if (logText)
log(name, dst, src);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
LUAU_ASSERT(dst.kind == src.kind || (dst.kind == KindA64::x && src == sp) || (dst == sp && src.kind == KindA64::x));
uint32_t sf = (dst.kind != KindA64::w) ? 0x80000000 : 0;
place(dst.index | (src.index << 5) | (op << 10) | sf);
commit();
}
void AssemblyBuilderA64::placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op)
{
if (logText)
log(name, dst, src1, src2);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
LUAU_ASSERT(dst.kind == src1.kind || (dst.kind == KindA64::x && src1 == sp) || (dst == sp && src1.kind == KindA64::x));
LUAU_ASSERT(src2 >= 0 && src2 < (1 << 12));
uint32_t sf = (dst.kind != KindA64::w) ? 0x80000000 : 0;
place(dst.index | (src1.index << 5) | (src2 << 10) | (op << 24) | sf);
commit();
}
void AssemblyBuilderA64::placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift)
{
if (logText)
log(name, dst, src, shift);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(src >= 0 && src <= 0xffff);
LUAU_ASSERT(shift == 0 || shift == 16 || shift == 32 || shift == 48);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
place(dst.index | (src << 5) | ((shift >> 4) << 21) | (op << 23) | sf);
commit();
}
void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 src, uint8_t op, uint8_t size)
{
if (logText)
log(name, dst, src);
switch (src.kind)
{
case AddressKindA64::imm:
if (src.data >= 0 && src.data % (1 << size) == 0)
place(dst.index | (src.base.index << 5) | ((src.data >> size) << 10) | (op << 22) | (1 << 24) | (size << 30));
else if (src.data >= -256 && src.data <= 255)
place(dst.index | (src.base.index << 5) | ((src.data & ((1 << 9) - 1)) << 12) | (op << 22) | (size << 30));
else
LUAU_ASSERT(!"Unable to encode large immediate offset");
break;
case AddressKindA64::reg:
place(dst.index | (src.base.index << 5) | (0b10 << 10) | (0b011 << 13) | (src.offset.index << 16) | (1 << 21) | (op << 22) | (size << 30));
break;
}
commit();
}
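A minimal sketch of the branch selection above for immediate addresses; the explicit 1 << 12 bound on the scaled form is an assumption added here for illustration (placeA itself only asserts when neither form applies):
enum class AddrForm { ScaledU12, UnscaledS9, Unencodable };
constexpr AddrForm classifyImmOffset(int offset, uint8_t size)
{
    if (offset >= 0 && offset % (1 << size) == 0 && (offset >> size) < (1 << 12))
        return AddrForm::ScaledU12;  // LDR/STR-style unsigned immediate, scaled by the access size
    if (offset >= -256 && offset <= 255)
        return AddrForm::UnscaledS9; // LDUR/STUR-style signed 9-bit unscaled offset
    return AddrForm::Unencodable;    // placeA hits the LUAU_ASSERT in this case
}
static_assert(classifyImmOffset(16, 3) == AddrForm::ScaledU12, "aligned positive offsets use the scaled form");
static_assert(classifyImmOffset(-16, 3) == AddrForm::UnscaledS9, "negative offsets fall back to the 9-bit form");
static_assert(classifyImmOffset(4, 3) == AddrForm::UnscaledS9, "small unaligned offsets fall back as well");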
void AssemblyBuilderA64::placeBC(const char* name, Label& label, uint8_t op, uint8_t cond)
{
place(cond | (op << 24));
commit();
patchLabel(label);
if (logText)
log(name, label);
}
void AssemblyBuilderA64::placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond)
{
LUAU_ASSERT(cond.kind == KindA64::w || cond.kind == KindA64::x);
uint32_t sf = (cond.kind == KindA64::x) ? 0x80000000 : 0;
place(cond.index | (op << 24) | sf);
commit();
patchLabel(label);
if (logText)
log(name, cond, label);
}
void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op)
{
if (logText)
log(name, src);
LUAU_ASSERT(src.kind == KindA64::x);
place((src.index << 5) | (op << 10));
commit();
}
void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op)
{
if (logText)
log(name, dst);
LUAU_ASSERT(dst.kind == KindA64::x);
place(dst.index | (op << 24));
commit();
}
void AssemblyBuilderA64::place(uint32_t word)
{
LUAU_ASSERT(codePos < codeEnd);
*codePos++ = word;
}
void AssemblyBuilderA64::patchLabel(Label& label)
{
uint32_t location = getCodeSize() - 1;
if (label.location == ~0u)
{
if (label.id == 0)
{
label.id = nextLabel++;
labelLocations.push_back(~0u);
}
pendingLabels.push_back({label.id, location});
}
else
{
int value = int(label.location) - int(location);
patchImm19(location, value);
}
}
void AssemblyBuilderA64::patchImm19(uint32_t location, int value)
{
// imm19 encoding word offset, at bit offset 5
// note that 18 bits of word offsets = 20 bits of byte offsets = +-1MB
if (value > -(1 << 18) && value < (1 << 18))
code[location] |= (value & ((1 << 19) - 1)) << 5;
else
overflowed = true;
}
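For reference, the reach of these imm19 fixups works out as follows, assuming 4-byte instructions as on A64:
constexpr int kImm19WordRange = 1 << 18;             // signed 19-bit word offset: +-2^18 instructions
constexpr int kImm19ByteRange = kImm19WordRange * 4; // scaled to bytes
static_assert(kImm19ByteRange == (1 << 20), "imm19 covers roughly +-1MB of byte offsets, matching the comment above");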
void AssemblyBuilderA64::commit()
{
LUAU_ASSERT(codePos <= codeEnd);
if (codeEnd == codePos)
extend();
}
void AssemblyBuilderA64::extend()
{
uint32_t count = getCodeSize();
code.resize(code.size() * 2);
codePos = code.data() + count;
codeEnd = code.data() + code.size();
}
size_t AssemblyBuilderA64::allocateData(size_t size, size_t align)
{
LUAU_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0);
if (dataPos < size)
{
size_t oldSize = data.size();
data.resize(data.size() * 2);
memcpy(&data[oldSize], &data[0], oldSize);
memset(&data[0], 0, oldSize);
dataPos += oldSize;
}
dataPos = (dataPos - size) & ~(align - 1);
return dataPos;
}
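A worked example of the bump-down allocation, assuming the fresh 4096-byte buffer set up in the constructor (dataPos == data.size() == 4096) and no resize:
constexpr unsigned bumpDown(unsigned dataPos, unsigned size, unsigned align)
{
    return (dataPos - size) & ~(align - 1); // same expression as the last line of allocateData
}
static_assert(bumpDown(4096, 8, 8) == 4088, "first 8-byte constant sits at the very end of the buffer");
static_assert(bumpDown(4088, 4, 4) == 4084, "the next constant packs immediately below it");
finalize() later memmoves data[dataPos..size) to the front of the buffer, so the constants end up contiguous at offset 0.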
void AssemblyBuilderA64::log(const char* opcode)
{
logAppend(" %s\n", opcode);
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
logAppend(" %-12s", opcode);
if (dst != xzr && dst != wzr)
{
log(dst);
text.append(",");
}
log(src1);
text.append(",");
log(src2);
if (shift > 0)
logAppend(" LSL #%d", shift);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 src1, int src2)
{
logAppend(" %-12s", opcode);
if (dst != xzr && dst != wzr)
{
log(dst);
text.append(",");
}
log(src1);
text.append(",");
logAppend("#%d", src2);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, AddressA64 src)
{
logAppend(" %-12s", opcode);
log(dst);
text.append(",");
log(src);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 src)
{
logAppend(" %-12s", opcode);
log(dst);
text.append(",");
log(src);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, int src, int shift)
{
logAppend(" %-12s", opcode);
log(dst);
text.append(",");
logAppend("#%d", src);
if (shift > 0)
logAppend(" LSL #%d", shift);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 src, Label label)
{
logAppend(" %-12s", opcode);
log(src);
text.append(",");
logAppend(".L%d\n", label.id);
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 src)
{
logAppend(" %-12s", opcode);
log(src);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, Label label)
{
logAppend(" %-12s.L%d\n", opcode, label.id);
}
void AssemblyBuilderA64::log(Label label)
{
logAppend(".L%d:\n", label.id);
}
void AssemblyBuilderA64::log(RegisterA64 reg)
{
switch (reg.kind)
{
case KindA64::w:
if (reg.index == 31)
text.append("wzr");
else
logAppend("w%d", reg.index);
break;
case KindA64::x:
if (reg.index == 31)
text.append("xzr");
else
logAppend("x%d", reg.index);
break;
case KindA64::none:
if (reg.index == 31)
text.append("sp");
else
LUAU_ASSERT(!"Unexpected register kind");
break;
}
}
void AssemblyBuilderA64::log(AddressA64 addr)
{
text.append("[");
switch (addr.kind)
{
case AddressKindA64::imm:
log(addr.base);
if (addr.data != 0)
logAppend(",#%d", addr.data);
break;
case AddressKindA64::reg:
log(addr.base);
text.append(",");
log(addr.offset);
if (addr.data != 0)
logAppend(" LSL #%d", addr.data);
break;
}
text.append("]");
}
} // namespace CodeGen
} // namespace Luau

View file

@ -13,13 +13,13 @@ namespace CodeGen
{
// TODO: more assertions on operand sizes
const uint8_t codeForCondition[] = {
static const uint8_t codeForCondition[] = {
0x0, 0x1, 0x2, 0x3, 0x2, 0x6, 0x7, 0x3, 0x4, 0xc, 0xe, 0xf, 0xd, 0x3, 0x7, 0x6, 0x2, 0x5, 0xd, 0xf, 0xe, 0xc, 0x4, 0x5, 0xa, 0xb};
static_assert(sizeof(codeForCondition) / sizeof(codeForCondition[0]) == size_t(Condition::Count), "all conditions have to be covered");
static_assert(sizeof(codeForCondition) / sizeof(codeForCondition[0]) == size_t(ConditionX64::Count), "all conditions have to be covered");
const char* textForCondition[] = {"jo", "jno", "jc", "jnc", "jb", "jbe", "ja", "jae", "je", "jl", "jle", "jg", "jge", "jnb", "jnbe", "jna", "jnae",
"jne", "jnl", "jnle", "jng", "jnge", "jz", "jnz", "jp", "jnp"};
static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(Condition::Count), "all conditions have to be covered");
static const char* textForCondition[] = {"jo", "jno", "jc", "jnc", "jb", "jbe", "ja", "jae", "je", "jl", "jle", "jg", "jge", "jnb", "jnbe", "jna",
"jnae", "jne", "jnl", "jnle", "jng", "jnge", "jz", "jnz", "jp", "jnp"};
static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(ConditionX64::Count), "all conditions have to be covered");
#define OP_PLUS_REG(op, reg) ((op) + (reg & 0x7))
#define OP_PLUS_CC(op, cc) ((op) + uint8_t(cc))
@ -328,7 +328,10 @@ void AssemblyBuilderX64::lea(OperandX64 lhs, OperandX64 rhs)
if (logText)
log("lea", lhs, rhs);
LUAU_ASSERT(rhs.cat == CategoryX64::mem);
LUAU_ASSERT(lhs.cat == CategoryX64::reg && rhs.cat == CategoryX64::mem && rhs.memSize == SizeX64::none);
LUAU_ASSERT(rhs.base == rip || rhs.base.size == lhs.base.size);
LUAU_ASSERT(rhs.index == noreg || rhs.index.size == lhs.base.size);
rhs.memSize = lhs.base.size;
placeBinaryRegAndRegMem(lhs, rhs, 0x8d, 0x8d);
}
@ -363,7 +366,7 @@ void AssemblyBuilderX64::ret()
commit();
}
void AssemblyBuilderX64::jcc(Condition cond, Label& label)
void AssemblyBuilderX64::jcc(ConditionX64 cond, Label& label)
{
placeJcc(textForCondition[size_t(cond)], label, codeForCondition[size_t(cond)]);
}
@ -781,7 +784,7 @@ OperandX64 AssemblyBuilderX64::bytes(const void* ptr, size_t size, size_t align)
{
size_t pos = allocateData(size, align);
memcpy(&data[pos], ptr, size);
return OperandX64(SizeX64::qword, noreg, 1, rip, int32_t(pos - data.size()));
return OperandX64(SizeX64::none, noreg, 1, rip, int32_t(pos - data.size()));
}
void AssemblyBuilderX64::logAppend(const char* fmt, ...)
@ -1072,7 +1075,7 @@ void AssemblyBuilderX64::placeVex(OperandX64 dst, OperandX64 src1, OperandX64 sr
place(AVX_3_3(setW, src1.base, dst.base.size == SizeX64::ymmword, prefix));
}
uint8_t getScaleEncoding(uint8_t scale)
static uint8_t getScaleEncoding(uint8_t scale)
{
static const uint8_t scales[9] = {0xff, 0, 1, 0xff, 2, 0xff, 0xff, 0xff, 3};

View file

@ -51,10 +51,15 @@ static void assembleHelpers(AssemblyBuilderX64& build, ModuleHelpers& helpers)
build.logAppend("; exitNoContinueVm\n");
helpers.exitNoContinueVm = build.setLabel();
emitExit(build, /* continueInVm */ false);
if (build.logText)
build.logAppend("; continueCallInVm\n");
helpers.continueCallInVm = build.setLabel();
emitContinueCallInVm(build);
}
static int emitInst(
AssemblyBuilderX64& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, LuauOpcode op, const Instruction* pc, int i, Label* labelarr, Label& fallback)
static int emitInst(AssemblyBuilderX64& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, LuauOpcode op, const Instruction* pc, int i,
Label* labelarr, Label& fallback)
{
int skip = 0;
@ -86,6 +91,9 @@ static int emitInst(
case LOP_SETGLOBAL:
emitInstSetGlobal(build, pc, i, labelarr, fallback);
break;
case LOP_CALL:
emitInstCall(build, helpers, pc, i, labelarr);
break;
case LOP_RETURN:
emitInstReturn(build, helpers, pc, i, labelarr);
break;
@ -123,19 +131,19 @@ static int emitInst(
emitInstJumpIfEq(build, pc, i, labelarr, /* not_ */ false, fallback);
break;
case LOP_JUMPIFLE:
emitInstJumpIfCond(build, pc, i, labelarr, Condition::LessEqual, fallback);
emitInstJumpIfCond(build, pc, i, labelarr, ConditionX64::LessEqual, fallback);
break;
case LOP_JUMPIFLT:
emitInstJumpIfCond(build, pc, i, labelarr, Condition::Less, fallback);
emitInstJumpIfCond(build, pc, i, labelarr, ConditionX64::Less, fallback);
break;
case LOP_JUMPIFNOTEQ:
emitInstJumpIfEq(build, pc, i, labelarr, /* not_ */ true, fallback);
break;
case LOP_JUMPIFNOTLE:
emitInstJumpIfCond(build, pc, i, labelarr, Condition::NotLessEqual, fallback);
emitInstJumpIfCond(build, pc, i, labelarr, ConditionX64::NotLessEqual, fallback);
break;
case LOP_JUMPIFNOTLT:
emitInstJumpIfCond(build, pc, i, labelarr, Condition::NotLess, fallback);
emitInstJumpIfCond(build, pc, i, labelarr, ConditionX64::NotLess, fallback);
break;
case LOP_JUMPX:
emitInstJumpX(build, pc, i, labelarr);
@ -291,19 +299,19 @@ static void emitInstFallback(AssemblyBuilderX64& build, NativeState& data, LuauO
emitInstJumpIfEqFallback(build, pc, i, labelarr, /* not_ */ false);
break;
case LOP_JUMPIFLE:
emitInstJumpIfCondFallback(build, pc, i, labelarr, Condition::LessEqual);
emitInstJumpIfCondFallback(build, pc, i, labelarr, ConditionX64::LessEqual);
break;
case LOP_JUMPIFLT:
emitInstJumpIfCondFallback(build, pc, i, labelarr, Condition::Less);
emitInstJumpIfCondFallback(build, pc, i, labelarr, ConditionX64::Less);
break;
case LOP_JUMPIFNOTEQ:
emitInstJumpIfEqFallback(build, pc, i, labelarr, /* not_ */ true);
break;
case LOP_JUMPIFNOTLE:
emitInstJumpIfCondFallback(build, pc, i, labelarr, Condition::NotLessEqual);
emitInstJumpIfCondFallback(build, pc, i, labelarr, ConditionX64::NotLessEqual);
break;
case LOP_JUMPIFNOTLT:
emitInstJumpIfCondFallback(build, pc, i, labelarr, Condition::NotLess);
emitInstJumpIfCondFallback(build, pc, i, labelarr, ConditionX64::NotLess);
break;
case LOP_ADD:
emitInstBinaryFallback(build, pc, i, TM_ADD);
@ -687,6 +695,9 @@ void compile(lua_State* L, int idx)
{
for (int i = 0; i < result->proto->sizecode; i++)
result->instTargets[i] += uintptr_t(codeStart + result->location);
LUAU_ASSERT(result->proto->sizecode);
result->entryTarget = result->instTargets[0];
}
// Link native proto objects to Proto; the memory is now managed by VM and will be freed via onDestroyFunction

View file

@ -72,5 +72,59 @@ void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc)
}
}
Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults)
{
// slow-path: not a function call
if (LUAU_UNLIKELY(!ttisfunction(ra)))
{
luaV_tryfuncTM(L, ra);
argtop++; // __call adds an extra self
}
Closure* ccl = clvalue(ra);
CallInfo* ci = incr_ci(L);
ci->func = ra;
ci->base = ra + 1;
ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = nresults;
L->base = ci->base;
L->top = argtop;
// note: this reallocs stack, but we don't need to VM_PROTECT this
// this is because we're going to modify base/savedpc manually anyhow
// crucially, we can't use ra/argtop after this line
luaD_checkstack(L, ccl->stacksize);
return ccl;
}
void callEpilogC(lua_State* L, int nresults, int n)
{
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
// note: in MULTRET context nresults starts as -1, so the i != 0 condition intentionally never stops the copy
StkId res = ci->func;
StkId vali = L->top - n;
StkId valend = L->top;
int i;
for (i = nresults; i != 0 && vali < valend; i--)
setobj2s(L, res++, vali++);
while (i-- > 0)
setnilvalue(res++);
// pop the stack frame
L->ci = cip;
L->base = cip->base;
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
}
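To make the nresults/MULTRET note above concrete, here is a small self-contained model of the copy/fill accounting (hypothetical helper, names assumed; it returns how many values are copied and how many nils are appended):
#include <utility>
std::pair<int, int> epilogCounts(int nresults, int n)
{
    int copied = 0;
    int i = nresults;
    for (; i != 0 && copied < n; i--)
        copied++;             // mirrors the setobj2s copy loop
    int nils = i > 0 ? i : 0; // mirrors 'while (i-- > 0) setnilvalue(res++)'
    return {copied, nils};
}
// epilogCounts(2, 1)  -> {1, 1}: one value copied, one nil appended so the caller sees exactly 2 results
// epilogCounts(-1, 3) -> {3, 0}: LUA_MULTRET copies everything and never pads with nil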
} // namespace CodeGen
} // namespace Luau

View file

@ -13,5 +13,8 @@ bool forgLoopNonTableFallback(lua_State* L, int insnA, int aux);
void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc);
Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults);
void callEpilogC(lua_State* L, int nresults, int n);
} // namespace CodeGen
} // namespace Luau

View file

@ -14,15 +14,15 @@
* Each line is 8 bytes, stack grows downwards.
*
* | ... previous frames ...
* | rdx home space | (saved only on windows)
* | rcx home space | (saved only on windows)
* | rdx home space | (unused)
* | rcx home space | (unused)
* | return address |
* | ... saved non-volatile registers ...
* | ... saved non-volatile registers ... <-- rsp + kStackSize + kLocalsSize
* | unused | for 16 byte alignment of the stack
* | sCode |
* | sClosure | <-- rbp points here
* | argument 6 |
* | argument 5 |
* | sClosure | <-- rsp + kStackSize
* | argument 6 | <-- rsp + 40
* | argument 5 | <-- rsp + 32
* | r9 home space |
* | r8 home space |
* | rdx home space |
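A back-of-the-envelope check of the 16-byte alignment claim in this layout; the register counts are read off this diff (rbp, rbx, r12-r15 always, rdi/rsi additionally on Windows) and are assumptions of this sketch:
constexpr unsigned kStackSize = 32 + 16; // 4 register home locations + 16 bytes of extra call arguments
constexpr unsigned kLocalsSize = 24;     // sClosure, sCode and one unused alignment slot
constexpr unsigned kReturnAddress = 8;
constexpr unsigned kSavedSysV = 6 * 8;
constexpr unsigned kSavedWindows = 8 * 8;
static_assert((kReturnAddress + kSavedSysV + kStackSize + kLocalsSize) % 16 == 0, "System V frame stays 16-byte aligned");
static_assert((kReturnAddress + kSavedWindows + kStackSize + kLocalsSize) % 16 == 0, "Windows frame stays 16-byte aligned");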
@ -48,28 +48,18 @@ bool initEntryFunction(NativeState& data)
unwind.start();
if (build.abi == ABIX64::Windows)
{
// Place arguments in home space
build.mov(qword[rsp + 16], rArg2);
unwind.spill(16, rArg2);
build.mov(qword[rsp + 8], rArg1);
unwind.spill(8, rArg1);
// Save non-volatile registers that are specific to Windows x64 ABI
build.push(rdi);
unwind.save(rdi);
build.push(rsi);
unwind.save(rsi);
// Once we start using non-volatile SIMD registers, we will save those here
}
// Save common non-volatile registers
build.push(rbx);
unwind.save(rbx);
build.push(rbp);
unwind.save(rbp);
if (build.abi == ABIX64::SystemV)
{
build.mov(rbp, rsp);
unwind.setupFrameReg(rbp, 0);
}
build.push(rbx);
unwind.save(rbx);
build.push(r12);
unwind.save(r12);
build.push(r13);
@ -79,16 +69,20 @@ bool initEntryFunction(NativeState& data)
build.push(r15);
unwind.save(r15);
int stacksize = 32 + 16; // 4 home locations for registers, 16 bytes for additional function call arguments
int localssize = 24; // 3 local pointers that also correctly align the stack
if (build.abi == ABIX64::Windows)
{
// Save non-volatile registers that are specific to Windows x64 ABI
build.push(rdi);
unwind.save(rdi);
build.push(rsi);
unwind.save(rsi);
// TODO: once we start using non-volatile SIMD registers on Windows, we will save those here
}
// Allocate stack space (reg home area + local data)
build.sub(rsp, stacksize + localssize);
unwind.allocStack(stacksize + localssize);
// Setup frame pointer
build.lea(rbp, qword[rsp + stacksize]);
unwind.setupFrameReg(rbp, stacksize);
build.sub(rsp, kStackSize + kLocalsSize);
unwind.allocStack(kStackSize + kLocalsSize);
unwind.finish();
@ -113,13 +107,7 @@ bool initEntryFunction(NativeState& data)
Label returnOff = build.setLabel();
// Cleanup and exit
build.lea(rsp, qword[rbp + localssize]);
build.pop(r15);
build.pop(r14);
build.pop(r13);
build.pop(r12);
build.pop(rbp);
build.pop(rbx);
build.add(rsp, kStackSize + kLocalsSize);
if (build.abi == ABIX64::Windows)
{
@ -127,6 +115,12 @@ bool initEntryFunction(NativeState& data)
build.pop(rdi);
}
build.pop(r15);
build.pop(r14);
build.pop(r13);
build.pop(r12);
build.pop(rbx);
build.pop(rbp);
build.ret();
build.finalize();

View file

@ -14,7 +14,7 @@ namespace Luau
namespace CodeGen
{
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, Condition cond, Label& label)
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, ConditionX64 cond, Label& label)
{
// Refresher on comi/ucomi EFLAGS:
// CF only: less
@ -35,53 +35,54 @@ void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs,
// And because of NaN, integer check interchangeability like 'not less or equal' <-> 'greater' does not hold
switch (cond)
{
case Condition::NotLessEqual:
case ConditionX64::NotLessEqual:
// (b < a) is the same as !(a <= b). jnae checks CF=1 which means < or NaN
build.jcc(Condition::NotAboveEqual, label);
build.jcc(ConditionX64::NotAboveEqual, label);
break;
case Condition::LessEqual:
case ConditionX64::LessEqual:
// (b >= a) is the same as (a <= b). jae checks CF=0 which means >= and not NaN
build.jcc(Condition::AboveEqual, label);
build.jcc(ConditionX64::AboveEqual, label);
break;
case Condition::NotLess:
case ConditionX64::NotLess:
// (b <= a) is the same as !(a < b). jna checks CF=1 or ZF=1 which means <= or NaN
build.jcc(Condition::NotAbove, label);
build.jcc(ConditionX64::NotAbove, label);
break;
case Condition::Less:
case ConditionX64::Less:
// (b > a) is the same as (a < b). ja checks CF=0 and ZF=0 which means > and not NaN
build.jcc(Condition::Above, label);
build.jcc(ConditionX64::Above, label);
break;
case Condition::NotEqual:
case ConditionX64::NotEqual:
// ZF=0 or PF=1 means != or NaN
build.jcc(Condition::NotZero, label);
build.jcc(Condition::Parity, label);
build.jcc(ConditionX64::NotZero, label);
build.jcc(ConditionX64::Parity, label);
break;
default:
LUAU_ASSERT(!"Unsupported condition");
}
}
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, Condition cond, Label& label, int pcpos)
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, ConditionX64 cond, Label& label, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
build.lea(rArg2, luauRegAddress(ra));
build.lea(rArg3, luauRegAddress(rb));
if (cond == Condition::NotLessEqual || cond == Condition::LessEqual)
if (cond == ConditionX64::NotLessEqual || cond == ConditionX64::LessEqual)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessequal)]);
else if (cond == Condition::NotLess || cond == Condition::Less)
else if (cond == ConditionX64::NotLess || cond == ConditionX64::Less)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessthan)]);
else if (cond == Condition::NotEqual || cond == Condition::Equal)
else if (cond == ConditionX64::NotEqual || cond == ConditionX64::Equal)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]);
else
LUAU_ASSERT(!"Unsupported condition");
emitUpdateBase(build);
build.test(eax, eax);
build.jcc(
cond == Condition::NotLessEqual || cond == Condition::NotLess || cond == Condition::NotEqual ? Condition::Zero : Condition::NotZero, label);
build.jcc(cond == ConditionX64::NotLessEqual || cond == ConditionX64::NotLess || cond == ConditionX64::NotEqual ? ConditionX64::Zero
: ConditionX64::NotZero,
label);
}
RegisterX64 getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 table, int pcpos)
@ -120,7 +121,7 @@ void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, Regi
build.vucomisd(tmp, numd); // Sets ZF=1 if equal or NaN
// We don't need non-integer values
// But to skip the PF=1 check, we proceed with NaN because 0x80000000 index is out of bounds
build.jcc(Condition::NotZero, label);
build.jcc(ConditionX64::NotZero, label);
}
void callArithHelper(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, int pcpos, TMS tm)
@ -133,8 +134,8 @@ void callArithHelper(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, in
build.mov(rArg5, tm);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
build.lea(rArg2, luauRegAddress(ra));
build.lea(rArg3, luauRegAddress(rb));
build.lea(rArg4, c);
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_doarith)]);
@ -146,8 +147,8 @@ void callLengthHelper(AssemblyBuilderX64& build, int ra, int rb, int pcpos)
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
build.lea(rArg2, luauRegAddress(ra));
build.lea(rArg3, luauRegAddress(rb));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_dolen)]);
emitUpdateBase(build);
@ -158,9 +159,9 @@ void callPrepareForN(AssemblyBuilderX64& build, int limit, int step, int init, i
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(limit));
build.lea(rArg3, luauRegValue(step));
build.lea(rArg4, luauRegValue(init));
build.lea(rArg2, luauRegAddress(limit));
build.lea(rArg3, luauRegAddress(step));
build.lea(rArg4, luauRegAddress(init));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_prepareFORN)]);
}
@ -169,9 +170,9 @@ void callGetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int p
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(rb));
build.lea(rArg2, luauRegAddress(rb));
build.lea(rArg3, c);
build.lea(rArg4, luauRegValue(ra));
build.lea(rArg4, luauRegAddress(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_gettable)]);
emitUpdateBase(build);
@ -182,9 +183,9 @@ void callSetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int p
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(rb));
build.lea(rArg2, luauRegAddress(rb));
build.lea(rArg3, c);
build.lea(rArg4, luauRegValue(ra));
build.lea(rArg4, luauRegAddress(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_settable)]);
emitUpdateBase(build);
@ -197,16 +198,16 @@ static void callBarrierImpl(AssemblyBuilderX64& build, RegisterX64 tmp, Register
// iscollectable(ra)
build.cmp(luauRegTag(ra), LUA_TSTRING);
build.jcc(Condition::Less, skip);
build.jcc(ConditionX64::Less, skip);
// isblack(obj2gco(o))
build.test(byte[object + offsetof(GCheader, marked)], bitmask(BLACKBIT));
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
// iswhite(gcvalue(ra))
build.mov(tmp, luauRegValue(ra));
build.test(byte[tmp + offsetof(GCheader, marked)], bit2mask(WHITE0BIT, WHITE1BIT));
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
LUAU_ASSERT(object != rArg3);
build.mov(rArg3, tmp);
@ -229,12 +230,12 @@ void callBarrierTableFast(AssemblyBuilderX64& build, RegisterX64 table, Label& s
{
// isblack(obj2gco(t))
build.test(byte[table + offsetof(GCheader, marked)], bitmask(BLACKBIT));
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
// Argument setup re-ordered to avoid conflicts with table register
if (table != rArg2)
build.mov(rArg2, table);
build.lea(rArg3, qword[rArg2 + offsetof(Table, gclist)]);
build.lea(rArg3, addr[rArg2 + offsetof(Table, gclist)]);
build.mov(rArg1, rState);
build.call(qword[rNativeContext + offsetof(NativeContext, luaC_barrierback)]);
}
@ -244,13 +245,13 @@ void callCheckGc(AssemblyBuilderX64& build, int pcpos, bool savepc, Label& skip)
build.mov(rax, qword[rState + offsetof(lua_State, global)]);
build.mov(rdx, qword[rax + offsetof(global_State, totalbytes)]);
build.cmp(rdx, qword[rax + offsetof(global_State, GCthreshold)]);
build.jcc(Condition::Below, skip);
build.jcc(ConditionX64::Below, skip);
if (savepc)
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.mov(rArg2, 1);
build.mov(dwordReg(rArg2), 1);
build.call(qword[rNativeContext + offsetof(NativeContext, luaC_step)]);
emitUpdateBase(build);
@ -288,7 +289,7 @@ void emitInterrupt(AssemblyBuilderX64& build, int pcpos)
build.mov(r8, qword[rState + offsetof(lua_State, global)]);
build.mov(r8, qword[r8 + offsetof(global_State, cb.interrupt)]);
build.test(r8, r8);
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
emitSetSavedPc(build, pcpos + 1); // uses rax/rdx
@ -301,7 +302,7 @@ void emitInterrupt(AssemblyBuilderX64& build, int pcpos)
// Check if we need to exit
build.mov(al, byte[rState + offsetof(lua_State, status)]);
build.test(al, al);
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.sub(qword[rax + offsetof(CallInfo, savedpc)], sizeof(Instruction));
@ -329,17 +330,6 @@ void emitFallback(AssemblyBuilderX64& build, NativeState& data, int op, int pcpo
build.mov(rArg4, rConstants);
build.call(qword[rNativeContext + offsetof(NativeContext, fallback) + op * sizeof(NativeFallback) + offsetof(NativeFallback, fallback)]);
// Some instructions may interrupt the execution
if (opinfo.flags & kFallbackCheckInterrupt)
{
Label skip;
build.test(rax, rax);
build.jcc(Condition::NotZero, skip);
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
}
emitUpdateBase(build);
// Some instructions may jump to a different instruction or a completely different function
@ -359,50 +349,17 @@ void emitFallback(AssemblyBuilderX64& build, NativeState& data, int op, int pcpo
build.mov(rcx, qword[rdx + offsetof(NativeProto, instTargets)]);
build.jmp(qword[rax * 2 + rcx]);
}
else if (opinfo.flags & kFallbackUpdateCi)
{
// Need to update state of the current function before we jump away
build.mov(rcx, qword[rState + offsetof(lua_State, ci)]); // L->ci
build.mov(rcx, qword[rcx + offsetof(CallInfo, func)]); // L->ci->func
build.mov(rcx, qword[rcx + offsetof(TValue, value.gc)]); // L->ci->func->value.gc aka cl
build.mov(sClosure, rcx);
build.mov(rsi, qword[rcx + offsetof(Closure, l.p)]); // cl->l.p aka proto
build.mov(rConstants, qword[rsi + offsetof(Proto, k)]); // proto->k
build.mov(rcx, qword[rsi + offsetof(Proto, code)]); // proto->code
build.mov(sCode, rcx);
}
// We'll need original instruction pointer later to handle return to interpreter
if (op == LOP_CALL)
build.mov(r9, rax);
void emitContinueCallInVm(AssemblyBuilderX64& build)
{
RegisterX64 proto = rcx; // Sync with emitInstCall
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 8 back to byte offset later so it cancels out
build.sub(rax, sCode);
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], rdx);
// We need to check if the new function can be executed natively
Label returnToInterpreter;
build.mov(rdx, qword[rsi + offsetofProtoExecData]);
build.test(rdx, rdx);
build.jcc(Condition::Zero, returnToInterpreter);
// Get new instruction location and jump to it
build.mov(rcx, qword[rdx + offsetof(NativeProto, instTargets)]);
build.jmp(qword[rax * 2 + rcx]);
build.setLabel(returnToInterpreter);
// If we are returning to the interpreter to make a call, we need to update the current instruction
if (op == LOP_CALL)
{
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], r9);
}
// Continue in the interpreter
emitExit(build, /* continueInVm */ true);
}
emitExit(build, /* continueInVm */ true);
}
} // namespace CodeGen

View file

@ -32,8 +32,12 @@ constexpr RegisterX64 rNativeContext = r13; // NativeContext* context
constexpr RegisterX64 rConstants = r12; // TValue* k
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
constexpr OperandX64 sClosure = qword[rbp + 0]; // Closure* cl
constexpr OperandX64 sCode = qword[rbp + 8]; // Instruction* code
// See CodeGenX64.cpp for layout
constexpr unsigned kStackSize = 32 + 16; // 4 home locations for registers, 16 bytes for additional function call arguments
constexpr unsigned kLocalsSize = 24; // 3 extra slots for our custom locals (also aligns the stack to 16 byte boundary)
constexpr OperandX64 sClosure = qword[rsp + kStackSize + 0]; // Closure* cl
constexpr OperandX64 sCode = qword[rsp + kStackSize + 8]; // Instruction* code
// TODO: These should be replaced with a portable call function that checks the ABI at runtime and reorders moves accordingly to avoid conflicts
#if defined(_WIN32)
@ -72,6 +76,7 @@ struct ModuleHelpers
{
Label exitContinueVm;
Label exitNoContinueVm;
Label continueCallInVm;
};
inline OperandX64 luauReg(int ri)
@ -79,6 +84,11 @@ inline OperandX64 luauReg(int ri)
return xmmword[rBase + ri * sizeof(TValue)];
}
inline OperandX64 luauRegAddress(int ri)
{
return addr[rBase + ri * sizeof(TValue)];
}
inline OperandX64 luauRegValue(int ri)
{
return qword[rBase + ri * sizeof(TValue) + offsetof(TValue, value)];
@ -99,6 +109,11 @@ inline OperandX64 luauConstant(int ki)
return xmmword[rConstants + ki * sizeof(TValue)];
}
inline OperandX64 luauConstantAddress(int ki)
{
return addr[rConstants + ki * sizeof(TValue)];
}
inline OperandX64 luauConstantTag(int ki)
{
return dword[rConstants + ki * sizeof(TValue) + offsetof(TValue, tt)];
@ -144,13 +159,13 @@ inline void setNodeValue(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64
inline void jumpIfTagIs(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(Condition::Equal, label);
build.jcc(ConditionX64::Equal, label);
}
inline void jumpIfTagIsNot(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(Condition::NotEqual, label);
build.jcc(ConditionX64::NotEqual, label);
}
// Note: fallthrough label should be placed after this condition
@ -160,7 +175,7 @@ inline void jumpIfFalsy(AssemblyBuilderX64& build, int ri, Label& target, Label&
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, fallthrough); // true if not nil or boolean
build.cmp(luauRegValueBoolean(ri), 0);
build.jcc(Condition::Equal, target); // true if boolean value is 'true'
build.jcc(ConditionX64::Equal, target); // true if boolean value is 'true'
}
// Note: fallthrough label should be placed after this condition
@ -170,13 +185,13 @@ inline void jumpIfTruthy(AssemblyBuilderX64& build, int ri, Label& target, Label
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, target); // true if not nil or boolean
build.cmp(luauRegValueBoolean(ri), 0);
build.jcc(Condition::NotEqual, target); // true if boolean value is 'true'
build.jcc(ConditionX64::NotEqual, target); // true if boolean value is 'true'
}
inline void jumpIfMetatablePresent(AssemblyBuilderX64& build, RegisterX64 table, Label& target)
{
build.cmp(qword[table + offsetof(Table, metatable)], 0);
build.jcc(Condition::NotEqual, target);
build.jcc(ConditionX64::NotEqual, target);
}
inline void jumpIfUnsafeEnv(AssemblyBuilderX64& build, RegisterX64 tmp, Label& label)
@ -184,13 +199,13 @@ inline void jumpIfUnsafeEnv(AssemblyBuilderX64& build, RegisterX64 tmp, Label& l
build.mov(tmp, sClosure);
build.mov(tmp, qword[tmp + offsetof(Closure, env)]);
build.test(byte[tmp + offsetof(Table, safeenv)], 1);
build.jcc(Condition::Zero, label); // Not a safe environment
build.jcc(ConditionX64::Zero, label); // Not a safe environment
}
inline void jumpIfTableIsReadOnly(AssemblyBuilderX64& build, RegisterX64 table, Label& label)
{
build.cmp(byte[table + offsetof(Table, readonly)], 0);
build.jcc(Condition::NotEqual, label);
build.jcc(ConditionX64::NotEqual, label);
}
inline void jumpIfNodeKeyTagIsNot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, lua_Type tag, Label& label)
@ -200,13 +215,13 @@ inline void jumpIfNodeKeyTagIsNot(AssemblyBuilderX64& build, RegisterX64 tmp, Re
build.mov(tmp, luauNodeKeyTag(node));
build.and_(tmp, kLuaNodeTagMask);
build.cmp(tmp, tag);
build.jcc(Condition::NotEqual, label);
build.jcc(ConditionX64::NotEqual, label);
}
inline void jumpIfNodeValueTagIs(AssemblyBuilderX64& build, RegisterX64 node, lua_Type tag, Label& label)
{
build.cmp(dword[node + offsetof(LuaNode, val) + offsetof(TValue, tt)], tag);
build.jcc(Condition::Equal, label);
build.jcc(ConditionX64::Equal, label);
}
inline void jumpIfNodeKeyNotInExpectedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, OperandX64 expectedKey, Label& label)
@ -215,13 +230,13 @@ inline void jumpIfNodeKeyNotInExpectedSlot(AssemblyBuilderX64& build, RegisterX6
build.mov(tmp, expectedKey);
build.cmp(tmp, luauNodeKeyValue(node));
build.jcc(Condition::NotEqual, label);
build.jcc(ConditionX64::NotEqual, label);
jumpIfNodeValueTagIs(build, node, LUA_TNIL, label);
}
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, Condition cond, Label& label);
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, Condition cond, Label& label, int pcpos);
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, ConditionX64 cond, Label& label);
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, ConditionX64 cond, Label& label, int pcpos);
RegisterX64 getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 table, int pcpos);
void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, int ri, Label& label);
@ -242,5 +257,8 @@ void emitSetSavedPc(AssemblyBuilderX64& build, int pcpos); // Note: only uses ra
void emitInterrupt(AssemblyBuilderX64& build, int pcpos);
void emitFallback(AssemblyBuilderX64& build, NativeState& data, int op, int pcpos);
void emitContinueCallInVm(AssemblyBuilderX64& build);
void emitExitFromLastReturn(AssemblyBuilderX64& build);
} // namespace CodeGen
} // namespace Luau

View file

@ -11,9 +11,6 @@
#include "lobject.h"
#include "ltm.h"
// TODO: all uses of luauRegValue and luauConstantValue need to be audited; some need to be changed to luauReg/ConstantAddress (doesn't exist yet)
// (the problem with existing use is that it includes additional offsetof(TValue, value) which happens to be 0 but isn't guaranteed to be)
namespace Luau
{
namespace CodeGen
@ -72,6 +69,159 @@ void emitInstMove(AssemblyBuilderX64& build, const Instruction* pc)
build.vmovups(luauReg(ra), xmm0);
}
void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
int nparams = LUAU_INSN_B(*pc) - 1;
int nresults = LUAU_INSN_C(*pc) - 1;
emitInterrupt(build, pcpos);
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegAddress(ra));
if (nparams == LUA_MULTRET)
build.mov(rArg3, qword[rState + offsetof(lua_State, top)]);
else
build.lea(rArg3, luauRegAddress(ra + 1 + nparams));
build.mov(dwordReg(rArg4), nresults);
build.call(qword[rNativeContext + offsetof(NativeContext, callProlog)]);
RegisterX64 ccl = rax; // Returned from callProlog
emitUpdateBase(build);
Label cFuncCall;
build.test(byte[ccl + offsetof(Closure, isC)], 1);
build.jcc(ConditionX64::NotZero, cFuncCall);
{
RegisterX64 proto = rcx; // Sync with emitContinueCallInVm
RegisterX64 ci = rdx;
RegisterX64 argi = rsi;
RegisterX64 argend = rdi;
build.mov(proto, qword[ccl + offsetof(Closure, l.p)]);
// Switch current Closure
build.mov(sClosure, ccl); // Last use of 'ccl'
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
Label fillnil, exitfillnil;
// argi = L->top
build.mov(argi, qword[rState + offsetof(lua_State, top)]);
// argend = L->base + p->numparams
build.movzx(eax, byte[proto + offsetof(Proto, numparams)]);
build.shl(eax, kTValueSizeLog2);
build.lea(argend, addr[rBase + rax]);
// while (argi < argend) setnilvalue(argi++);
build.setLabel(fillnil);
build.cmp(argi, argend);
build.jcc(ConditionX64::NotBelow, exitfillnil);
build.mov(dword[argi + offsetof(TValue, tt)], LUA_TNIL);
build.add(argi, sizeof(TValue));
build.jmp(fillnil); // This loop rarely runs so it's not worth repeating cmp/jcc
build.setLabel(exitfillnil);
// Set L->top to ci->top as most functions expect (no vararg)
build.mov(rax, qword[ci + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
build.mov(rax, qword[proto + offsetofProtoExecData]); // We'll need this value later
// But if it is vararg, update it to 'argi'
Label skipVararg;
build.test(byte[proto + offsetof(Proto, is_vararg)], 1);
build.jcc(ConditionX64::Zero, skipVararg);
build.mov(qword[rState + offsetof(lua_State, top)], argi);
build.setLabel(skipVararg);
// Check native function data
build.test(rax, rax);
build.jcc(ConditionX64::Zero, helpers.continueCallInVm);
// Switch current constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
// Switch current code
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(sCode, rdx);
build.jmp(qword[rax + offsetof(NativeProto, entryTarget)]);
}
build.setLabel(cFuncCall);
{
// results = ccl->c.f(L);
build.mov(rArg1, rState);
build.call(qword[ccl + offsetof(Closure, c.f)]); // Last use of 'ccl'
RegisterX64 results = eax;
build.test(results, results); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, helpers.exitNoContinueVm); // jl jumps if SF != OF
// We have special handling for small number of expected results below
if (nresults != 0 && nresults != 1)
{
build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), nresults);
build.mov(dwordReg(rArg3), results);
build.call(qword[rNativeContext + offsetof(NativeContext, callEpilogC)]);
emitUpdateBase(build);
return;
}
RegisterX64 ci = rdx;
RegisterX64 cip = rcx;
RegisterX64 vali = rsi;
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// L->base = cip->base
build.mov(rBase, qword[cip + offsetof(CallInfo, base)]);
build.mov(qword[rState + offsetof(lua_State, base)], rBase);
if (nresults == 1)
{
// Opportunistically copy the result we expected from (L->top - results)
build.mov(vali, qword[rState + offsetof(lua_State, top)]);
build.shl(results, kTValueSizeLog2);
build.sub(vali, qwordReg(results));
build.vmovups(xmm0, xmmword[vali]);
build.vmovups(luauReg(ra), xmm0);
Label skipnil;
// If there was no result, override the value with 'nil'
build.test(results, results);
build.jcc(ConditionX64::NotZero, skipnil);
build.mov(luauRegTag(ra), LUA_TNIL);
build.setLabel(skipnil);
}
// L->ci = cip
build.mov(qword[rState + offsetof(lua_State, ci)], cip);
// L->top = cip->top
build.mov(rax, qword[cip + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
}
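The fillnil loop in the Lua-closure path above only nil-initializes the missing fixed parameters; as a tiny sketch of the slot arithmetic, assuming callProlog left L->top at base + nargs and the loop bound at base + numparams:
constexpr int nilSlotsToFill(int numparams, int nargs)
{
    return numparams > nargs ? numparams - nargs : 0; // same exit condition as the emitted cmp/jcc loop
}
static_assert(nilSlotsToFill(3, 1) == 2, "f(x) on a 3-parameter function nil-fills two slots");
static_assert(nilSlotsToFill(2, 5) == 0, "extra arguments never trigger the fill loop");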
void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInterrupt(build, pcpos);
@ -85,7 +235,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
RegisterX64 nresults = esi;
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
build.lea(cip, qword[ci - sizeof(CallInfo)]);
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// res = ci->func; note: we assume CALL always puts func+args and expects results to start at func
build.mov(res, qword[ci + offsetof(CallInfo, func)]);
@ -100,8 +250,8 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
if (b == 0)
{
// Our instruction doesn't have any results, so just fill results expected in parent with 'nil'
build.test(nresults, nresults); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(Condition::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
build.test(nresults, nresults); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
build.mov(counter, nresults);
@ -109,29 +259,29 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(Condition::NotZero, repeatNilLoop);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
}
else if (b == 1)
{
// Try setting our 1 result
build.test(nresults, nresults);
build.jcc(Condition::Zero, skipResultCopy);
build.jcc(ConditionX64::Zero, skipResultCopy);
build.lea(counter, dword[nresults - 1]);
build.lea(counter, addr[nresults - 1]);
build.vmovups(xmm0, luauReg(ra));
build.vmovups(xmmword[res], xmm0);
build.add(res, sizeof(TValue));
// Fill the rest of the expected results with 'nil'
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(Condition::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(Condition::NotZero, repeatNilLoop);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
}
else
{
@ -140,16 +290,16 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
// Copy return values into parent stack (but only up to nresults!)
build.test(nresults, nresults);
build.jcc(Condition::Zero, skipResultCopy);
build.jcc(ConditionX64::Zero, skipResultCopy);
// vali = ra
build.lea(vali, luauRegValue(ra));
build.lea(vali, luauRegAddress(ra));
// Copy as much as possible for MULTRET calls, and only as much as needed otherwise
if (b == LUA_MULTRET)
build.mov(valend, qword[rState + offsetof(lua_State, top)]); // valend = L->top
else
build.lea(valend, luauRegValue(ra + b)); // valend = ra + b
build.lea(valend, luauRegAddress(ra + b)); // valend = ra + b
build.mov(counter, nresults);
@ -157,26 +307,26 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
build.setLabel(repeatValueLoop);
build.cmp(vali, valend);
build.jcc(Condition::NotBelow, exitValueLoop);
build.jcc(ConditionX64::NotBelow, exitValueLoop);
build.vmovups(xmm0, xmmword[vali]);
build.vmovups(xmmword[res], xmm0);
build.add(vali, sizeof(TValue));
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(Condition::NotZero, repeatValueLoop);
build.jcc(ConditionX64::NotZero, repeatValueLoop);
build.setLabel(exitValueLoop);
// Fill the rest of the expected results with 'nil'
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(Condition::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(Condition::NotZero, repeatNilLoop);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
}
build.setLabel(skipResultCopy);
@ -191,11 +341,11 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
// Unlikely, but this might be the last return from VM
build.test(byte[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_RETURN);
build.jcc(Condition::NotZero, helpers.exitNoContinueVm);
build.jcc(ConditionX64::NotZero, helpers.exitNoContinueVm);
Label skipFixedRetTop;
build.test(nresults, nresults); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(Condition::Less, skipFixedRetTop); // jl jumps if SF != OF
build.test(nresults, nresults); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, skipFixedRetTop); // jl jumps if SF != OF
build.mov(rax, qword[cip + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax); // L->top = cip->top
build.setLabel(skipFixedRetTop);
@ -214,7 +364,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
build.mov(execdata, qword[proto + offsetofProtoExecData]);
build.test(execdata, execdata);
build.jcc(Condition::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
// Change constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
@ -269,13 +419,13 @@ void emitInstJumpIfEq(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
build.mov(eax, luauRegTag(ra));
build.cmp(eax, luauRegTag(rb));
build.jcc(Condition::NotEqual, not_ ? target : exit);
build.jcc(ConditionX64::NotEqual, not_ ? target : exit);
// fast-path: number
build.cmp(eax, LUA_TNUMBER);
build.jcc(Condition::NotEqual, fallback);
build.jcc(ConditionX64::NotEqual, fallback);
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), luauRegValue(rb), Condition::NotEqual, not_ ? target : exit);
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), luauRegValue(rb), ConditionX64::NotEqual, not_ ? target : exit);
if (!not_)
build.jmp(target);
@ -285,10 +435,10 @@ void emitInstJumpIfEqFallback(AssemblyBuilderX64& build, const Instruction* pc,
{
Label& target = labelarr[pcpos + 1 + LUAU_INSN_D(*pc)];
jumpOnAnyCmpFallback(build, LUAU_INSN_A(*pc), pc[1], not_ ? Condition::NotEqual : Condition::Equal, target, pcpos);
jumpOnAnyCmpFallback(build, LUAU_INSN_A(*pc), pc[1], not_ ? ConditionX64::NotEqual : ConditionX64::Equal, target, pcpos);
}
void emitInstJumpIfCond(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Condition cond, Label& fallback)
void emitInstJumpIfCond(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, ConditionX64 cond, Label& fallback)
{
int ra = LUAU_INSN_A(*pc);
int rb = pc[1];
@ -302,7 +452,7 @@ void emitInstJumpIfCond(AssemblyBuilderX64& build, const Instruction* pc, int pc
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), luauRegValue(rb), cond, target);
}
void emitInstJumpIfCondFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Condition cond)
void emitInstJumpIfCondFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, ConditionX64 cond)
{
Label& target = labelarr[pcpos + 1 + LUAU_INSN_D(*pc)];
@ -324,7 +474,7 @@ void emitInstJumpxEqNil(AssemblyBuilderX64& build, const Instruction* pc, int pc
Label& target = labelarr[pcpos + 1 + LUAU_INSN_D(*pc)];
build.cmp(luauRegTag(ra), LUA_TNIL);
build.jcc(not_ ? Condition::NotEqual : Condition::Equal, target);
build.jcc(not_ ? ConditionX64::NotEqual : ConditionX64::Equal, target);
}
void emitInstJumpxEqB(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
@ -339,7 +489,7 @@ void emitInstJumpxEqB(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
jumpIfTagIsNot(build, ra, LUA_TBOOLEAN, not_ ? target : exit);
build.test(luauRegValueBoolean(ra), 1);
build.jcc((aux & 0x1) ^ not_ ? Condition::NotZero : Condition::Zero, target);
build.jcc((aux & 0x1) ^ not_ ? ConditionX64::NotZero : ConditionX64::Zero, target);
}
void emitInstJumpxEqN(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
@ -356,15 +506,15 @@ void emitInstJumpxEqN(AssemblyBuilderX64& build, const Instruction* pc, const TV
if (not_)
{
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), build.f64(kv.value.n), Condition::NotEqual, target);
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), build.f64(kv.value.n), ConditionX64::NotEqual, target);
}
else
{
// Compact equality check requires two labels, so it's not supported in generic 'jumpOnNumberCmp'
build.vmovsd(xmm0, luauRegValue(ra));
build.vucomisd(xmm0, build.f64(kv.value.n));
build.jcc(Condition::Parity, exit); // We first have to check PF=1 for NaN operands, because it also sets ZF=1
build.jcc(Condition::Zero, target); // Now that NaN is out of the way, we can check ZF=1 for equality
build.jcc(ConditionX64::Parity, exit); // We first have to check PF=1 for NaN operands, because it also sets ZF=1
build.jcc(ConditionX64::Zero, target); // Now that NaN is out of the way, we can check ZF=1 for equality
}
}
@ -381,7 +531,7 @@ void emitInstJumpxEqS(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
build.mov(rax, luauRegValue(ra));
build.cmp(rax, luauConstantValue(aux & 0xffffff));
build.jcc(not_ ? Condition::NotEqual : Condition::Equal, target);
build.jcc(not_ ? ConditionX64::NotEqual : ConditionX64::Equal, target);
}
static void emitInstBinaryNumeric(AssemblyBuilderX64& build, int ra, int rb, int rc, OperandX64 opc, int pcpos, TMS tm, Label& fallback)
@ -437,7 +587,7 @@ void emitInstBinary(AssemblyBuilderX64& build, const Instruction* pc, int pcpos,
void emitInstBinaryFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, TMS tm)
{
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, tm);
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauRegAddress(LUAU_INSN_C(*pc)), pcpos, tm);
}
void emitInstBinaryK(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, TMS tm, Label& fallback)
@ -447,7 +597,7 @@ void emitInstBinaryK(AssemblyBuilderX64& build, const Instruction* pc, int pcpos
void emitInstBinaryKFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, TMS tm)
{
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstantValue(LUAU_INSN_C(*pc)), pcpos, tm);
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstantAddress(LUAU_INSN_C(*pc)), pcpos, tm);
}
void emitInstPowK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos, Label& fallback)
@ -525,7 +675,7 @@ void emitInstMinus(AssemblyBuilderX64& build, const Instruction* pc, int pcpos,
void emitInstMinusFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauRegValue(LUAU_INSN_B(*pc)), pcpos, TM_UNM);
callArithHelper(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauRegAddress(LUAU_INSN_B(*pc)), pcpos, TM_UNM);
}
void emitInstLength(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label& fallback)
@ -563,8 +713,8 @@ void emitInstNewTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.mov(rArg2, aux);
build.mov(rArg3, b == 0 ? 0 : 1 << (b - 1));
build.mov(dwordReg(rArg2), aux);
build.mov(dwordReg(rArg3), 1 << (b - 1));
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_new)]);
build.mov(luauRegValue(ra), rax);
build.mov(luauRegTag(ra), LUA_TTABLE);
@ -610,7 +760,7 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, int pcpos
// c = L->top - rb
build.mov(cscaled, qword[rState + offsetof(lua_State, top)]);
build.lea(tmp, luauRegValue(rb));
build.lea(tmp, luauRegAddress(rb));
build.sub(cscaled, tmp); // Using byte difference
// L->top = L->ci->top
@ -633,7 +783,7 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, int pcpos
// Resize if h->sizearray < last
build.cmp(dword[table + offsetof(Table, sizearray)], last);
build.jcc(Condition::NotBelow, skipResize);
build.jcc(ConditionX64::NotBelow, skipResize);
// Argument setup reordered to avoid conflicts
LUAU_ASSERT(rArg3 != table);
@ -676,7 +826,7 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, int pcpos
if (c == LUA_MULTRET)
{
build.cmp(offset, limit);
build.jcc(Condition::NotBelow, endLoop);
build.jcc(ConditionX64::NotBelow, endLoop);
}
build.setLabel(repeatLoop);
@ -687,7 +837,7 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, int pcpos
build.add(offset, sizeof(TValue));
build.cmp(offset, limit);
build.jcc(Condition::Below, repeatLoop);
build.jcc(ConditionX64::Below, repeatLoop);
build.setLabel(endLoop);
}
@ -707,7 +857,7 @@ void emitInstGetUpval(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
Label skip;
// TODO: jumpIfTagIsNot can be generalized to take OperandX64 and then we can use it here; let's wait until we see this more though
build.cmp(dword[rax + offsetof(TValue, tt)], LUA_TUPVAL);
build.jcc(Condition::NotEqual, skip);
build.jcc(ConditionX64::NotEqual, skip);
// UpVal.v points to the value (either on stack, or on heap inside each UpVal, but we can deref it unconditionally)
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]);
@ -746,12 +896,12 @@ void emitInstCloseUpvals(AssemblyBuilderX64& build, const Instruction* pc, int p
// L->openupval != 0
build.mov(rax, qword[rState + offsetof(lua_State, openupval)]);
build.test(rax, rax);
build.jcc(Condition::Zero, skip);
build.jcc(ConditionX64::Zero, skip);
// ra <= L->openuval->v
build.lea(rcx, qword[rBase + ra * sizeof(TValue)]);
build.lea(rcx, addr[rBase + ra * sizeof(TValue)]);
build.cmp(rcx, qword[rax + offsetof(UpVal, v)]);
build.jcc(Condition::Above, skip);
build.jcc(ConditionX64::Above, skip);
build.mov(rArg2, rcx);
build.mov(rArg1, rState);
@ -771,7 +921,7 @@ static int emitInstFastCallN(AssemblyBuilderX64& build, const Instruction* pc, b
int nparams = customParams ? customParamCount : LUAU_INSN_B(call) - 1;
int nresults = LUAU_INSN_C(call) - 1;
int arg = customParams ? LUAU_INSN_B(*pc) : ra + 1;
OperandX64 args = customParams ? customArgs : luauRegValue(ra + 2);
OperandX64 args = customParams ? customArgs : luauRegAddress(ra + 2);
Label& exit = labelarr[pcpos + instLen];
@ -784,7 +934,7 @@ static int emitInstFastCallN(AssemblyBuilderX64& build, const Instruction* pc, b
if (nresults == LUA_MULTRET)
{
// L->top = ra + n;
build.lea(rax, qword[rBase + (ra + br.actualResultCount) * sizeof(TValue)]);
build.lea(rax, addr[rBase + (ra + br.actualResultCount) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
else if (nparams == LUA_MULTRET)
@ -822,7 +972,7 @@ static int emitInstFastCallN(AssemblyBuilderX64& build, const Instruction* pc, b
// TODO: for SystemV ABI we can compute the result directly into rArg6
// L->top - (ra + 1)
build.mov(rcx, qword[rState + offsetof(lua_State, top)]);
build.lea(rdx, qword[rBase + (ra + 1) * sizeof(TValue)]);
build.lea(rdx, addr[rBase + (ra + 1) * sizeof(TValue)]);
build.sub(rcx, rdx);
build.shr(rcx, kTValueSizeLog2);
@ -840,20 +990,20 @@ static int emitInstFastCallN(AssemblyBuilderX64& build, const Instruction* pc, b
}
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(arg));
build.mov(rArg4, nresults);
build.lea(rArg2, luauRegAddress(ra));
build.lea(rArg3, luauRegAddress(arg));
build.mov(dwordReg(rArg4), nresults);
build.call(rax);
build.test(eax, eax); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(Condition::Less, exit); // jl jumps if SF != OF
build.test(eax, eax); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, exit); // jl jumps if SF != OF
if (nresults == LUA_MULTRET)
{
// L->top = ra + n;
build.shl(rax, kTValueSizeLog2);
build.lea(rax, qword[rBase + rax + ra * sizeof(TValue)]);
build.lea(rax, addr[rBase + rax + ra * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
else if (nparams == LUA_MULTRET)
@ -875,13 +1025,13 @@ int emitInstFastCall1(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
int emitInstFastCall2(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
return emitInstFastCallN(
build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauRegValue(pc[1]), pcpos, /* instLen */ 2, labelarr);
build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauRegAddress(pc[1]), pcpos, /* instLen */ 2, labelarr);
}
int emitInstFastCall2K(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
return emitInstFastCallN(
build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauConstantValue(pc[1]), pcpos, /* instLen */ 2, labelarr);
build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauConstantAddress(pc[1]), pcpos, /* instLen */ 2, labelarr);
}
int emitInstFastCall(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
@ -916,16 +1066,16 @@ void emitInstForNPrep(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
Label reverse;
// step <= 0
jumpOnNumberCmp(build, noreg, step, zero, Condition::LessEqual, reverse);
jumpOnNumberCmp(build, noreg, step, zero, ConditionX64::LessEqual, reverse);
// TODO: target branches can probably be arranged better, but we need tests for NaN behavior preservation
// false: idx <= limit
jumpOnNumberCmp(build, noreg, idx, limit, Condition::LessEqual, exit);
jumpOnNumberCmp(build, noreg, idx, limit, ConditionX64::LessEqual, exit);
build.jmp(loopExit);
// true: limit <= idx
build.setLabel(reverse);
jumpOnNumberCmp(build, noreg, limit, idx, Condition::LessEqual, exit);
jumpOnNumberCmp(build, noreg, limit, idx, ConditionX64::LessEqual, exit);
build.jmp(loopExit);
// TODO: place at the end of the function
@ -958,15 +1108,15 @@ void emitInstForNLoop(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
Label reverse, exit;
// step <= 0
jumpOnNumberCmp(build, noreg, step, zero, Condition::LessEqual, reverse);
jumpOnNumberCmp(build, noreg, step, zero, ConditionX64::LessEqual, reverse);
// false: idx <= limit
jumpOnNumberCmp(build, noreg, idx, limit, Condition::LessEqual, loopRepeat);
jumpOnNumberCmp(build, noreg, idx, limit, ConditionX64::LessEqual, loopRepeat);
build.jmp(exit);
// true: limit <= idx
build.setLabel(reverse);
jumpOnNumberCmp(build, noreg, limit, idx, Condition::LessEqual, loopRepeat);
jumpOnNumberCmp(build, noreg, limit, idx, ConditionX64::LessEqual, loopRepeat);
build.setLabel(exit);
}
@ -1010,13 +1160,13 @@ void emitinstForGLoop(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
// while (unsigned(index) < unsigned(sizearray))
Label arrayLoop = build.setLabel();
build.cmp(dwordReg(index), dword[table + offsetof(Table, sizearray)]);
build.jcc(Condition::NotBelow, isIpairsIter ? exit : skipArray);
build.jcc(ConditionX64::NotBelow, isIpairsIter ? exit : skipArray);
// If element is nil, we increment the index; if it's not, we still need 'index + 1' inside
build.inc(index);
build.cmp(dword[elemPtr + offsetof(TValue, tt)], LUA_TNIL);
build.jcc(Condition::Equal, isIpairsIter ? exit : skipArrayNil);
build.jcc(ConditionX64::Equal, isIpairsIter ? exit : skipArrayNil);
// setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
build.mov(luauRegValue(ra + 2), index);
@ -1045,10 +1195,10 @@ void emitinstForGLoop(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
// Call helper to assign next node value or to signal loop exit
build.mov(rArg1, rState);
// rArg2 and rArg3 are already set
build.lea(rArg4, luauRegValue(ra));
build.lea(rArg4, luauRegAddress(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, forgLoopNodeIter)]);
build.test(al, al);
build.jcc(Condition::NotZero, loopRepeat);
build.jcc(ConditionX64::NotZero, loopRepeat);
}
}
@ -1062,12 +1212,12 @@ void emitinstForGLoopFallback(AssemblyBuilderX64& build, const Instruction* pc,
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.mov(rArg2, ra);
build.mov(rArg3, aux);
build.mov(dwordReg(rArg2), ra);
build.mov(dwordReg(rArg3), aux);
build.call(qword[rNativeContext + offsetof(NativeContext, forgLoopNonTableFallback)]);
emitUpdateBase(build);
build.test(al, al);
build.jcc(Condition::NotZero, loopRepeat);
build.jcc(ConditionX64::NotZero, loopRepeat);
}
void emitInstForGPrepNext(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Label& fallback)
@ -1103,7 +1253,7 @@ void emitInstForGPrepInext(AssemblyBuilderX64& build, const Instruction* pc, int
build.vxorpd(xmm0, xmm0, xmm0);
build.vmovsd(xmm1, luauRegValue(ra + 2));
jumpOnNumberCmp(build, noreg, xmm0, xmm1, Condition::NotEqual, fallback);
jumpOnNumberCmp(build, noreg, xmm0, xmm1, ConditionX64::NotEqual, fallback);
build.mov(luauRegTag(ra), LUA_TNIL);
@ -1121,8 +1271,8 @@ void emitInstForGPrepXnextFallback(AssemblyBuilderX64& build, const Instruction*
Label& target = labelarr[pcpos + 1 + LUAU_INSN_D(*pc)];
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.mov(rArg3, pcpos + 1);
build.lea(rArg2, luauRegAddress(ra));
build.mov(dwordReg(rArg3), pcpos + 1);
build.call(qword[rNativeContext + offsetof(NativeContext, forgPrepXnextFallback)]);
build.jmp(target);
}
@ -1216,7 +1366,7 @@ void emitInstGetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcp
// unsigned(c) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], c);
build.jcc(Condition::BelowEqual, fallback);
build.jcc(ConditionX64::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
@ -1244,7 +1394,7 @@ void emitInstSetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcp
// unsigned(c) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], c);
build.jcc(Condition::BelowEqual, fallback);
build.jcc(ConditionX64::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
jumpIfTableIsReadOnly(build, table, fallback);
@ -1284,7 +1434,7 @@ void emitInstGetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
// unsigned(index - 1) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], eax);
build.jcc(Condition::BelowEqual, fallback);
build.jcc(ConditionX64::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
@ -1296,7 +1446,7 @@ void emitInstGetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
void emitInstGetTableFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
callGetTable(build, LUAU_INSN_B(*pc), luauRegValue(LUAU_INSN_C(*pc)), LUAU_INSN_A(*pc), pcpos);
callGetTable(build, LUAU_INSN_B(*pc), luauRegAddress(LUAU_INSN_C(*pc)), LUAU_INSN_A(*pc), pcpos);
}
void emitInstSetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Label& fallback)
@ -1319,7 +1469,7 @@ void emitInstSetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
// unsigned(index - 1) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], eax);
build.jcc(Condition::BelowEqual, fallback);
build.jcc(ConditionX64::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
jumpIfTableIsReadOnly(build, table, fallback);
@ -1335,7 +1485,7 @@ void emitInstSetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpo
void emitInstSetTableFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
callSetTable(build, LUAU_INSN_B(*pc), luauRegValue(LUAU_INSN_C(*pc)), LUAU_INSN_A(*pc), pcpos);
callSetTable(build, LUAU_INSN_B(*pc), luauRegAddress(LUAU_INSN_C(*pc)), LUAU_INSN_A(*pc), pcpos);
}
void emitInstGetImport(AssemblyBuilderX64& build, const Instruction* pc, Label& fallback)
@ -1348,7 +1498,7 @@ void emitInstGetImport(AssemblyBuilderX64& build, const Instruction* pc, Label&
// note: if import failed, k[] is nil; we could check this during codegen, but we instead use runtime fallback
// this allows us to handle ahead-of-time codegen smoothly when an import fails to resolve at runtime
build.cmp(luauConstantTag(k), LUA_TNIL);
build.jcc(Condition::Equal, fallback);
build.jcc(ConditionX64::Equal, fallback);
build.vmovups(xmm0, luauConstant(k));
build.vmovups(luauReg(ra), xmm0);
@ -1367,7 +1517,7 @@ void emitInstGetImportFallback(AssemblyBuilderX64& build, const Instruction* pc,
build.mov(rArg1, rState);
build.mov(rArg2, qword[rax + offsetof(Closure, env)]);
build.mov(rArg3, rConstants);
build.mov(rArg4, aux);
build.mov(dwordReg(rArg4), aux);
if (build.abi == ABIX64::Windows)
build.mov(sArg5, 0);
@ -1470,8 +1620,8 @@ void emitInstConcat(AssemblyBuilderX64& build, const Instruction* pc, int pcpos,
// luaV_concat(L, c - b + 1, c)
build.mov(rArg1, rState);
build.mov(rArg2, rc - rb + 1);
build.mov(rArg3, rc);
build.mov(dwordReg(rArg2), rc - rb + 1);
build.mov(dwordReg(rArg3), rc);
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_concat)]);
emitUpdateBase(build);

View file

@ -14,7 +14,7 @@ namespace CodeGen
{
class AssemblyBuilderX64;
enum class Condition;
enum class ConditionX64 : uint8_t;
struct Label;
struct ModuleHelpers;
@ -24,14 +24,15 @@ void emitInstLoadN(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstLoadK(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstLoadKX(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstMove(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJump(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpBack(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpIf(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, bool not_);
void emitInstJumpIfEq(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, bool not_, Label& fallback);
void emitInstJumpIfEqFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, bool not_);
void emitInstJumpIfCond(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Condition cond, Label& fallback);
void emitInstJumpIfCondFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, Condition cond);
void emitInstJumpIfCond(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, ConditionX64 cond, Label& fallback);
void emitInstJumpIfCondFallback(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr, ConditionX64 cond);
void emitInstJumpX(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpxEqNil(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpxEqB(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);

File diff suppressed because it is too large Load diff

View file

@ -10,84 +10,15 @@ typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);

View file

@ -35,10 +35,9 @@ NativeState::~NativeState() = default;
void initFallbackTable(NativeState& data)
{
// TODO: lvmexecute_split.py could be taught to generate a subset of instructions we actually need
// When fallback is completely removed, remove it from includeInsts list in lvmexecute_split.py
CODEGEN_SET_FALLBACK(LOP_NEWCLOSURE, 0);
CODEGEN_SET_FALLBACK(LOP_NAMECALL, 0);
CODEGEN_SET_FALLBACK(LOP_CALL, kFallbackUpdateCi | kFallbackCheckInterrupt);
CODEGEN_SET_FALLBACK(LOP_FORGPREP, kFallbackUpdatePc);
CODEGEN_SET_FALLBACK(LOP_GETVARARGS, 0);
CODEGEN_SET_FALLBACK(LOP_DUPCLOSURE, 0);
@ -86,6 +85,8 @@ void initHelperFunctions(NativeState& data)
data.context.forgLoopNodeIter = forgLoopNodeIter;
data.context.forgLoopNonTableFallback = forgLoopNonTableFallback;
data.context.forgPrepXnextFallback = forgPrepXnextFallback;
data.context.callProlog = callProlog;
data.context.callEpilogC = callEpilogC;
}
} // namespace CodeGen

View file

@ -26,8 +26,6 @@ class UnwindBuilder;
using FallbackFn = const Instruction*(lua_State* L, const Instruction* pc, StkId base, TValue* k);
constexpr uint8_t kFallbackUpdatePc = 1 << 0;
constexpr uint8_t kFallbackUpdateCi = 1 << 1;
constexpr uint8_t kFallbackCheckInterrupt = 1 << 2;
struct NativeFallback
{
@ -37,7 +35,8 @@ struct NativeFallback
struct NativeProto
{
uintptr_t* instTargets = nullptr;
uintptr_t entryTarget = 0;
uintptr_t* instTargets = nullptr; // TODO: NativeProto should be variable-size with all target embedded
Proto* proto = nullptr;
uint32_t location = 0;
@ -85,6 +84,8 @@ struct NativeContext
bool (*forgLoopNodeIter)(lua_State* L, Table* h, int index, TValue* ra) = nullptr;
bool (*forgLoopNonTableFallback)(lua_State* L, int insnA, int aux) = nullptr;
void (*forgPrepXnextFallback)(lua_State* L, TValue* ra, int pc) = nullptr;
Closure* (*callProlog)(lua_State* L, TValue* ra, StkId argtop, int nresults) = nullptr;
void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr;
};
struct NativeState

View file

@ -13,11 +13,10 @@
// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf [System V Application Binary Interface (AMD64 Architecture Processor Supplement)]
// Interaction between Dwarf2 and System V ABI can be found in sections '3.6.2 DWARF Register Number Mapping' and '4.2.4 EH_FRAME sections'
// Call frame instruction opcodes
// Call frame instruction opcodes (Dwarf2, page 78, ch. 7.23 figure 37)
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
@ -33,17 +32,11 @@
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_nop 0x00
#define DW_CFA_lo_user 0x1c
#define DW_CFA_hi_user 0x3f
// Register numbers for x64
// Register numbers for x64 (System V ABI, page 57, ch. 3.7, figure 3.36)
#define DW_REG_RAX 0
#define DW_REG_RDX 1
#define DW_REG_RCX 2
@ -197,7 +190,12 @@ void UnwindBuilderDwarf2::allocStack(int size)
void UnwindBuilderDwarf2::setupFrameReg(RegisterX64 reg, int espOffset)
{
// Not required for unwinding
if (espOffset != 0)
pos = advanceLocation(pos, 5); // REX.W lea rbp, [rsp + imm8]
else
pos = advanceLocation(pos, 3); // REX.W mov rbp, rsp
// CFA is based on rsp, so no additional commands are required
}
void UnwindBuilderDwarf2::finish()

View file

@ -77,7 +77,11 @@ void UnwindBuilderWin::setupFrameReg(RegisterX64 reg, int espOffset)
frameReg = reg;
frameRegOffset = uint8_t(espOffset / 16);
prologSize += 5; // REX.W lea rbp, [rsp + imm8]
if (espOffset != 0)
prologSize += 5; // REX.W lea rbp, [rsp + imm8]
else
prologSize += 3; // REX.W mov rbp, rsp
unwindCodes.push_back({prologSize, UWOP_SET_FPREG, frameRegOffset});
}

View file

@ -13,6 +13,8 @@ inline bool isFlagExperimental(const char* flag)
static const char* kList[] = {
"LuauInterpolatedStringBaseSupport",
"LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
"LuauOptionalNextKey", // waiting for a fix to land in lua-apps
"LuauTryhardAnd", // waiting for a fix in graphql-lua -> apollo-client-lia -> lua-apps
// makes sure we always have at least one entry
nullptr,
};

View file

@ -55,18 +55,23 @@ target_sources(Luau.Compiler PRIVATE
# Luau.CodeGen Sources
target_sources(Luau.CodeGen PRIVATE
CodeGen/include/Luau/AddressA64.h
CodeGen/include/Luau/AssemblyBuilderA64.h
CodeGen/include/Luau/AssemblyBuilderX64.h
CodeGen/include/Luau/CodeAllocator.h
CodeGen/include/Luau/CodeBlockUnwind.h
CodeGen/include/Luau/CodeGen.h
CodeGen/include/Luau/Condition.h
CodeGen/include/Luau/ConditionA64.h
CodeGen/include/Luau/ConditionX64.h
CodeGen/include/Luau/Label.h
CodeGen/include/Luau/OperandX64.h
CodeGen/include/Luau/RegisterA64.h
CodeGen/include/Luau/RegisterX64.h
CodeGen/include/Luau/UnwindBuilder.h
CodeGen/include/Luau/UnwindBuilderDwarf2.h
CodeGen/include/Luau/UnwindBuilderWin.h
CodeGen/src/AssemblyBuilderA64.cpp
CodeGen/src/AssemblyBuilderX64.cpp
CodeGen/src/CodeAllocator.cpp
CodeGen/src/CodeBlockUnwind.cpp
@ -103,6 +108,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/BuiltinDefinitions.h
Analysis/include/Luau/Clone.h
Analysis/include/Luau/Config.h
Analysis/include/Luau/Connective.h
Analysis/include/Luau/Constraint.h
Analysis/include/Luau/ConstraintGraphBuilder.h
Analysis/include/Luau/ConstraintSolver.h
@ -156,6 +162,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/BuiltinDefinitions.cpp
Analysis/src/Clone.cpp
Analysis/src/Config.cpp
Analysis/src/Connective.cpp
Analysis/src/Constraint.cpp
Analysis/src/ConstraintGraphBuilder.cpp
Analysis/src/ConstraintSolver.cpp
@ -299,6 +306,7 @@ if(TARGET Luau.UnitTest)
tests/AstQueryDsl.cpp
tests/ConstraintGraphBuilderFixture.cpp
tests/Fixture.cpp
tests/AssemblyBuilderA64.test.cpp
tests/AssemblyBuilderX64.test.cpp
tests/AstJsonEncoder.test.cpp
tests/AstQuery.test.cpp

View file

@ -20,14 +20,6 @@
#define LUAU_FASTMATH_END
#endif
// Some functions like floor/ceil have SSE4.1 equivalents but we currently support systems without SSE4.1
// On newer GCC and Clang we can use function multi-versioning to generate SSE4.1 code plus CPUID based dispatch.
#if !defined(__APPLE__) && (defined(__x86_64__) || defined(_M_X64)) && ((defined(__clang__) && __clang_major__ >= 14) || (defined(__GNUC__) && __GNUC__ >= 6)) && !defined(__SSE4_1__)
#define LUAU_DISPATCH_SSE41 __attribute__((target_clones("default", "sse4.1")))
#else
#define LUAU_DISPATCH_SSE41
#endif
// Used on functions that have a printf-like interface to validate them statically
#if defined(__GNUC__)
#define LUA_PRINTF_ATTR(fmt, arg) __attribute__((format(printf, fmt, arg)))

View file

@ -96,7 +96,6 @@ static int luauF_atan(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
}
LUAU_FASTMATH_BEGIN
LUAU_DISPATCH_SSE41
static int luauF_ceil(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1 && ttisnumber(arg0))
@ -160,7 +159,6 @@ static int luauF_exp(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
}
LUAU_FASTMATH_BEGIN
LUAU_DISPATCH_SSE41
static int luauF_floor(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1 && ttisnumber(arg0))
@ -938,7 +936,6 @@ static int luauF_sign(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
}
LUAU_FASTMATH_BEGIN
LUAU_DISPATCH_SSE41
static int luauF_round(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1 && ttisnumber(arg0))
@ -1326,9 +1323,9 @@ const luau_FastFunction luauF_table[256] = {
luauF_getmetatable,
luauF_setmetatable,
// When adding builtins, add them above this line; what follows is 64 "dummy" entries with luauF_missing fallback.
// This is important so that older versions of the runtime that don't support newer builtins automatically fall back via luauF_missing.
// Given the builtin addition velocity this should always provide a larger compatibility window than bytecode versions suggest.
// When adding builtins, add them above this line; what follows is 64 "dummy" entries with luauF_missing fallback.
// This is important so that older versions of the runtime that don't support newer builtins automatically fall back via luauF_missing.
// Given the builtin addition velocity this should always provide a larger compatibility window than bytecode versions suggest.
#define MISSING8 luauF_missing, luauF_missing, luauF_missing, luauF_missing, luauF_missing, luauF_missing, luauF_missing, luauF_missing
MISSING8,

View file

@ -34,7 +34,6 @@ inline bool luai_vecisnan(const float* a)
}
LUAU_FASTMATH_BEGIN
// TODO: LUAU_DISPATCH_SSE41 would be nice here, but clang-14 doesn't support it correctly on inline functions...
inline double luai_nummod(double a, double b)
{
return a - floor(a / b) * b;

View file

@ -0,0 +1,82 @@
---
layout: single
title: "Luau Recap: September & October 2022"
---
Luau is our new language that you can read more about at [https://luau-lang.org](https://luau-lang.org).
[Cross-posted to the [Roblox Developer Forum](https://devforum.roblox.com/t/luau-recap-september-october-2022/).]
## Semantic subtyping
One of the most important goals for Luau is to avoid *false
positives*, that is, cases where Script Analysis reports a type error,
but in fact the code is correct. This is very frustrating, especially
for beginners. Spending time chasing down a gnarly type error only to
discover that it was the type system that's wrong is nobody's idea of fun!
We are pleased to announce that a major component of minimizing false
positives has landed, *semantic subtyping*, which removes a class of false positives caused
by failures of subtyping. For example, in the program
```lua
local x : CFrame = CFrame.new()
local y : Vector3 | CFrame
if (math.random()) then
y = CFrame.new()
else
y = Vector3.new()
end
local z : Vector3 | CFrame = x * y -- Type Error!
```
an error is reported, even though there is no problem at runtime. This
is because `CFrame`'s multiplication has two overloads:
```lua
((CFrame, CFrame) -> CFrame)
& ((CFrame, Vector3) -> Vector3)
```
The current syntax-driven algorithm for subtyping is not sophisticated
enough to realize that this is a subtype of the desired type:
```lua
(CFrame, Vector3 | CFrame) -> (Vector3 | CFrame)
```
Our new algorithm is driven by the semantics of subtyping, not the syntax of types,
and eliminates this class of false positives.
If you want to know more about semantic subtyping in Luau, check out our
[technical blog post](https://luau-lang.org/2022/10/31/luau-semantic-subtyping.html)
on the subject.
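As a rough sketch of the relation involved (assuming the Roblox `CFrame` and `Vector3` types from the example above; the value below is a placeholder used purely for the type-level illustration):
```lua
type Mul = ((CFrame, CFrame) -> CFrame) & ((CFrame, Vector3) -> Vector3)
type Wanted = (CFrame, Vector3 | CFrame) -> (Vector3 | CFrame)
-- Placeholder value; only the types matter here.
local mul = (nil :: any) :: Mul
-- Under semantic subtyping, the intersection of overloads is accepted where the
-- union-accepting function type is expected; the old syntax-driven check rejected this.
local wanted: Wanted = mul
```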
## Other analysis improvements
* Improve stringification of function types.
* Improve parse error warnings in the case of missing tokens after a comma.
* Improve typechecking of expressions involving variadics such as `{ ... }`.
* Make sure modules don't return unbound generic types.
* Improve cycle detection in stringifying types.
* Improve type inference of combinations of intersections and generic functions.
* Improve typechecking when calling a function which returns a variadic e.g. `() -> (number...)`.
* Improve typechecking when passing a function expression as a parameter to a function.
* Improve error reporting locations.
* Remove some sources of memory corruption and crashes.
## Other runtime and debugger improvements
* Improve performance of accessing debug info.
* Improve performance of `getmetatable` and `setmetatable`.
* Remove a source of freezes in the debugger.
* Improve GC accuracy and performance.
## Thanks
Thanks for all the contributions!
* [AllanJeremy](https://github.com/AllanJeremy)
* [JohnnyMorganz](https://github.com/JohnnyMorganz)
* [jujhar16](https://github.com/jujhar16)
* [petrihakkinen](https://github.com/petrihakkinen)

View file

@ -0,0 +1,104 @@
---
layout: single
title: "Luau origins and evolution"
author: Arseny Kapoulkine
---
At the heart of Roblox technology lies [Luau](https://luau-lang.org), a scripting language derived from Lua 5.1 that is being [developed](https://github.com/Roblox/luau) by an internal team of programming language experts with the help of open source contributors.
It powers all user-generated content on Roblox, providing access to a very rich set of APIs that allows manipulation of objects in the 3D world, backend API access, UI interaction and more. Hundreds of thousands of developers write code in Luau every month, with top experiences using hundreds of thousands of lines of code, adding up to hundreds of millions of lines of code across the platform. For many of them, it is the first programming language they learn, and one they spend the majority of their time programming in. Developers also customize their workflows by writing plugins for Roblox Studio, where they work on their experiences, using an extended API surface to interact with all aspects of the editor.
It also powers a lot of application code that Roblox engineers are writing: Universal App, the gateway to the worlds of Roblox that is used by tens of millions of people every day, has 95% of its functionality implemented in Luau, and Roblox Studio has a lot of critical built-in functionality such as the part and terrain editors, marketplace browser, avatar and animation editors, material manager and more, implemented in Luau as plugins, mostly using the same APIs that developers have access to. Every week, updates to this internal codebase, which is now over 2 million lines, are shipped to all Roblox users.
In addition to Roblox use cases, Luau is also open-source and is seeing an increased adoption in other projects and applications.
But why did we use Lua in the first place, and why did we decide to pursue building a new language on top of it?
# Early beginnings
Around 2006, when a very early version of the Roblox platform was developed, the question of user generated behaviors emerged. Before that, users were able to build non-interactive content on Roblox, and the only form of interaction was physics simulation. While this provided rich emergent behavior, it was hard to build gameplay on top of this: for example, to build a Capture The Flag game, you need to handle collision between players and flags spread throughout the map with a bit of logic that dictates how to adjust team points and when to remove or recreate the objects.
After an early and brief misstep when we decided to add a few gameplay objects to the core definition of Roblox worlds (some developers may recognize FlagStand as a class name...), the Roblox co-founder Erik Cassel realized that an approach like this fundamentally limits the power of user-generated content. It's not enough to give creators the basic blocks on top of which to build their creations; it's critical to expose the power of a full Turing-complete programming language. Without this, the expressive capability and the reach of the platform would have been restricted far too much.
But which programming language to choose? This is where [Lua](https://lua.org/), which was, and still is, one of the dominant programming languages used in video games, comes in.
In addition to its simplicity, which made the language easy to learn and get productive in, Lua was the fastest scripting language compared to popular alternatives like Python or JavaScript at the time[^1]; it was designed to be embedded, which made it easy to expose APIs from the host application to the scripts and gave the host a high degree of execution control; and it implemented coroutines, a very powerful concurrency primitive that made it easy and intuitive to script behaviors for independent actors in game using linear control flow.
Instead of having a large standard library, the expectation was that the embedding application would define the set of APIs that it needed, as well as establish policies for running the code - which gave us a lot of freedom in how to structure the APIs and when the scripts would get triggered during the simulation of a single frame.
# Power of simplicity
Lua is a simple language. What does simplicity mean for us?
Being a simple language means having a small set of features. Lua has all the fundamental features but doesn't have a lot of syntax sugar - this means the language is easier to teach and learn, and you rarely run into code that's difficult to understand syntactically because it uses an unfamiliar construct. Of course, this also means that some programs in Lua are longer than equivalent programs in languages that have more dedicated constructs to solve specific problems, such as list comprehensions in Python.
Being a simple language means having a minimal set of rules for every feature. Lua does deviate from this in certain respects (which is to say, the language could have been even simpler!), but notably for a dynamic language the behavior of fundamental operators is generally easy to explain and unsurprising - for example, two values in Lua are equal iff they have the same type and the same value, as such `0 == "0"` is `false`; as another example, `for` loops introduce unique variable bindings on every iteration, as such capturing the iteration variable in a closure produces unique values. These decisions lead to a more concise and efficient implementation and eliminate a class of bugs in programs.
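For instance, a minimal sketch of both behaviors:
```lua
-- Equality requires both the same type and the same value:
print(0 == "0") -- false
-- Each loop iteration introduces a fresh binding of `i`,
-- so every closure captures its own value:
local closures = {}
for i = 1, 3 do
    closures[i] = function()
        return i
    end
end
print(closures[1](), closures[2](), closures[3]()) -- 1 2 3
```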
Being a simple language means having a small implementation. This may be immaterial to people writing code in the language, but it leads to an implementation that can be of higher quality; simpler implementations can also be easier to optimize for memory or performance, and are easier to build upon.
Developers on the Roblox platform have very diverse programming backgrounds. Some are writing their first line of code in Roblox Studio, while others have computer science degrees and experience working in multiple different programming languages. While it's always possible to support two different programming languages that target different segments of the audience, that fragments the ecosystem and makes the programming story less consistent (impacting documentation, tutorials, code reuse, and the ability for community members to help each other write code, and presenting challenges with interaction between different languages in the same experience, among other things). A better outcome is one where a single language can serve both audiences - this requires a language that strikes a balance between simplicity and generality, and while Lua isn't perfect here, it's great as a foundation for a language like this[^2].
In many ways, Lua is simultaneously simple and pragmatic: many parts of the language are difficult to make much better without a lot of added complexity, but at the same time it requires little in the way of extra functionality to be able to solve problems efficiently. That said, no language is perfect, and within several areas of Lua we felt that the tradeoffs weren't quite right for our use case.
# Respectful evolution
In 2019, we decided to build [Luau](https://luau-lang.org) - a language derived from Lua and compatible with Lua 5.1, which is the version we've been using all these years. At the time we evaluated other routes, but ultimately settled on this as the best long-term option.
On one hand, we loved a lot of things about Lua - both design-wise and implementation-wise; while there were some decisions we felt were suboptimal, by and large it was an almost perfect foundation for what we've set out to achieve.
On the other hand, we've been running into the limitations of Lua on large code bases in the absence of type checking, performance was good but not great, and there were some missing features that would have been great to have.
Some of the things we've been missing have been added in later versions of Lua, yet we were still using Lua 5.1. While we would have loved to use a later version of the language standard, Lua 5.x releases are not backwards compatible, and some releases remove support for features that are in wide use at Roblox. For Roblox, backwards compatibility is an essential feature of the platform - while we don't have a guarantee that content created 10 years ago still works, to the extent that we can achieve that without restricting the platform evolution too much, we try.
What we've realized is that Lua is a great foundation for a perfect language that we can build for Roblox.
We would maintain backwards compatibility with Lua 5.1 but evolve the language from there; sometimes this means taking later features from Lua that don't conflict with the existing language or our design values, sometimes this means innovating beyond what Lua has done. Crucially, we must maintain the balance between simplicity and power - we still value simplicity, we still need to avoid a feature explosion to ensure that the features compose and are of high quality, and we still need the language to be a good fit for beginners.
One of the largest limitations that we've seen is the lack of type checking, which makes it easy to make mistakes in large code bases; as such, [support for type checking](https://luau-lang.org/typecheck) was a requirement for Luau. However, it's important that the type checker is mostly transparent to developers who don't want to invest the time to learn it - anything else would change the learning curve too much for the language to be suitable for beginners. To that end, we've been investing in gradual typing, and our type checker is learning to strike a balance between inferring useful types for completely untyped programs (which, among other things, greatly enhances the editing experience through type-aware autocomplete) and avoiding false positive diagnostics that can be confusing and distracting.
While we did need to introduce [extra syntax](https://luau-lang.org/syntax) to the language - most notably, to support optional type annotations - it was important for us to maintain the cohesion of the overall syntax. We aren't seeking to make a new language with a syntax alien to Lua programmers - Luau programs are still recognizably Lua, and to the extent possible we try to avoid new syntactic features. In a sense, we still want the syntax, semantics, and the runtime to be simple and minimal - but at the same time we have important problems to solve with respect to ergonomics, robustness and performance of the language, and solving some of them requires having slightly more complex syntax, semantics, or implementation.
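A small sketch of what those optional annotations look like in practice (annotated and unannotated code mix freely):
```lua
--!strict
-- An annotated function: parameter and return types are checked.
local function greet(name: string): string
    return "Hello, " .. name
end
-- The caller needs no annotation; `message` is inferred as string,
-- which also feeds type-aware autocomplete.
local message = greet("Luau")
print(message)
```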
So in finding ways to evolve Luau, we strive to design features that feel like they would be at home in Lua. At the same time, we've adopted a more open evolution process - the language development is driven [through RFCs](https://github.com/Roblox/luau/blob/master/rfcs/README.md), public design documents that anyone can contribute to - this is in contrast with Lua, which has a very closed development process, and is one of the reasons why it would have been difficult for us to keep using Lua, as we wouldn't get a say in its development. That said, to ensure the design criteria are met, it's important that the Luau development team at Roblox maintains a final say over the design and implementation of the language[^3], while taking the community's proposals and input into consideration.
# Importance of co-design
The Luau language is developed in concert with the compiler, runtime, type checker and other analysis tools, the autocomplete engine and other tooling, and that development is guided by the vast volume of existing Luau code, both internal and external.
This is one of the key principles behind our evolution philosophy - neither layer is developed in isolation, and instead concerns at every level inform all other aspects of the language design and implementation.
This means that when designing language features, we make sure that they can be implemented efficiently, type checked properly, and supported well in editing and analysis tools, and that they have a positive impact on the code internal and external engineers write. When we find issues in any component, we can always ask what changes to other components or even the language design would make for a better overall solution.
This avoids some classes of design problems: for example, we won't specify a language feature that has a prohibitively high implementation cost, as it violates our simplicity criteria, or that is impractical to implement efficiently, as that would create a performance hazard. This also means that when implementing various components of the language we cross-check the concerns and applicability of these across the entire stack - for example, we've reworked our auto-complete system to use the same type inference engine that the type checking / analysis tools use, which had immense benefits for the experience of editing code, but also applied significant back pressure on the type inference itself, forcing us to improve it substantially and fix a lot of corner cases that would otherwise have lingered unnoticed.
Whenever we develop features or optimizations, improve our analysis engine, or enhance the standard libraries, we rely heavily on code written in Luau to validate our hypotheses. When working on new features we find motivation in the real problems that we see our developers face. For example, we implemented the [new ternary operator](https://luau-lang.org/syntax#if-then-else-expressions) after seeing a large set of cases where the existing Lua `a and b or c` pattern was error-prone for boolean values, which made it easy to accidentally introduce a mistake that was hard to identify automatically. All optimizations and new analysis features are validated on our internal 2M LOC codebase before being added to Luau, which allows us to quickly get initial validation of ideas, or to invalidate some approaches as infeasible or unhelpful.
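A sketch of the kind of mistake the `a and b or c` idiom invites, and the if-then-else expression that replaces it:
```lua
-- The classic idiom silently misbehaves when the "then" value can be false (or nil):
local function choose(cond: boolean)
    return cond and false or true -- always returns true, whatever cond is
end
-- The if-then-else expression evaluates as intended:
local function chooseFixed(cond: boolean)
    return if cond then false else true
end
```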
In addition to that, while we don't have direct access to community-developed source code for privacy reasons, we can run experiments and collect telemetry[^4], which also helps us make decisions regarding backwards compatibility. Due to [Hyrum's law](https://www.hyrumslaw.com/), technically any change in the language or libraries, no matter how small, would be backwards incompatible - instead we strike a pragmatic balance between strict backwards compatibility[^5] and practical compatibility concerns. For example, later versions of Lua make some library functions like `table.insert`/`table.remove` more strict about how they handle out-of-range indices. We evaluated this change for compatibility by collecting telemetry on the use of out-of-range indices in these functions on the Roblox platform and concluded that applying the stricter checking would break existing programs; instead we had to slightly adjust the rules for out-of-range behavior in ways that were benign for existing code but prevented catastrophic performance degradation for large out-of-range indices. Because we couldn't afford to introduce new runtime errors in this case, we also added a set of linting rules to our analysis engine to flag potential misuse of `table.insert`/`table.remove` before the code ever gets to run - this diagnostic is informational and as such doesn't affect backwards compatibility, but it does help prevent mistakes.
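For illustration, this is roughly the shape of out-of-range use discussed above; whether a particular lint warning fires for it is a detail of the analysis engine and an assumption here:
```lua
local t = {1, 2, 3}
-- Inserting far past the end of the array part is almost always a mistake;
-- this is the pattern the linting rules aim to flag before the code ever runs.
table.insert(t, 10, "x")
```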
There are also cases where this co-design approach prevents the introduction of features that can lead to easy misuse - something that can be difficult to see in the design of the feature itself, but becomes more apparent when you consider features in the context of the entire ecosystem. This is a good thing - it means co-design acts as a forcing function on language simplicity and makes it easier to flag potential bad interactions between different language features, or language features and tooling, or language features and existing programming patterns that are in widespread use in real-world code. By making sure that all features are validated for their impact across the stack and on code written in Luau, we ultimately get a better, simpler and more cohesive language.
# Efficient execution
One of the critical goals in front of Luau is efficiency, both from the performance and the memory perspective. There are only so many milliseconds in a frame, and we simultaneously see the need to increase the scale and complexity of simulated experiences, which requires more memory and computation, as well as the need to fit more comfortably into smaller performance and memory budgets for a better experience on smaller devices. In fact, one of the motivations for Luau in 2019 was improved performance, as we saw many opportunities to go beyond Lua with a redesigned implementation.
Crucially, our performance needs are somewhat unique and require somewhat unique solutions.
We need Luau to run on many platforms where native code generation is either prohibited by the platform vendor or impractical due to tight memory constraints. As such, in terms of execution performance it's critical that we have a very fast interpreter[^6]. However, we have freedom in terms of the high-level design of the entire stack - for example, clients never see the source code of the scripts as all compilation to bytecode happens on the server; this gives us an opportunity to perform more involved and expensive optimizations during that process as well as have the smallest possible startup time on the client without complex pre-parse steps. Notably, our bytecode compiler performs a series of high-level optimizations, including function inlining and loop unrolling, that in other dynamic languages are often left to the just-in-time compiler.
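For example, a loop with constant bounds calling a small function is the kind of shape these passes target; how far any particular snippet is optimized is an implementation detail and an assumption here:
```lua
local function square(x: number): number
    return x * x
end
local sum = 0
for i = 1, 4 do
    -- With function inlining and loop unrolling, this loop can compile down
    -- to straight-line bytecode with no calls.
    sum += square(i)
end
print(sum) -- 30
```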
Another area where performance is critical is garbage collection. Garbage collection is crucial for the language's simplicity as it makes memory management easier to reason about, but it does require a substantial amount of implementation effort to keep it efficient. For Roblox and for any other game engine or interactive simulation, latency is critical and so our collector is heavily optimized for that - to the extent possible collection is incremental and stop-the-world pauses are very brief. Another part of the performance story here, however, is the language and data structure design - by making sure that core data types are efficient in how they are laid out in memory, we reduce the amount of work the garbage collector needs to do to trace the heap, and, as another example of co-design, we try to make sure that language features are conscious of the impact they have on memory and garbage collection efficiency.
However, from a whole-platform standpoint there are many performance aspects that go beyond single-threaded execution. This is an active area of research and development for the team: to really leverage the hardware the code is running on, we need to think about SIMD and hardware thread utilization, as well as running code across a cluster of nodes. These considerations inform current and future development of the runtime and the language (for example, our runtime now supports efficient operations on short SIMD vectors even in interpreted mode, and the VM is fairly lightweight to instantiate, which makes running many VMs per core practical, with message passing or access to the shared Roblox data model used to make gameplay features feasible to implement), but we are definitely in the early days here - our first implementation of parallel script execution in Roblox just [shipped earlier this year](https://devforum.roblox.com/t/full-release-of-parallel-luau-v1/1836187). This is likely the area where many future innovations will happen as well.
# Future
We're very happy with the success of Luau - over several years we've established consistent processes for evolving the language, and so far we have found a good balance between simplicity, ease of use, performance and robustness of the language, its implementation and the tooling surrounding it. The language keeps evolving, but at a pace that is easy to stay on top of - in 2022 we shipped a few syntactic extensions for type annotations but no changes to the syntax of the language outside of types, and only one major [semantic change to for-loop iteration](https://luau-lang.org/syntax#generalized-iteration), which actually made the language easier to use by avoiding the need to specify the table traversal style via `pairs`/`ipairs`. We try to make sure that features are general and provide enough extensibility that libraries can be built on top of the language to make it easier to write code, while also keeping the language practical to use without complex supporting frameworks.
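For example, with generalized iteration the traversal style no longer needs to be spelled out - both loops in this small illustrative snippet behave the same way:

```lua
local scores = { alice = 10, bob = 20 }

-- Before: the iteration protocol had to be chosen explicitly.
for name, score in pairs(scores) do
    print(name, score)
end

-- Now: the table can be traversed directly.
for name, score in scores do
    print(name, score)
end
```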
There's still a lot of ground to cover, and we'll be working on Luau for years to come. We're in the process of building the next version of our type inference / checking engine to make sure that all users of the language, regardless of their expertise, benefit from it; we've started investing in native code generation as we're reaching the limits of interpreted performance (although some exciting opportunities for compiler optimization are still on the horizon); and there's still a lot of hard design and implementation work ahead of us for some important language features and standard libraries. And as mentioned, our execution model will likely see a lot of innovation as we push the boundaries of hardware utilization across cores and nodes.
Overall, Luau is like an iceberg - the surface is simple to learn and use, but it hides a tremendous amount of careful design, engineering and attention to detail, and we plan to continue to invest in it while trying to keep that outer surface comparatively small. We're excited to see how far we can take it!
[^1]: High-performance JavaScript engines didn't exist at the time! LuaJIT was around the corner and redefined the performance expectations of dynamic languages.
[^2]: In fact, scaling to large teams of expert programmers is one of the core motivations behind creating Luau, while the requirement to remain suitable for beginner programmers guides the direction of its evolution.
[^3]: This would have been difficult to drive in any existing large established language like JavaScript or Python.
[^4]: This is limited to the Roblox platform and doesn't exist in open-source releases.
[^5]: Which we do follow in some areas, such as syntactic compatibility - all existing programs that parse must continue to parse the same way as the language evolves.
[^6]: Some design decisions and implementation techniques are documented on our [performance page](https://luau-lang.org/performance).

View file

@ -0,0 +1,272 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/StringUtils.h"
#include "doctest.h"
#include <string.h>
using namespace Luau::CodeGen;
static std::string bytecodeAsArray(const std::vector<uint8_t>& bytecode)
{
std::string result = "{";
for (size_t i = 0; i < bytecode.size(); i++)
Luau::formatAppend(result, "%s0x%02x", i == 0 ? "" : ", ", bytecode[i]);
return result.append("}");
}
static std::string bytecodeAsArray(const std::vector<uint32_t>& code)
{
std::string result = "{";
for (size_t i = 0; i < code.size(); i++)
Luau::formatAppend(result, "%s0x%08x", i == 0 ? "" : ", ", code[i]);
return result.append("}");
}
class AssemblyBuilderA64Fixture
{
public:
bool check(void (*f)(AssemblyBuilderA64& build), std::vector<uint32_t> code, std::vector<uint8_t> data = {})
{
AssemblyBuilderA64 build(/* logText= */ false);
f(build);
build.finalize();
if (build.code != code)
{
printf("Expected code: %s\nReceived code: %s\n", bytecodeAsArray(code).c_str(), bytecodeAsArray(build.code).c_str());
return false;
}
if (build.data != data)
{
printf("Expected data: %s\nReceived data: %s\n", bytecodeAsArray(data).c_str(), bytecodeAsArray(build.data).c_str());
return false;
}
return true;
}
};
// armconverter.com can be used to validate instruction sequences
TEST_SUITE_BEGIN("A64Assembly");
#define SINGLE_COMPARE(inst, ...) \
CHECK(check( \
[](AssemblyBuilderA64& build) { \
build.inst; \
}, \
{__VA_ARGS__}))
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Unary")
{
SINGLE_COMPARE(neg(x0, x1), 0xCB0103E0);
SINGLE_COMPARE(neg(w0, w1), 0x4B0103E0);
SINGLE_COMPARE(mvn(x0, x1), 0xAA2103E0);
SINGLE_COMPARE(clz(x0, x1), 0xDAC01020);
SINGLE_COMPARE(clz(w0, w1), 0x5AC01020);
SINGLE_COMPARE(rbit(x0, x1), 0xDAC00020);
SINGLE_COMPARE(rbit(w0, w1), 0x5AC00020);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Binary")
{
// reg, reg
SINGLE_COMPARE(add(x0, x1, x2), 0x8B020020);
SINGLE_COMPARE(add(w0, w1, w2), 0x0B020020);
SINGLE_COMPARE(add(x0, x1, x2, 7), 0x8B021C20);
SINGLE_COMPARE(sub(x0, x1, x2), 0xCB020020);
SINGLE_COMPARE(and_(x0, x1, x2), 0x8A020020);
SINGLE_COMPARE(orr(x0, x1, x2), 0xAA020020);
SINGLE_COMPARE(eor(x0, x1, x2), 0xCA020020);
SINGLE_COMPARE(lsl(x0, x1, x2), 0x9AC22020);
SINGLE_COMPARE(lsl(w0, w1, w2), 0x1AC22020);
SINGLE_COMPARE(lsr(x0, x1, x2), 0x9AC22420);
SINGLE_COMPARE(asr(x0, x1, x2), 0x9AC22820);
SINGLE_COMPARE(ror(x0, x1, x2), 0x9AC22C20);
SINGLE_COMPARE(cmp(x0, x1), 0xEB01001F);
// reg, imm
SINGLE_COMPARE(add(x3, x7, 78), 0x910138E3);
SINGLE_COMPARE(add(w3, w7, 78), 0x110138E3);
SINGLE_COMPARE(sub(w3, w7, 78), 0x510138E3);
SINGLE_COMPARE(cmp(w0, 42), 0x7100A81F);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Loads")
{
// address forms
SINGLE_COMPARE(ldr(x0, x1), 0xF9400020);
SINGLE_COMPARE(ldr(x0, mem(x1, 8)), 0xF9400420);
SINGLE_COMPARE(ldr(x0, mem(x1, x7)), 0xF8676820);
SINGLE_COMPARE(ldr(x0, mem(x1, -7)), 0xF85F9020);
// load sizes
SINGLE_COMPARE(ldr(x0, x1), 0xF9400020);
SINGLE_COMPARE(ldr(w0, x1), 0xB9400020);
SINGLE_COMPARE(ldrb(w0, x1), 0x39400020);
SINGLE_COMPARE(ldrh(w0, x1), 0x79400020);
SINGLE_COMPARE(ldrsb(x0, x1), 0x39800020);
SINGLE_COMPARE(ldrsb(w0, x1), 0x39C00020);
SINGLE_COMPARE(ldrsh(x0, x1), 0x79800020);
SINGLE_COMPARE(ldrsh(w0, x1), 0x79C00020);
SINGLE_COMPARE(ldrsw(x0, x1), 0xB9800020);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Stores")
{
// address forms
SINGLE_COMPARE(str(x0, x1), 0xF9000020);
SINGLE_COMPARE(str(x0, mem(x1, 8)), 0xF9000420);
SINGLE_COMPARE(str(x0, mem(x1, x7)), 0xF8276820);
SINGLE_COMPARE(strh(w0, mem(x1, -7)), 0x781F9020);
// store sizes
SINGLE_COMPARE(str(x0, x1), 0xF9000020);
SINGLE_COMPARE(str(w0, x1), 0xB9000020);
SINGLE_COMPARE(strb(w0, x1), 0x39000020);
SINGLE_COMPARE(strh(w0, x1), 0x79000020);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Moves")
{
SINGLE_COMPARE(mov(x0, x1), 0xAA0103E0);
SINGLE_COMPARE(mov(w0, w1), 0x2A0103E0);
SINGLE_COMPARE(mov(x0, 42), 0xD2800540);
SINGLE_COMPARE(mov(w0, 42), 0x52800540);
SINGLE_COMPARE(movk(x0, 42, 16), 0xF2A00540);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "ControlFlow")
{
// Jump back
CHECK(check(
[](AssemblyBuilderA64& build) {
Label start = build.setLabel();
build.mov(x0, x1);
build.b(ConditionA64::Equal, start);
},
{0xAA0103E0, 0x54FFFFE0}));
// Jump forward
CHECK(check(
[](AssemblyBuilderA64& build) {
Label skip;
build.b(ConditionA64::Equal, skip);
build.mov(x0, x1);
build.setLabel(skip);
},
{0x54000040, 0xAA0103E0}));
// Jumps
CHECK(check(
[](AssemblyBuilderA64& build) {
Label skip;
build.b(ConditionA64::Equal, skip);
build.cbz(x0, skip);
build.cbnz(x0, skip);
build.setLabel(skip);
build.b(skip);
},
{0x54000060, 0xB4000040, 0xB5000020, 0x5400000E}));
// Basic control flow
SINGLE_COMPARE(br(x0), 0xD61F0000);
SINGLE_COMPARE(blr(x0), 0xD63F0000);
SINGLE_COMPARE(ret(), 0xD65F03C0);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "StackOps")
{
SINGLE_COMPARE(mov(x0, sp), 0x910003E0);
SINGLE_COMPARE(mov(sp, x0), 0x9100001F);
SINGLE_COMPARE(add(sp, sp, 4), 0x910013FF);
SINGLE_COMPARE(sub(sp, sp, 4), 0xD10013FF);
SINGLE_COMPARE(add(x0, sp, 4), 0x910013E0);
SINGLE_COMPARE(sub(sp, x0, 4), 0xD100101F);
SINGLE_COMPARE(ldr(x0, mem(sp, 8)), 0xF94007E0);
SINGLE_COMPARE(str(x0, mem(sp, 8)), 0xF90007E0);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Constants")
{
// clang-format off
CHECK(check(
[](AssemblyBuilderA64& build) {
char arr[12] = "hello world";
build.adr(x0, arr, 12);
build.adr(x0, uint64_t(0x1234567887654321));
build.adr(x0, 1.0);
},
{
0x10ffffa0, 0x10ffff20, 0x10fffec0
},
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,
0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
0x00, 0x00, 0x00, 0x00, // 4b padding to align double
'h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0,
}));
// clang-format on
}
TEST_CASE("LogTest")
{
AssemblyBuilderA64 build(/* logText= */ true);
build.add(sp, sp, 4);
build.add(w0, w1, w2);
build.add(x0, x1, x2, 2);
build.add(w7, w8, 5);
build.add(x7, x8, 5);
build.ldr(x7, x8);
build.ldr(x7, mem(x8, 8));
build.ldr(x7, mem(x8, x9));
build.mov(x1, x2);
build.movk(x1, 42, 16);
build.cmp(x1, x2);
build.blr(x0);
Label l;
build.b(ConditionA64::Plus, l);
build.cbz(x7, l);
build.setLabel(l);
build.ret();
build.finalize();
std::string expected = R"(
add sp,sp,#4
add w0,w1,w2
add x0,x1,x2 LSL #2
add w7,w8,#5
add x7,x8,#5
ldr x7,[x8]
ldr x7,[x8,#8]
ldr x7,[x8,x9]
mov x1,x2
movk x1,#42 LSL #16
cmp x1,x2
blr x0
b.pl .L1
cbz x7,.L1
.L1:
ret
)";
CHECK("\n" + build.text == expected);
}
TEST_SUITE_END();

View file

@ -8,7 +8,7 @@
using namespace Luau::CodeGen;
std::string bytecodeAsArray(const std::vector<uint8_t>& bytecode)
static std::string bytecodeAsArray(const std::vector<uint8_t>& bytecode)
{
std::string result = "{";
@ -21,7 +21,7 @@ std::string bytecodeAsArray(const std::vector<uint8_t>& bytecode)
class AssemblyBuilderX64Fixture
{
public:
void check(void (*f)(AssemblyBuilderX64& build), std::vector<uint8_t> code, std::vector<uint8_t> data = {})
bool check(void (*f)(AssemblyBuilderX64& build), std::vector<uint8_t> code, std::vector<uint8_t> data = {})
{
AssemblyBuilderX64 build(/* logText= */ false);
@ -32,25 +32,27 @@ public:
if (build.code != code)
{
printf("Expected code: %s\nReceived code: %s\n", bytecodeAsArray(code).c_str(), bytecodeAsArray(build.code).c_str());
CHECK(false);
return false;
}
if (build.data != data)
{
printf("Expected data: %s\nReceived data: %s\n", bytecodeAsArray(data).c_str(), bytecodeAsArray(build.data).c_str());
CHECK(false);
return false;
}
return true;
}
};
TEST_SUITE_BEGIN("x64Assembly");
#define SINGLE_COMPARE(inst, ...) \
check( \
CHECK(check( \
[](AssemblyBuilderX64& build) { \
build.inst; \
}, \
{__VA_ARGS__})
{__VA_ARGS__}))
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "BaseBinaryInstructionForms")
{
@ -236,9 +238,9 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfShift")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfLea")
{
SINGLE_COMPARE(lea(rax, qword[rdx + rcx]), 0x48, 0x8d, 0x04, 0x0a);
SINGLE_COMPARE(lea(rax, qword[rdx + rax * 4]), 0x48, 0x8d, 0x04, 0x82);
SINGLE_COMPARE(lea(rax, qword[r13 + r12 * 4 + 4]), 0x4b, 0x8d, 0x44, 0xa5, 0x04);
SINGLE_COMPARE(lea(rax, addr[rdx + rcx]), 0x48, 0x8d, 0x04, 0x0a);
SINGLE_COMPARE(lea(rax, addr[rdx + rax * 4]), 0x48, 0x8d, 0x04, 0x82);
SINGLE_COMPARE(lea(rax, addr[r13 + r12 * 4 + 4]), 0x4b, 0x8d, 0x44, 0xa5, 0x04);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfAbsoluteJumps")
@ -280,34 +282,34 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "NopForms")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AlignmentForms")
{
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
build.ret();
build.align(8, AlignmentDataX64::Nop);
},
{0xc3, 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00});
{0xc3, 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00}));
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
build.ret();
build.align(32, AlignmentDataX64::Nop);
},
{0xc3, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x0f, 0x1f, 0x84,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x1f, 0x40, 0x00});
0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x1f, 0x40, 0x00}));
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
build.ret();
build.align(8, AlignmentDataX64::Int3);
},
{0xc3, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc});
{0xc3, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc}));
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
build.ret();
build.align(8, AlignmentDataX64::Ud2);
},
{0xc3, 0x0f, 0x0b, 0x0f, 0x0b, 0x0f, 0x0b, 0xcc});
{0xc3, 0x0f, 0x0b, 0x0f, 0x0b, 0x0f, 0x0b, 0xcc}));
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AlignmentOverflow")
@ -349,40 +351,40 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AlignmentOverflow")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "ControlFlow")
{
// Jump back
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
Label start = build.setLabel();
build.add(rsi, 1);
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
build.jcc(ConditionX64::Equal, start);
},
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf3, 0xff, 0xff, 0xff});
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf3, 0xff, 0xff, 0xff}));
// Jump back, but the label is set before use
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
Label start;
build.add(rsi, 1);
build.setLabel(start);
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
build.jcc(ConditionX64::Equal, start);
},
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf7, 0xff, 0xff, 0xff});
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf7, 0xff, 0xff, 0xff}));
// Jump forward
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
Label skip;
build.cmp(rsi, rdi);
build.jcc(Condition::Greater, skip);
build.jcc(ConditionX64::Greater, skip);
build.or_(rdi, 0x3e);
build.setLabel(skip);
},
{0x48, 0x3b, 0xf7, 0x0f, 0x8f, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xcf, 0x3e});
{0x48, 0x3b, 0xf7, 0x0f, 0x8f, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xcf, 0x3e}));
// Regular jump
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
Label skip;
@ -390,12 +392,12 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "ControlFlow")
build.and_(rdi, 0x3e);
build.setLabel(skip);
},
{0xe9, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xe7, 0x3e});
{0xe9, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xe7, 0x3e}));
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "LabelCall")
{
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
Label fnB;
@ -404,10 +406,10 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "LabelCall")
build.ret();
build.setLabel(fnB);
build.lea(rax, qword[rcx + 0x1f]);
build.lea(rax, addr[rcx + 0x1f]);
build.ret();
},
{0x48, 0x83, 0xe1, 0x3e, 0xe8, 0x01, 0x00, 0x00, 0x00, 0xc3, 0x48, 0x8d, 0x41, 0x1f, 0xc3});
{0x48, 0x83, 0xe1, 0x3e, 0xe8, 0x01, 0x00, 0x00, 0x00, 0xc3, 0x48, 0x8d, 0x41, 0x1f, 0xc3}));
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXBinaryInstructionForms")
@ -518,7 +520,7 @@ TEST_CASE("LogTest")
Label start = build.setLabel();
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
build.jcc(ConditionX64::Equal, start);
build.jmp(qword[rdx]);
build.vaddps(ymm9, ymm12, ymmword[rbp + 0xc]);
@ -548,7 +550,7 @@ TEST_CASE("LogTest")
build.finalize();
bool same = "\n" + build.text == R"(
std::string expected = R"(
push r12
; align 8
nop word ptr[rax+rax] ; 6-byte nop
@ -588,13 +590,14 @@ TEST_CASE("LogTest")
nop dword ptr[rax+rax] ; 8-byte nop
nop word ptr[rax+rax] ; 9-byte nop
)";
CHECK(same);
CHECK("\n" + build.text == expected);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "Constants")
{
// clang-format off
check(
CHECK(check(
[](AssemblyBuilderX64& build) {
build.xor_(rax, rax);
build.add(rax, build.i64(0x1234567887654321));
@ -625,7 +628,7 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "Constants")
0x00, 0x00, 0x00, 0x00, // padding to align f64
0x00, 0x00, 0x80, 0x3f,
0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
});
}));
// clang-format on
}

View file

@ -183,7 +183,7 @@ TEST_CASE_FIXTURE(JsonEncoderFixture, "encode_AstExprInterpString")
AstStat* statement = expectParseStatement("local a = `var = {x}`");
std::string_view expected =
R"({"type":"AstStatLocal","location":"0,0 - 0,17","vars":[{"luauType":null,"name":"a","type":"AstLocal","location":"0,6 - 0,7"}],"values":[{"type":"AstExprInterpString","location":"0,10 - 0,17","strings":["var = ",""],"expressions":[{"type":"AstExprGlobal","location":"0,18 - 0,19","global":"x"}]}]})";
R"({"type":"AstStatLocal","location":"0,0 - 0,18","vars":[{"luauType":null,"name":"a","type":"AstLocal","location":"0,6 - 0,7"}],"values":[{"type":"AstExprInterpString","location":"0,10 - 0,18","strings":["var = ",""],"expressions":[{"type":"AstExprGlobal","location":"0,18 - 0,19","global":"x"}]}]})";
CHECK(toJson(statement) == expected);
}

View file

@ -1,5 +1,6 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/CodeAllocator.h"
#include "Luau/CodeBlockUnwind.h"
#include "Luau/UnwindBuilder.h"
@ -184,7 +185,7 @@ TEST_CASE("Dwarf2UnwindCodesX64")
0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x0e, 0x10, 0x85, 0x02, 0x02, 0x02, 0x0e, 0x18, 0x84, 0x03, 0x02, 0x02, 0x0e, 0x20, 0x83,
0x04, 0x02, 0x02, 0x0e, 0x28, 0x86, 0x05, 0x02, 0x02, 0x0e, 0x30, 0x8c, 0x06, 0x02, 0x02, 0x0e, 0x38, 0x8d, 0x07, 0x02, 0x02, 0x0e, 0x40,
0x8e, 0x08, 0x02, 0x02, 0x0e, 0x48, 0x8f, 0x09, 0x02, 0x04, 0x0e, 0x90, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
0x8e, 0x08, 0x02, 0x02, 0x0e, 0x48, 0x8f, 0x09, 0x02, 0x04, 0x0e, 0x90, 0x01, 0x02, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00};
REQUIRE(data.size() == expected.size());
CHECK(memcmp(data.data(), expected.data(), expected.size()) == 0);
@ -207,7 +208,7 @@ constexpr RegisterX64 rArg3 = rdx;
constexpr RegisterX64 rNonVol1 = r12;
constexpr RegisterX64 rNonVol2 = rbx;
TEST_CASE("GeneratedCodeExecution")
TEST_CASE("GeneratedCodeExecutionX64")
{
AssemblyBuilderX64 build(/* logText= */ false);
@ -241,7 +242,7 @@ void throwing(int64_t arg)
throw std::runtime_error("testing");
}
TEST_CASE("GeneratedCodeExecutionWithThrow")
TEST_CASE("GeneratedCodeExecutionWithThrowX64")
{
AssemblyBuilderX64 build(/* logText= */ false);
@ -267,7 +268,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrow")
build.sub(rsp, stackSize + localsSize);
unwind->allocStack(stackSize + localsSize);
build.lea(rbp, qword[rsp + stackSize]);
build.lea(rbp, addr[rsp + stackSize]);
unwind->setupFrameReg(rbp, stackSize);
unwind->finish();
@ -281,7 +282,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrow")
build.call(rNonVol2);
// Epilogue
build.lea(rsp, qword[rbp + localsSize]);
build.lea(rsp, addr[rbp + localsSize]);
build.pop(rbp);
build.pop(rNonVol2);
build.pop(rNonVol1);
@ -317,7 +318,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrow")
}
}
TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGate")
TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGateX64")
{
AssemblyBuilderX64 build(/* logText= */ false);
@ -351,7 +352,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGate")
build.sub(rsp, stackSize + localsSize);
unwind->allocStack(stackSize + localsSize);
build.lea(rbp, qword[rsp + stackSize]);
build.lea(rbp, addr[rsp + stackSize]);
unwind->setupFrameReg(rbp, stackSize);
unwind->finish();
@ -366,7 +367,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGate")
Label returnOffset = build.setLabel();
// Epilogue
build.lea(rsp, qword[rbp + localsSize]);
build.lea(rsp, addr[rbp + localsSize]);
build.pop(rbp);
build.pop(r15);
build.pop(r14);
@ -432,4 +433,48 @@ TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGate")
#endif
#if defined(__aarch64__)
TEST_CASE("GeneratedCodeExecutionA64")
{
AssemblyBuilderA64 build(/* logText= */ false);
Label skip;
build.cbz(x1, skip);
build.ldrsw(x1, x1);
build.cbnz(x1, skip);
build.mov(x1, 0); // doesn't execute due to cbnz above
build.setLabel(skip);
uint8_t one = 1;
build.adr(x2, &one, 1);
build.ldrb(w2, x2);
build.sub(x1, x1, x2);
build.add(x1, x1, 2);
build.add(x0, x0, x1, /* LSL */ 1);
build.ret();
build.finalize();
size_t blockSize = 1024 * 1024;
size_t maxTotalSize = 1024 * 1024;
CodeAllocator allocator(blockSize, maxTotalSize);
uint8_t* nativeData;
size_t sizeNativeData;
uint8_t* nativeEntry;
REQUIRE(allocator.allocate(build.data.data(), build.data.size(), reinterpret_cast<uint8_t*>(build.code.data()), build.code.size() * 4, nativeData,
sizeNativeData, nativeEntry));
REQUIRE(nativeEntry);
using FunctionType = int64_t(int64_t, int*);
FunctionType* f = (FunctionType*)nativeEntry;
int input = 10;
int64_t result = f(20, &input);
CHECK(result == 42);
}
#endif
TEST_SUITE_END();

View file

@ -539,8 +539,14 @@ TEST_CASE("Debugger")
static bool singlestep = false;
static int stephits = 0;
SUBCASE("") { singlestep = false; }
SUBCASE("SingleStep") { singlestep = true; }
SUBCASE("")
{
singlestep = false;
}
SUBCASE("SingleStep")
{
singlestep = true;
}
breakhits = 0;
interruptedthread = nullptr;

View file

@ -21,13 +21,13 @@ void ConstraintGraphBuilderFixture::generateConstraints(const std::string& code)
frontend.getGlobalScope(), &logger, NotNull{dfg.get()});
cgb->visit(root);
rootScope = cgb->rootScope;
constraints = Luau::collectConstraints(NotNull{cgb->rootScope});
constraints = Luau::borrowConstraints(cgb->constraints);
}
void ConstraintGraphBuilderFixture::solve(const std::string& code)
{
generateConstraints(code);
ConstraintSolver cs{NotNull{&normalizer}, NotNull{rootScope}, "MainModule", NotNull(&moduleResolver), {}, &logger};
ConstraintSolver cs{NotNull{&normalizer}, NotNull{rootScope}, constraints, "MainModule", NotNull(&moduleResolver), {}, &logger};
cs.run();
}

View file

@ -506,13 +506,14 @@ std::optional<TypeId> linearSearchForBinding(Scope* scope, const char* name)
return std::nullopt;
}
void registerNotType(Fixture& fixture, TypeArena& arena)
void registerHiddenTypes(Fixture& fixture, TypeArena& arena)
{
TypeId t = arena.addType(GenericTypeVar{"T"});
GenericTypeDefinition genericT{t};
ScopePtr moduleScope = fixture.frontend.getGlobalScope();
moduleScope->exportedTypeBindings["Not"] = TypeFun{{genericT}, arena.addType(NegationTypeVar{t})};
moduleScope->exportedTypeBindings["fun"] = TypeFun{{}, fixture.singletonTypes->functionType};
}
void dump(const std::vector<Constraint>& constraints)

View file

@ -186,7 +186,7 @@ std::optional<TypeId> lookupName(ScopePtr scope, const std::string& name); // Wa
std::optional<TypeId> linearSearchForBinding(Scope* scope, const char* name);
void registerNotType(Fixture& fixture, TypeArena& arena);
void registerHiddenTypes(Fixture& fixture, TypeArena& arena);
} // namespace Luau

View file

@ -1054,10 +1054,6 @@ TEST_CASE("check_without_builtin_next")
TEST_CASE_FIXTURE(BuiltinsFixture, "reexport_cyclic_type")
{
ScopedFastFlag sff[] = {
{"LuauForceExportSurfacesToBeNormal", true},
};
fileResolver.source["Module/A"] = R"(
type F<T> = (set: G<T>) -> ()
@ -1089,10 +1085,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "reexport_cyclic_type")
TEST_CASE_FIXTURE(BuiltinsFixture, "reexport_type_alias")
{
ScopedFastFlag sff[] = {
{"LuauForceExportSurfacesToBeNormal", true},
};
fileResolver.source["Module/A"] = R"(
type KeyOfTestEvents = "test-file-start" | "test-file-success" | "test-file-failure" | "test-case-result"
type MyAny = any

View file

@ -3,6 +3,7 @@
#include "Fixture.h"
#include "Luau/Common.h"
#include "Luau/TypeVar.h"
#include "doctest.h"
#include "Luau/Normalize.h"
@ -19,7 +20,7 @@ struct IsSubtypeFixture : Fixture
return ::Luau::isSubtype(a, b, NotNull{getMainModule()->getModuleScope().get()}, singletonTypes, ice);
}
};
}
} // namespace
void createSomeClasses(Frontend& frontend)
{
@ -109,12 +110,10 @@ TEST_CASE_FIXTURE(IsSubtypeFixture, "functions_and_any")
TypeId a = requireType("a");
TypeId b = requireType("b");
// Intuition:
// We cannot use b where a is required because we cannot rely on b to return a string.
// We cannot use a where b is required because we cannot rely on a to accept non-number arguments.
// any makes things work even when it makes no sense.
CHECK(!isSubtype(b, a));
CHECK(!isSubtype(a, b));
CHECK(isSubtype(b, a));
CHECK(isSubtype(a, b));
}
TEST_CASE_FIXTURE(IsSubtypeFixture, "variadic_functions_with_no_head")
@ -199,7 +198,7 @@ TEST_CASE_FIXTURE(IsSubtypeFixture, "table_with_any_prop")
TypeId b = requireType("b");
CHECK(isSubtype(a, b));
CHECK(!isSubtype(b, a));
CHECK(isSubtype(b, a));
}
TEST_CASE_FIXTURE(IsSubtypeFixture, "intersection")
@ -261,7 +260,7 @@ TEST_CASE_FIXTURE(IsSubtypeFixture, "tables")
TypeId d = requireType("d");
CHECK(isSubtype(a, b));
CHECK(!isSubtype(b, a));
CHECK(isSubtype(b, a));
CHECK(!isSubtype(c, a));
CHECK(!isSubtype(a, c));
@ -394,7 +393,8 @@ TEST_SUITE_END();
struct NormalizeFixture : Fixture
{
ScopedFastFlag sff{"LuauNegatedStringSingletons", true};
ScopedFastFlag sff0{"LuauNegatedStringSingletons", true};
ScopedFastFlag sff1{"LuauNegatedFunctionTypes", true};
TypeArena arena;
InternalErrorReporter iceHandler;
@ -403,16 +403,21 @@ struct NormalizeFixture : Fixture
NormalizeFixture()
{
registerNotType(*this, arena);
registerHiddenTypes(*this, arena);
}
TypeId normal(const std::string& annotation)
const NormalizedType* toNormalizedType(const std::string& annotation)
{
CheckResult result = check("type _Res = " + annotation);
LUAU_REQUIRE_NO_ERRORS(result);
std::optional<TypeId> ty = lookupType("_Res");
REQUIRE(ty);
const NormalizedType* norm = normalizer.normalize(*ty);
return normalizer.normalize(*ty);
}
TypeId normal(const std::string& annotation)
{
const NormalizedType* norm = toNormalizedType(annotation);
REQUIRE(norm);
return normalizer.typeFromNormal(*norm);
}
@ -476,6 +481,13 @@ TEST_CASE_FIXTURE(NormalizeFixture, "union_of_negations")
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "disjoint_negations_normalize_to_string")
{
CHECK(R"(string)" == toString(normal(R"(
(string & Not<"hello"> & Not<"world">) | (string & Not<"goodbye">)
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "negate_boolean")
{
CHECK("true" == toString(normal(R"(
@ -490,10 +502,43 @@ TEST_CASE_FIXTURE(NormalizeFixture, "negate_boolean_2")
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "bare_negation")
TEST_CASE_FIXTURE(NormalizeFixture, "intersect_function_and_top_function")
{
CHECK("() -> ()" == toString(normal(R"(
fun & (() -> ())
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "intersect_function_and_top_function_reverse")
{
CHECK("() -> ()" == toString(normal(R"(
(() -> ()) & fun
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "union_function_and_top_function")
{
CHECK("function" == toString(normal(R"(
fun | (() -> ())
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "negated_function_is_anything_except_a_function")
{
CHECK("(boolean | number | string | thread)?" == toString(normal(R"(
Not<fun>
)")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "specific_functions_cannot_be_negated")
{
CHECK(nullptr == toNormalizedType("Not<(boolean) -> boolean>"));
}
TEST_CASE_FIXTURE(NormalizeFixture, "bare_negated_boolean")
{
// TODO: We don't yet have a way to say number | string | thread | nil | Class | Table | Function
CHECK("(number | string | thread)?" == toString(normal(R"(
CHECK("(function | number | string | thread)?" == toString(normal(R"(
Not<boolean>
)")));
}

View file

@ -1,6 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Parser.h"
#include "AstQueryDsl.h"
#include "Fixture.h"
#include "ScopedFlags.h"
@ -2775,4 +2776,68 @@ TEST_CASE_FIXTURE(Fixture, "get_a_nice_error_when_there_is_an_extra_comma_at_the
CHECK(2 == f->generics.size);
}
TEST_CASE_FIXTURE(Fixture, "get_a_nice_error_when_there_is_no_comma_between_table_members")
{
ScopedFastFlag luauTableConstructorRecovery{"LuauTableConstructorRecovery", true};
ParseResult result = tryParse(R"(
local t = {
first = 1
second = 2,
third = 3,
fourth = 4,
}
)");
REQUIRE(1 == result.errors.size());
CHECK(Location({3, 12}, {3, 18}) == result.errors[0].getLocation());
CHECK("Expected ',' after table constructor element" == result.errors[0].getMessage());
REQUIRE(1 == result.root->body.size);
AstExprTable* table = Luau::query<AstExprTable>(result.root);
REQUIRE(table);
CHECK(table->items.size == 4);
}
TEST_CASE_FIXTURE(Fixture, "get_a_nice_error_when_there_is_no_comma_after_last_table_member")
{
ParseResult result = tryParse(R"(
local t = {
first = 1
local ok = true
local good = ok == true
)");
REQUIRE(1 == result.errors.size());
CHECK(Location({4, 8}, {4, 13}) == result.errors[0].getLocation());
CHECK("Expected '}' (to close '{' at line 2), got 'local'" == result.errors[0].getMessage());
REQUIRE(3 == result.root->body.size);
AstExprTable* table = Luau::query<AstExprTable>(result.root);
REQUIRE(table);
CHECK(table->items.size == 1);
}
TEST_CASE_FIXTURE(Fixture, "missing_default_type_pack_argument_after_variadic_type_parameter")
{
ScopedFastFlag sff{"LuauParserErrorsOnMissingDefaultTypePackArgument", true};
ParseResult result = tryParse(R"(
type Foo<T... = > = nil
)");
REQUIRE_EQ(2, result.errors.size());
CHECK_EQ(Location{{1, 23}, {1, 25}}, result.errors[0].getLocation());
CHECK_EQ("Expected type, got '>'", result.errors[0].getMessage());
CHECK_EQ(Location{{1, 23}, {1, 24}}, result.errors[1].getLocation());
CHECK_EQ("Expected type pack after '=', got type", result.errors[1].getMessage());
}
TEST_SUITE_END();

View file

@ -404,3 +404,20 @@ t60 = makeChainedTable(60)
}
TEST_SUITE_END();
TEST_SUITE_BEGIN("RegressionTests");
TEST_CASE_FIXTURE(ReplFixture, "InfiniteRecursion")
{
// If the infinite recursion is not caught, the test will fail
runCode(L, R"(
local NewProxyOne = newproxy(true)
local MetaTableOne = getmetatable(NewProxyOne)
MetaTableOne.__index = function()
return NewProxyOne.Game
end
print(NewProxyOne.HelloICauseACrash)
)");
}
TEST_SUITE_END();

View file

@ -10,8 +10,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauRecursiveTypeParameterRestriction);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
LUAU_FASTFLAG(LuauFixNameMaps);
LUAU_FASTFLAG(LuauFunctionReturnStringificationFixup);
TEST_SUITE_BEGIN("ToString");
@ -267,18 +265,22 @@ TEST_CASE_FIXTURE(Fixture, "quit_stringifying_type_when_length_is_exceeded")
ToStringOptions o;
o.exhaustive = false;
o.maxTypeLength = 40;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "(() -> ()) -> () -> ()");
if (FFlag::LuauSpecialTypesAsterisked)
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
o.maxTypeLength = 30;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> () -> ()");
CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> <a>(a) -> () -> ()");
CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> <b>(b) -> <a>(a) -> (... *TRUNCATED*");
}
else
{
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... <TRUNCATED>");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... <TRUNCATED>");
o.maxTypeLength = 40;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "(() -> ()) -> () -> ()");
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
}
}
@ -294,18 +296,21 @@ TEST_CASE_FIXTURE(Fixture, "stringifying_type_is_still_capped_when_exhaustive")
ToStringOptions o;
o.exhaustive = true;
o.maxTypeLength = 40;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "(() -> ()) -> () -> ()");
if (FFlag::LuauSpecialTypesAsterisked)
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
o.maxTypeLength = 30;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> () -> ()");
CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> <a>(a) -> () -> ()");
CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> <b>(b) -> <a>(a) -> (... *TRUNCATED*");
}
else
{
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... <TRUNCATED>");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... <TRUNCATED>");
o.maxTypeLength = 40;
CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
CHECK_EQ(toString(requireType("f1"), o), "(() -> ()) -> () -> ()");
CHECK_EQ(toString(requireType("f2"), o), "((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
CHECK_EQ(toString(requireType("f3"), o), "(((() -> ()) -> () -> ()) -> (() -> ()) -> ... *TRUNCATED*");
}
}
@ -440,28 +445,19 @@ TEST_CASE_FIXTURE(Fixture, "toStringDetailed")
TypeId id3Type = requireType("id3");
ToStringResult nameData = toStringDetailed(id3Type, opts);
if (FFlag::LuauFixNameMaps)
REQUIRE(3 == opts.nameMap.typeVars.size());
else
REQUIRE_EQ(3, nameData.DEPRECATED_nameMap.typeVars.size());
REQUIRE(3 == opts.nameMap.typeVars.size());
REQUIRE_EQ("<a, b, c>(a, b, c) -> (a, b, c)", nameData.name);
ToStringOptions opts2; // TODO: delete opts2 when clipping FFlag::LuauFixNameMaps
if (FFlag::LuauFixNameMaps)
opts2.nameMap = std::move(opts.nameMap);
else
opts2.DEPRECATED_nameMap = std::move(nameData.DEPRECATED_nameMap);
const FunctionTypeVar* ftv = get<FunctionTypeVar>(follow(id3Type));
REQUIRE(ftv != nullptr);
auto params = flatten(ftv->argTypes).first;
REQUIRE(3 == params.size());
CHECK("a" == toString(params[0], opts2));
CHECK("b" == toString(params[1], opts2));
CHECK("c" == toString(params[2], opts2));
CHECK("a" == toString(params[0], opts));
CHECK("b" == toString(params[1], opts));
CHECK("c" == toString(params[2], opts));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "toStringDetailed2")
@ -488,13 +484,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "toStringDetailed2")
TypeId tType = requireType("inst");
ToStringResult r = toStringDetailed(tType, opts);
CHECK_EQ("{ @metatable { __index: { @metatable {| __index: base |}, child } }, inst }", r.name);
if (FFlag::LuauFixNameMaps)
CHECK(0 == opts.nameMap.typeVars.size());
else
CHECK_EQ(0, r.DEPRECATED_nameMap.typeVars.size());
if (!FFlag::LuauFixNameMaps)
opts.DEPRECATED_nameMap = r.DEPRECATED_nameMap;
CHECK(0 == opts.nameMap.typeVars.size());
const MetatableTypeVar* tMeta = get<MetatableTypeVar>(tType);
REQUIRE(tMeta);
@ -519,8 +509,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "toStringDetailed2")
REQUIRE(tMeta6->props.count("two") > 0);
ToStringResult oneResult = toStringDetailed(tMeta5->props["one"].type, opts);
if (!FFlag::LuauFixNameMaps)
opts.DEPRECATED_nameMap = oneResult.DEPRECATED_nameMap;
std::string twoResult = toString(tMeta6->props["two"].type, opts);
@ -535,10 +523,7 @@ local function target(callback: nil) return callback(4, "hello") end
)");
LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("(nil) -> (*error-type*)", toString(requireType("target")));
else
CHECK_EQ("(nil) -> (<error-type>)", toString(requireType("target")));
CHECK_EQ("(nil) -> (*error-type*)", toString(requireType("target")));
}
TEST_CASE_FIXTURE(Fixture, "toStringGenericPack")

View file

@ -8,6 +8,7 @@
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauNoMoreGlobalSingletonTypes)
TEST_SUITE_BEGIN("TypeAliases");
@ -509,11 +510,21 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "general_require_multi_assign")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_alias_import_mutation")
{
ScopedFastFlag luauNewLibraryTypeNames{"LuauNewLibraryTypeNames", true};
CheckResult result = check("type t10<x> = typeof(table)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId ty = getGlobalBinding(frontend, "table");
CHECK_EQ(toString(ty), "table");
if (FFlag::LuauNoMoreGlobalSingletonTypes)
{
CHECK_EQ(toString(ty), "typeof(table)");
}
else
{
CHECK_EQ(toString(ty), "table");
}
const TableTypeVar* ttv = get<TableTypeVar>(ty);
REQUIRE(ttv);

View file

@ -13,8 +13,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
TEST_SUITE_BEGIN("TypeInferAnyError");
TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_returns_any")
@ -96,10 +94,7 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error")
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("a")));
else
CHECK_EQ("<error-type>", toString(requireType("a")));
CHECK_EQ("*error-type*", toString(requireType("a")));
}
TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error2")
@ -115,10 +110,7 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error2")
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("a")));
else
CHECK_EQ("<error-type>", toString(requireType("a")));
CHECK_EQ("*error-type*", toString(requireType("a")));
}
TEST_CASE_FIXTURE(Fixture, "length_of_error_type_does_not_produce_an_error")
@ -233,10 +225,7 @@ TEST_CASE_FIXTURE(Fixture, "calling_error_type_yields_error")
CHECK_EQ("unknown", err->name);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("a")));
else
CHECK_EQ("<error-type>", toString(requireType("a")));
CHECK_EQ("*error-type*", toString(requireType("a")));
}
TEST_CASE_FIXTURE(Fixture, "chain_calling_error_type_yields_error")
@ -245,10 +234,7 @@ TEST_CASE_FIXTURE(Fixture, "chain_calling_error_type_yields_error")
local a = Utility.Create "Foo" {}
)");
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("a")));
else
CHECK_EQ("<error-type>", toString(requireType("a")));
CHECK_EQ("*error-type*", toString(requireType("a")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "replace_every_free_type_when_unifying_a_complex_function_with_any")

View file

@ -8,7 +8,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
TEST_SUITE_BEGIN("BuiltinTests");
@ -58,7 +57,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "next_iterator_should_infer_types_and_type_ch
local s = "foo"
local t = { [s] = 1 }
local c: string, d: number = next(t)
local c: string?, d: number = next(t)
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
@ -70,7 +69,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "pairs_iterator_should_infer_types_and_type_c
type Map<K, V> = { [K]: V }
local map: Map<string, number> = { ["foo"] = 1, ["bar"] = 2, ["baz"] = 3 }
local it: (Map<string, number>, string | nil) -> (string, number), t: Map<string, number>, i: nil = pairs(map)
local it: (Map<string, number>, string | nil) -> (string?, number), t: Map<string, number>, i: nil = pairs(map)
)");
LUAU_REQUIRE_NO_ERRORS(result);
@ -82,7 +81,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "ipairs_iterator_should_infer_types_and_type_
type Map<K, V> = { [K]: V }
local array: Map<number, string> = { "foo", "bar", "baz" }
local it: (Map<number, string>, number) -> (number, string), t: Map<number, string>, i: number = ipairs(array)
local it: (Map<number, string>, number) -> (number?, string), t: Map<number, string>, i: number = ipairs(array)
)");
LUAU_REQUIRE_NO_ERRORS(result);
@ -685,7 +684,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "select_with_variadic_typepack_tail")
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution && FFlag::LuauSpecialTypesAsterisked)
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("string", toString(requireType("foo")));
CHECK_EQ("*error-type*", toString(requireType("bar")));
@ -714,7 +713,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "select_with_variadic_typepack_tail_and_strin
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution && FFlag::LuauSpecialTypesAsterisked)
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("string", toString(requireType("foo")));
CHECK_EQ("string", toString(requireType("bar")));
@ -1016,10 +1015,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_freeze_is_generic")
CHECK_EQ("number", toString(requireType("a")));
CHECK_EQ("string", toString(requireType("b")));
CHECK_EQ("boolean", toString(requireType("c")));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("d")));
else
CHECK_EQ("<error-type>", toString(requireType("d")));
CHECK_EQ("*error-type*", toString(requireType("d")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "set_metatable_needs_arguments")

View file

@ -15,7 +15,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauInstantiateInSubtyping);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
TEST_SUITE_BEGIN("TypeInferFunctions");
@ -533,7 +532,7 @@ TEST_CASE_FIXTURE(Fixture, "infer_higher_order_function")
REQUIRE_EQ(2, argVec.size());
const FunctionTypeVar* fType = get<FunctionTypeVar>(follow(argVec[0]));
REQUIRE(fType != nullptr);
REQUIRE_MESSAGE(fType != nullptr, "Expected a function but got " << toString(argVec[0]));
std::vector<TypeId> fArgs = flatten(fType->argTypes).first;
@ -980,19 +979,13 @@ TEST_CASE_FIXTURE(Fixture, "function_cast_error_uses_correct_language")
REQUIRE(tm1);
CHECK_EQ("(string) -> number", toString(tm1->wantedType));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("(string, *error-type*) -> number", toString(tm1->givenType));
else
CHECK_EQ("(string, <error-type>) -> number", toString(tm1->givenType));
CHECK_EQ("(string, *error-type*) -> number", toString(tm1->givenType));
auto tm2 = get<TypeMismatch>(result.errors[1]);
REQUIRE(tm2);
CHECK_EQ("(number, number) -> (number, number)", toString(tm2->wantedType));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("(string, *error-type*) -> number", toString(tm2->givenType));
else
CHECK_EQ("(string, <error-type>) -> number", toString(tm2->givenType));
CHECK_EQ("(string, *error-type*) -> number", toString(tm2->givenType));
}
TEST_CASE_FIXTURE(Fixture, "no_lossy_function_type")
@ -1538,20 +1531,10 @@ function t:b() return 2 end -- not OK
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauSpecialTypesAsterisked)
{
CHECK_EQ(R"(Type '(*error-type*) -> number' could not be converted into '() -> number'
CHECK_EQ(R"(Type '(*error-type*) -> number' could not be converted into '() -> number'
caused by:
Argument count mismatch. Function expects 1 argument, but none are specified)",
toString(result.errors[0]));
}
else
{
CHECK_EQ(R"(Type '(<error-type>) -> number' could not be converted into '() -> number'
caused by:
Argument count mismatch. Function expects 1 argument, but none are specified)",
toString(result.errors[0]));
}
toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "too_few_arguments_variadic")
@ -1779,7 +1762,72 @@ z = y -- Not OK, so the line is colorable
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((\"blue\" | \"red\") -> (\"blue\" | \"red\") -> (\"blue\" | \"red\") -> boolean) & ((\"blue\" | \"red\") -> (\"blue\") -> (\"blue\") -> false) & ((\"blue\" | \"red\") -> (\"red\") -> (\"red\") -> false) & ((\"blue\") -> (\"blue\") -> (\"blue\" | \"red\") -> false) & ((\"red\") -> (\"red\") -> (\"blue\" | \"red\") -> false)' could not be converted into '(\"blue\" | \"red\") -> (\"blue\" | \"red\") -> (\"blue\" | \"red\") -> false'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '((\"blue\" | \"red\") -> (\"blue\" | \"red\") -> (\"blue\" | \"red\") -> boolean) & ((\"blue\" | \"red\") -> (\"blue\") -> (\"blue\") "
"-> false) & ((\"blue\" | \"red\") -> (\"red\") -> (\"red\") -> false) & ((\"blue\") -> (\"blue\") -> (\"blue\" | \"red\") -> false) & "
"((\"red\") -> (\"red\") -> (\"blue\" | \"red\") -> false)' could not be converted into '(\"blue\" | \"red\") -> (\"blue\" | \"red\") -> "
"(\"blue\" | \"red\") -> false'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "function_is_supertype_of_concrete_functions")
{
ScopedFastFlag sff{"LuauNegatedFunctionTypes", true};
registerHiddenTypes(*this, frontend.globalTypes);
CheckResult result = check(R"(
function foo(f: fun) end
function a() end
function id(x) return x end
foo(a)
foo(id)
foo(foo)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "concrete_functions_are_not_supertypes_of_function")
{
ScopedFastFlag sff{"LuauNegatedFunctionTypes", true};
registerHiddenTypes(*this, frontend.globalTypes);
CheckResult result = check(R"(
local a: fun = function() end
function one(arg: () -> ()) end
function two(arg: <T>(T) -> T) end
one(a)
two(a)
)");
LUAU_REQUIRE_ERROR_COUNT(2, result);
CHECK(6 == result.errors[0].location.begin.line);
CHECK(7 == result.errors[1].location.begin.line);
}
TEST_CASE_FIXTURE(Fixture, "other_things_are_not_related_to_function")
{
ScopedFastFlag sff{"LuauNegatedFunctionTypes", true};
registerHiddenTypes(*this, frontend.globalTypes);
CheckResult result = check(R"(
local a: fun = function() end
local b: {} = a
local c: boolean = a
local d: fun = true
local e: fun = {}
)");
LUAU_REQUIRE_ERROR_COUNT(4, result);
CHECK(2 == result.errors[0].location.begin.line);
CHECK(3 == result.errors[1].location.begin.line);
CHECK(4 == result.errors[2].location.begin.line);
CHECK(5 == result.errors[3].location.begin.line);
}
TEST_SUITE_END();

View file

@ -10,7 +10,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
using namespace Luau;
@ -1011,10 +1010,7 @@ TEST_CASE_FIXTURE(Fixture, "no_stack_overflow_from_quantifying")
std::optional<TypeFun> t0 = getMainModule()->getModuleScope()->lookupType("t0");
REQUIRE(t0);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(t0->type));
else
CHECK_EQ("<error-type>", toString(t0->type));
CHECK_EQ("*error-type*", toString(t0->type));
auto it = std::find_if(result.errors.begin(), result.errors.end(), [](TypeError& err) {
return get<OccursCheckFailed>(err);

View file

@ -13,7 +13,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
TEST_SUITE_BEGIN("TypeInferLoops");
@ -157,10 +156,7 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_on_error")
LUAU_REQUIRE_ERROR_COUNT(2, result);
TypeId p = requireType("p");
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(p));
else
CHECK_EQ("<error-type>", toString(p));
CHECK_EQ("*error-type*", toString(p));
}
TEST_CASE_FIXTURE(Fixture, "for_in_loop_on_non_function")

View file

@ -14,8 +14,6 @@ LUAU_FASTFLAG(LuauInstantiateInSubtyping)
using namespace Luau;
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
TEST_SUITE_BEGIN("TypeInferModules");
TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_require_basic")
@ -176,10 +174,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "require_module_that_does_not_export")
auto hootyType = requireType(bModule, "Hooty");
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(hootyType));
else
CHECK_EQ("<error-type>", toString(hootyType));
CHECK_EQ("*error-type*", toString(hootyType));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "warn_if_you_try_to_require_a_non_modulescript")
@ -282,10 +277,7 @@ local ModuleA = require(game.A)
std::optional<TypeId> oty = requireType("ModuleA");
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(*oty));
else
CHECK_EQ("<error-type>", toString(*oty));
CHECK_EQ("*error-type*", toString(*oty));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_modify_imported_types")

View file

@ -13,17 +13,17 @@ namespace
struct NegationFixture : Fixture
{
TypeArena arena;
ScopedFastFlag sff[2] {
ScopedFastFlag sff[2]{
{"LuauNegatedStringSingletons", true},
{"LuauSubtypeNormalizer", true},
};
NegationFixture()
{
registerNotType(*this, arena);
registerHiddenTypes(*this, arena);
}
};
}
} // namespace
TEST_SUITE_BEGIN("Negations");

View file

@ -50,14 +50,18 @@ TEST_CASE_FIXTURE(Fixture, "or_joins_types_with_no_superfluous_union")
CHECK_EQ(*requireType("s"), *typeChecker.stringType);
}
TEST_CASE_FIXTURE(Fixture, "and_adds_boolean")
TEST_CASE_FIXTURE(Fixture, "and_does_not_always_add_boolean")
{
ScopedFastFlag sff[]{
{"LuauTryhardAnd", true},
};
CheckResult result = check(R"(
local s = "a" and 10
local x:boolean|number = s
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(*requireType("s")), "boolean | number");
CHECK_EQ(toString(*requireType("s")), "number");
}
TEST_CASE_FIXTURE(Fixture, "and_adds_boolean_no_superfluous_union")
@ -971,4 +975,79 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "mm_comparisons_must_return_a_boolean")
CHECK(toString(result.errors[1]) == "Metamethod '__lt' must return type 'boolean'");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "reworked_and")
{
ScopedFastFlag sff[]{
{"LuauTryhardAnd", true},
};
CheckResult result = check(R"(
local a: number? = 5
local b: boolean = (a or 1) > 10
local c -- free
local x = a and 1
local y = 'a' and 1
local z = b and 1
local w = c and 1
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK("number?" == toString(requireType("x")));
CHECK("number" == toString(requireType("y")));
CHECK("false | number" == toString(requireType("z")));
CHECK("number" == toString(requireType("w"))); // Normalizer considers free & falsy == never
}
else
{
CHECK("number?" == toString(requireType("x")));
CHECK("number" == toString(requireType("y")));
CHECK("boolean | number" == toString(requireType("z"))); // 'false' widened to boolean
CHECK("(boolean | number)?" == toString(requireType("w")));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "reworked_or")
{
ScopedFastFlag sff[]{
{"LuauTryhardAnd", true},
};
CheckResult result = check(R"(
local a: number | false = 5
local b: number? = 6
local c: boolean = true
local d: true = true
local e: false = false
local f: nil = false
local a1 = a or 'a'
local b1 = b or 4
local c1 = c or 'c'
local d1 = d or 'd'
local e1 = e or 'e'
local f1 = f or 'f'
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK("number | string" == toString(requireType("a1")));
CHECK("number" == toString(requireType("b1")));
CHECK("string | true" == toString(requireType("c1")));
CHECK("string | true" == toString(requireType("d1")));
CHECK("string" == toString(requireType("e1")));
CHECK("string" == toString(requireType("f1")));
}
else
{
CHECK("number | string" == toString(requireType("a1")));
CHECK("number" == toString(requireType("b1")));
CHECK("boolean | string" == toString(requireType("c1"))); // 'true' widened to boolean
CHECK("boolean | string" == toString(requireType("d1"))); // 'true' widened to boolean
CHECK("string" == toString(requireType("e1")));
CHECK("string" == toString(requireType("f1")));
}
}
TEST_SUITE_END();

View file

@ -11,8 +11,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
using namespace Luau;
TEST_SUITE_BEGIN("TypeInferPrimitives");
@ -49,10 +47,7 @@ TEST_CASE_FIXTURE(Fixture, "string_index")
REQUIRE(nat);
CHECK_EQ("string", toString(nat->ty));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("t")));
else
CHECK_EQ("<error-type>", toString(requireType("t")));
CHECK_EQ("*error-type*", toString(requireType("t")));
}
TEST_CASE_FIXTURE(Fixture, "string_method")

View file

@ -461,23 +461,27 @@ TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint")
LUAU_REQUIRE_NO_ERRORS(result);
// Solving this requires recognizing that we can partially solve the
// following constraint:
// Solving this requires recognizing that we can't dispatch a constraint
// like this without doing further work:
//
// (*blocked*) -> () <: (number) -> (b...)
//
// The correct thing for us to do is to consider the constraint dispatched,
// but we need to also record a new constraint number <: *blocked* to finish
// the job later.
// We solve this by searching both types for BlockedTypeVars and block the
// constraint on any we find. It also gets the job done, but I'm worried
// about the efficiency of doing so many deep type traversals and it may
// make us more prone to getting stuck on constraint cycles.
//
// If this doesn't pan out, a possible solution is to go further down the
// path of supporting partial constraint dispatch. The way it would work is
// that we'd dispatch the above constraint by binding b... to (), but we
// would append a new constraint number <: *blocked* to the constraint set
// to be solved later. This should be faster and theoretically less prone
// to cyclic constraint dependencies.
CHECK("<a>(a, number) -> ()" == toString(requireType("prime_iter")));
}
TEST_CASE_FIXTURE(Fixture, "free_options_cannot_be_unified_together")
{
ScopedFastFlag sff[] = {
{"LuauFixNameMaps", true},
};
TypeArena arena;
TypeId nilType = singletonTypes->nilType;
@ -522,8 +526,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "for_in_loop_with_zero_iterators")
// Ideally, we would not try to export a function type with generic types from incorrect scope
TEST_CASE_FIXTURE(BuiltinsFixture, "generic_type_leak_to_module_interface")
{
ScopedFastFlag LuauAnyifyModuleReturnGenerics{"LuauAnyifyModuleReturnGenerics", true};
fileResolver.source["game/A"] = R"(
local wrapStrictTable
@ -563,8 +565,6 @@ return wrapStrictTable(Constants, "Constants")
TEST_CASE_FIXTURE(BuiltinsFixture, "generic_type_leak_to_module_interface_variadic")
{
ScopedFastFlag LuauAnyifyModuleReturnGenerics{"LuauAnyifyModuleReturnGenerics", true};
fileResolver.source["game/A"] = R"(
local wrapStrictTable
@ -633,7 +633,7 @@ struct IsSubtypeFixture : Fixture
return ::Luau::isSubtype(a, b, NotNull{getMainModule()->getModuleScope().get()}, singletonTypes, ice);
}
};
}
} // namespace
TEST_CASE_FIXTURE(IsSubtypeFixture, "intersection_of_functions_of_different_arities")
{

View file

@ -7,7 +7,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
using namespace Luau;
@ -36,7 +35,7 @@ std::optional<WithPredicate<TypePackId>> magicFunctionInstanceIsA(
return WithPredicate<TypePackId>{booleanPack, {IsAPredicate{std::move(*lvalue), expr.location, tfun->type}}};
}
struct RefinementClassFixture : Fixture
struct RefinementClassFixture : BuiltinsFixture
{
RefinementClassFixture()
{
@ -127,7 +126,7 @@ TEST_CASE_FIXTURE(Fixture, "invert_is_truthy_constraint")
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & ~~(false?)", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("(string?) & ~~~(false?)", toString(requireTypeAtPosition({5, 26})));
CHECK_EQ("(string?) & ~(false?)", toString(requireTypeAtPosition({5, 26})));
}
else
{
@ -153,7 +152,7 @@ TEST_CASE_FIXTURE(Fixture, "parenthesized_expressions_are_followed_through")
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & ~~(false?)", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("(string?) & ~~~(false?)", toString(requireTypeAtPosition({5, 26})));
CHECK_EQ("(string?) & ~(false?)", toString(requireTypeAtPosition({5, 26})));
}
else
{
@ -178,8 +177,16 @@ TEST_CASE_FIXTURE(Fixture, "and_constraint")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("string", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("number", toString(requireTypeAtPosition({4, 26})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & ~(false?)", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("(number?) & ~(false?)", toString(requireTypeAtPosition({4, 26})));
}
else
{
CHECK_EQ("string", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("number", toString(requireTypeAtPosition({4, 26})));
}
CHECK_EQ("string?", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("number?", toString(requireTypeAtPosition({7, 26})));
@ -204,8 +211,16 @@ TEST_CASE_FIXTURE(Fixture, "not_and_constraint")
CHECK_EQ("string?", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("number?", toString(requireTypeAtPosition({4, 26})));
CHECK_EQ("string", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("number", toString(requireTypeAtPosition({7, 26})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & ~(false?)", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("(number?) & ~(false?)", toString(requireTypeAtPosition({7, 26})));
}
else
{
CHECK_EQ("string", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("number", toString(requireTypeAtPosition({7, 26})));
}
}
TEST_CASE_FIXTURE(Fixture, "or_predicate_with_truthy_predicates")
@ -227,8 +242,56 @@ TEST_CASE_FIXTURE(Fixture, "or_predicate_with_truthy_predicates")
CHECK_EQ("string?", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("number?", toString(requireTypeAtPosition({4, 26})));
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("nil", toString(requireTypeAtPosition({7, 26})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & ~~(false?)", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("(number?) & ~~(false?)", toString(requireTypeAtPosition({7, 26})));
}
else
{
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 26})));
CHECK_EQ("nil", toString(requireTypeAtPosition({7, 26})));
}
}
TEST_CASE_FIXTURE(Fixture, "a_and_b_or_a_and_c")
{
CheckResult result = check(R"(
function f(a: string?, b: number?, c: boolean)
if (a and b) or (a and c) then
local foo = a
local bar = b
local baz = c
else
local foo = a
local bar = b
local baz = c
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string?) & (~(false?) | ~(false?))", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("number?", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("boolean", toString(requireTypeAtPosition({5, 28})));
CHECK_EQ("string?", toString(requireTypeAtPosition({7, 28})));
CHECK_EQ("number?", toString(requireTypeAtPosition({8, 28})));
CHECK_EQ("boolean", toString(requireTypeAtPosition({9, 28})));
}
else
{
CHECK_EQ("string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("number?", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("true", toString(requireTypeAtPosition({5, 28}))); // oh no! :(
CHECK_EQ("string?", toString(requireTypeAtPosition({7, 28})));
CHECK_EQ("number?", toString(requireTypeAtPosition({8, 28})));
CHECK_EQ("boolean", toString(requireTypeAtPosition({9, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "type_assertion_expr_carry_its_constraints")
@ -244,11 +307,20 @@ TEST_CASE_FIXTURE(Fixture, "type_assertion_expr_carry_its_constraints")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("number", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("string", toString(requireTypeAtPosition({4, 26})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("number?", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("string?", toString(requireTypeAtPosition({4, 26})));
}
else
{
// We're going to drop support for type refinements through type assertions.
CHECK_EQ("number", toString(requireTypeAtPosition({3, 26})));
CHECK_EQ("string", toString(requireTypeAtPosition({4, 26})));
}
}
TEST_CASE_FIXTURE(Fixture, "typeguard_in_if_condition_position")
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_in_if_condition_position")
{
CheckResult result = check(R"(
function f(s: any)
@ -260,7 +332,14 @@ TEST_CASE_FIXTURE(Fixture, "typeguard_in_if_condition_position")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("number", toString(requireTypeAtPosition({3, 26})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("any & number", toString(requireTypeAtPosition({3, 26})));
}
else
{
CHECK_EQ("number", toString(requireTypeAtPosition({3, 26})));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_in_assert_position")
@ -272,10 +351,11 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_in_assert_position")
)");
LUAU_REQUIRE_NO_ERRORS(result);
REQUIRE_EQ("number", toString(requireType("b")));
}
TEST_CASE_FIXTURE(Fixture, "call_a_more_specific_function_using_typeguard")
TEST_CASE_FIXTURE(BuiltinsFixture, "call_an_incompatible_function_after_using_typeguard")
{
CheckResult result = check(R"(
local function f(x: number)
@ -290,6 +370,7 @@ TEST_CASE_FIXTURE(Fixture, "call_a_more_specific_function_using_typeguard")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Type 'string' could not be converted into 'number'", toString(result.errors[0]));
}
@ -381,11 +462,22 @@ TEST_CASE_FIXTURE(Fixture, "lvalue_is_equal_to_another_lvalue")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "(number | string)?"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "boolean?"); // a == b
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "((number | string)?) & (boolean?)"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "((number | string)?) & (boolean?)"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({5, 33})), "(number | string)?"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({5, 36})), "boolean?"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({5, 33})), "((number | string)?) & unknown"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({5, 36})), "(boolean?) & unknown"); // a ~= b
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "(number | string)?"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "boolean?"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({5, 33})), "(number | string)?"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({5, 36})), "boolean?"); // a ~= b
}
}
TEST_CASE_FIXTURE(Fixture, "lvalue_is_equal_to_a_term")
@ -402,8 +494,16 @@ TEST_CASE_FIXTURE(Fixture, "lvalue_is_equal_to_a_term")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "(number | string)?"); // a == 1
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "(number | string)?"); // a ~= 1
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "((number | string)?) & number"); // a == 1
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "((number | string)?) & unknown"); // a ~= 1
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "(number | string)?"); // a == 1;
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "(number | string)?"); // a ~= 1
}
}
TEST_CASE_FIXTURE(Fixture, "term_is_equal_to_an_lvalue")
@ -420,8 +520,16 @@ TEST_CASE_FIXTURE(Fixture, "term_is_equal_to_an_lvalue")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), R"("hello")"); // a == "hello"
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "(number | string)?"); // a ~= "hello"
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), R"("hello" & ((number | string)?))"); // a == "hello"
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), R"(((number | string)?) & ~"hello")"); // a ~= "hello"
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), R"("hello")"); // a == "hello"
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), R"((number | string)?)"); // a ~= "hello"
}
}
TEST_CASE_FIXTURE(Fixture, "lvalue_is_not_nil")
@ -438,8 +546,16 @@ TEST_CASE_FIXTURE(Fixture, "lvalue_is_not_nil")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "number | string"); // a ~= nil
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "(number | string)?"); // a == nil
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "((number | string)?) & ~nil"); // a ~= nil
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "((number | string)?) & nil"); // a == nil
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 28})), "number | string"); // a ~= nil
CHECK_EQ(toString(requireTypeAtPosition({5, 28})), "(number | string)?"); // a == nil
}
}
TEST_CASE_FIXTURE(Fixture, "free_type_is_equal_to_an_lvalue")
@ -454,8 +570,17 @@ TEST_CASE_FIXTURE(Fixture, "free_type_is_equal_to_an_lvalue")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "a"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "string?"); // a == b
if (FFlag::DebugLuauDeferredConstraintResolution)
{
ToStringOptions opts;
CHECK_EQ(toString(requireTypeAtPosition({3, 33}), opts), "(string?) & a"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36}), opts), "(string?) & a"); // a == b
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "a"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "string?"); // a == b
}
}
TEST_CASE_FIXTURE(Fixture, "unknown_lvalue_is_not_synonymous_with_other_on_not_equal")
@ -470,8 +595,16 @@ TEST_CASE_FIXTURE(Fixture, "unknown_lvalue_is_not_synonymous_with_other_on_not_e
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "any"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "{| x: number |}?"); // a ~= b
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "any & unknown"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "({| x: number |}?) & unknown"); // a ~= b
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "any"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "{| x: number |}?"); // a ~= b
}
}
TEST_CASE_FIXTURE(Fixture, "string_not_equal_to_string_or_nil")
@ -490,11 +623,22 @@ TEST_CASE_FIXTURE(Fixture, "string_not_equal_to_string_or_nil")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(toString(requireTypeAtPosition({6, 29})), "string"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({6, 32})), "string?"); // a ~= b
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ(toString(requireTypeAtPosition({6, 29})), "string & unknown"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({6, 32})), "(string?) & unknown"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({8, 29})), "string"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({8, 32})), "string?"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({8, 29})), "(string?) & string"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({8, 32})), "(string?) & string"); // a == b
}
else
{
CHECK_EQ(toString(requireTypeAtPosition({6, 29})), "string"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({6, 32})), "string?"); // a ~= b
CHECK_EQ(toString(requireTypeAtPosition({8, 29})), "string"); // a == b
CHECK_EQ(toString(requireTypeAtPosition({8, 32})), "string?"); // a == b
}
}
TEST_CASE_FIXTURE(Fixture, "narrow_property_of_a_bounded_variable")
@ -513,7 +657,7 @@ TEST_CASE_FIXTURE(Fixture, "narrow_property_of_a_bounded_variable")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "type_narrow_to_vector")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_narrow_to_vector")
{
CheckResult result = check(R"(
local function f(x)
@ -525,13 +669,10 @@ TEST_CASE_FIXTURE(Fixture, "type_narrow_to_vector")
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireTypeAtPosition({3, 28})));
else
CHECK_EQ("<error-type>", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("*error-type*", toString(requireTypeAtPosition({3, 28})));
}
TEST_CASE_FIXTURE(Fixture, "nonoptional_type_can_narrow_to_nil_if_sense_is_true")
TEST_CASE_FIXTURE(BuiltinsFixture, "nonoptional_type_can_narrow_to_nil_if_sense_is_true")
{
CheckResult result = check(R"(
local t = {"hello"}
@ -558,7 +699,7 @@ TEST_CASE_FIXTURE(Fixture, "nonoptional_type_can_narrow_to_nil_if_sense_is_true"
CHECK_EQ("string", toString(requireTypeAtPosition({12, 24}))); // equivalent to type(v) ~= "nil"
}
TEST_CASE_FIXTURE(Fixture, "typeguard_not_to_be_string")
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_not_to_be_string")
{
CheckResult result = check(R"(
local function f(x: string | number | boolean)
@ -572,11 +713,19 @@ TEST_CASE_FIXTURE(Fixture, "typeguard_not_to_be_string")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("boolean | number", toString(requireTypeAtPosition({3, 28}))); // type(x) ~= "string"
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28}))); // type(x) == "string"
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(boolean | number | string) & ~string", toString(requireTypeAtPosition({3, 28}))); // type(x) ~= "string"
CHECK_EQ("(boolean | number | string) & string", toString(requireTypeAtPosition({5, 28}))); // type(x) == "string"
}
else
{
CHECK_EQ("boolean | number", toString(requireTypeAtPosition({3, 28}))); // type(x) ~= "string"
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28}))); // type(x) == "string"
}
}
TEST_CASE_FIXTURE(Fixture, "typeguard_narrows_for_table")
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_narrows_for_table")
{
CheckResult result = check(R"(
local function f(x: string | {x: number} | {y: boolean})
@ -594,7 +743,7 @@ TEST_CASE_FIXTURE(Fixture, "typeguard_narrows_for_table")
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28}))); // type(x) ~= "table"
}
TEST_CASE_FIXTURE(Fixture, "typeguard_narrows_for_functions")
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_narrows_for_functions")
{
CheckResult result = check(R"(
local function weird(x: string | ((number) -> string))
@ -608,11 +757,19 @@ TEST_CASE_FIXTURE(Fixture, "typeguard_narrows_for_functions")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("(number) -> string", toString(requireTypeAtPosition({3, 28}))); // type(x) == "function"
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28}))); // type(x) ~= "function"
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(((number) -> string) | string) & function", toString(requireTypeAtPosition({3, 28}))); // type(x) == "function"
CHECK_EQ("(((number) -> string) | string) & ~function", toString(requireTypeAtPosition({5, 28}))); // type(x) ~= "function"
}
else
{
CHECK_EQ("(number) -> string", toString(requireTypeAtPosition({3, 28}))); // type(x) == "function"
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28}))); // type(x) ~= "function"
}
}
TEST_CASE_FIXTURE(Fixture, "type_guard_can_filter_for_intersection_of_tables")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_guard_can_filter_for_intersection_of_tables")
{
CheckResult result = check(R"(
type XYCoord = {x: number} & {y: number}
@ -631,7 +788,7 @@ TEST_CASE_FIXTURE(Fixture, "type_guard_can_filter_for_intersection_of_tables")
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 28})));
}
TEST_CASE_FIXTURE(Fixture, "type_guard_can_filter_for_overloaded_function")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_guard_can_filter_for_overloaded_function")
{
CheckResult result = check(R"(
type SomeOverloadedFunction = ((number) -> string) & ((string) -> number)
@ -646,8 +803,16 @@ TEST_CASE_FIXTURE(Fixture, "type_guard_can_filter_for_overloaded_function")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("((number) -> string) & ((string) -> number)", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("((((number) -> string) & ((string) -> number))?) & function", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("((((number) -> string) & ((string) -> number))?) & ~function", toString(requireTypeAtPosition({6, 28})));
}
else
{
CHECK_EQ("((number) -> string) & ((string) -> number)", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 28})));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "type_guard_narrowed_into_nothingness")
@ -715,8 +880,16 @@ TEST_CASE_FIXTURE(Fixture, "not_a_and_not_b")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("nil", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({4, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(number?) & ~~(false?)", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(number?) & ~~(false?)", toString(requireTypeAtPosition({4, 28})));
}
else
{
CHECK_EQ("nil", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({4, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "not_a_and_not_b2")
@ -732,11 +905,19 @@ TEST_CASE_FIXTURE(Fixture, "not_a_and_not_b2")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("nil", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({4, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(number?) & ~~(false?)", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(number?) & ~~(false?)", toString(requireTypeAtPosition({4, 28})));
}
else
{
CHECK_EQ("nil", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({4, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "either_number_or_string")
TEST_CASE_FIXTURE(BuiltinsFixture, "either_number_or_string")
{
CheckResult result = check(R"(
local function f(x: any)
@ -748,7 +929,14 @@ TEST_CASE_FIXTURE(Fixture, "either_number_or_string")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("number | string", toString(requireTypeAtPosition({3, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(number | string) & any", toString(requireTypeAtPosition({3, 28})));
}
else
{
CHECK_EQ("number | string", toString(requireTypeAtPosition({3, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "not_t_or_some_prop_of_t")
@ -798,10 +986,17 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "merge_should_be_fully_agnostic_of_hashmap_or
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("string", toString(requireTypeAtPosition({6, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(string | {| x: string |}) & string", toString(requireTypeAtPosition({6, 28})));
}
else
{
CHECK_EQ("string", toString(requireTypeAtPosition({6, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "refine_the_correct_types_opposite_of_when_a_is_not_number_or_string")
TEST_CASE_FIXTURE(BuiltinsFixture, "refine_the_correct_types_opposite_of_when_a_is_not_number_or_string")
{
CheckResult result = check(R"(
local function f(a: string | number | boolean)
@ -815,8 +1010,16 @@ TEST_CASE_FIXTURE(Fixture, "refine_the_correct_types_opposite_of_when_a_is_not_n
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("boolean", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("number | string", toString(requireTypeAtPosition({5, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(boolean | number | string) & ~number & ~string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(boolean | number | string) & (number | string)", toString(requireTypeAtPosition({5, 28})));
}
else
{
CHECK_EQ("boolean", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("number | string", toString(requireTypeAtPosition({5, 28})));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "is_truthy_constraint_ifelse_expression")
@ -847,7 +1050,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "invert_is_truthy_constraint_ifelse_expressio
CHECK_EQ("string", toString(requireTypeAtPosition({2, 50})));
}
TEST_CASE_FIXTURE(Fixture, "type_comparison_ifelse_expression")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_comparison_ifelse_expression")
{
CheckResult result = check(R"(
function returnOne(x)
@ -879,7 +1082,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "correctly_lookup_a_shadowed_local_that_which
CHECK_EQ("Type 'number' does not have key 'sub'", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "correctly_lookup_property_whose_base_was_previously_refined")
TEST_CASE_FIXTURE(BuiltinsFixture, "correctly_lookup_property_whose_base_was_previously_refined")
{
CheckResult result = check(R"(
type T = {x: string | number}
@ -1098,8 +1301,16 @@ TEST_CASE_FIXTURE(RefinementClassFixture, "typeguard_cast_instance_or_vector3_to
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("Vector3", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("Instance", toString(requireTypeAtPosition({5, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(Instance | Vector3) & Vector3", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(Instance | Vector3) & ~Vector3", toString(requireTypeAtPosition({5, 28})));
}
else
{
CHECK_EQ("Vector3", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("Instance", toString(requireTypeAtPosition({5, 28})));
}
}
TEST_CASE_FIXTURE(RefinementClassFixture, "type_narrow_for_all_the_userdata")
@ -1134,14 +1345,22 @@ TEST_CASE_FIXTURE(RefinementClassFixture, "eliminate_subclasses_of_instance")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("Folder | Part", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(Folder | Part | string) & Instance", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(Folder | Part | string) & ~Instance", toString(requireTypeAtPosition({5, 28})));
}
else
{
CHECK_EQ("Folder | Part", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("string", toString(requireTypeAtPosition({5, 28})));
}
}
TEST_CASE_FIXTURE(RefinementClassFixture, "narrow_this_large_union")
TEST_CASE_FIXTURE(RefinementClassFixture, "narrow_from_subclasses_of_instance_or_string_or_vector3")
{
CheckResult result = check(R"(
local function f(x: Part | Folder | Instance | string | Vector3 | any)
local function f(x: Part | Folder | string | Vector3)
if typeof(x) == "Instance" then
local foo = x
else
@ -1152,8 +1371,16 @@ TEST_CASE_FIXTURE(RefinementClassFixture, "narrow_this_large_union")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("Folder | Instance | Part", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("Vector3 | any | string", toString(requireTypeAtPosition({5, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("(Folder | Part | Vector3 | string) & Instance", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("(Folder | Part | Vector3 | string) & ~Instance", toString(requireTypeAtPosition({5, 28})));
}
else
{
CHECK_EQ("Folder | Part", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("Vector3 | string", toString(requireTypeAtPosition({5, 28})));
}
}
TEST_CASE_FIXTURE(RefinementClassFixture, "x_as_any_if_x_is_instance_elseif_x_is_table")
@ -1194,7 +1421,7 @@ TEST_CASE_FIXTURE(RefinementClassFixture, "x_is_not_instance_or_else_not_part")
CHECK_EQ("Part", toString(requireTypeAtPosition({5, 28})));
}
TEST_CASE_FIXTURE(Fixture, "typeguard_doesnt_leak_to_elseif")
TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_doesnt_leak_to_elseif")
{
CheckResult result = check(R"(
function f(a)
@ -1225,8 +1452,16 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknowns")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("unknown", toString(requireTypeAtPosition({5, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("unknown & string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("unknown & ~string", toString(requireTypeAtPosition({5, 28})));
}
else
{
CHECK_EQ("string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("unknown", toString(requireTypeAtPosition({5, 28})));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "falsiness_of_TruthyPredicate_narrows_into_nil")
@ -1260,7 +1495,35 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "what_nonsensical_condition")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("never", toString(requireTypeAtPosition({3, 28})));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("a & number & string", toString(requireTypeAtPosition({3, 28})));
}
else
{
CHECK_EQ("never", toString(requireTypeAtPosition({3, 28})));
}
}
TEST_CASE_FIXTURE(Fixture, "else_with_no_explicit_expression_should_also_refine_the_tagged_union")
{
ScopedFastFlag sff{"LuauImplicitElseRefinement", true};
CheckResult result = check(R"(
type Ok<T> = { tag: "ok", value: T }
type Err<E> = { tag: "err", err: E }
type Result<T, E> = Ok<T> | Err<E>
function and_then<T, U, E>(r: Result<T, E>, f: (T) -> U): Result<U, E>
if r.tag == "ok" then
return { tag = "ok", value = f(r.value) }
else
return r
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View file

@ -79,6 +79,16 @@ TEST_CASE_FIXTURE(Fixture, "string_singleton_subtype")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "string_singleton_subtype_multi_assignment")
{
CheckResult result = check(R"(
local a: "foo" = "foo"
local b: string, c: number = a, 10
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "function_call_with_singletons")
{
CheckResult result = check(R"(

View file

@ -17,7 +17,7 @@ using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
LUAU_FASTFLAG(LuauNoMoreGlobalSingletonTypes)
TEST_SUITE_BEGIN("TableTests");
@ -1722,6 +1722,8 @@ TEST_CASE_FIXTURE(Fixture, "hide_table_error_properties")
TEST_CASE_FIXTURE(BuiltinsFixture, "builtin_table_names")
{
ScopedFastFlag luauNewLibraryTypeNames{"LuauNewLibraryTypeNames", true};
CheckResult result = check(R"(
os.h = 2
string.k = 3
@ -1729,19 +1731,36 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "builtin_table_names")
LUAU_REQUIRE_ERROR_COUNT(2, result);
CHECK_EQ("Cannot add property 'h' to table 'os'", toString(result.errors[0]));
CHECK_EQ("Cannot add property 'k' to table 'string'", toString(result.errors[1]));
if (FFlag::LuauNoMoreGlobalSingletonTypes)
{
CHECK_EQ("Cannot add property 'h' to table 'typeof(os)'", toString(result.errors[0]));
CHECK_EQ("Cannot add property 'k' to table 'typeof(string)'", toString(result.errors[1]));
}
else
{
CHECK_EQ("Cannot add property 'h' to table 'os'", toString(result.errors[0]));
CHECK_EQ("Cannot add property 'k' to table 'string'", toString(result.errors[1]));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "persistent_sealed_table_is_immutable")
{
ScopedFastFlag luauNewLibraryTypeNames{"LuauNewLibraryTypeNames", true};
CheckResult result = check(R"(
--!nonstrict
function os:bad() end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Cannot add property 'bad' to table 'os'", toString(result.errors[0]));
if (FFlag::LuauNoMoreGlobalSingletonTypes)
{
CHECK_EQ("Cannot add property 'bad' to table 'typeof(os)'", toString(result.errors[0]));
}
else
{
CHECK_EQ("Cannot add property 'bad' to table 'os'", toString(result.errors[0]));
}
const TableTypeVar* osType = get<TableTypeVar>(requireType("os"));
REQUIRE(osType != nullptr);
@ -3189,6 +3208,7 @@ TEST_CASE_FIXTURE(Fixture, "scalar_is_a_subtype_of_a_compatible_polymorphic_shap
TEST_CASE_FIXTURE(Fixture, "scalar_is_not_a_subtype_of_a_compatible_polymorphic_shape_type")
{
ScopedFastFlag sff{"LuauScalarShapeSubtyping", true};
ScopedFastFlag luauNewLibraryTypeNames{"LuauNewLibraryTypeNames", true};
CheckResult result = check(R"(
local function f(s)
@ -3201,25 +3221,47 @@ TEST_CASE_FIXTURE(Fixture, "scalar_is_not_a_subtype_of_a_compatible_polymorphic_
)");
LUAU_REQUIRE_ERROR_COUNT(3, result);
CHECK_EQ(R"(Type 'string' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
if (FFlag::LuauNoMoreGlobalSingletonTypes)
{
CHECK_EQ(R"(Type 'string' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'typeof(string)' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[0]));
CHECK_EQ(R"(Type '"bar"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'typeof(string)' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[1]));
CHECK_EQ(R"(Type '"bar" | "baz"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
Not all union options are compatible. Type '"bar"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'typeof(string)' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[2]));
}
else
{
CHECK_EQ(R"(Type 'string' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'string' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[0]));
CHECK_EQ(R"(Type '"bar"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
toString(result.errors[0]));
CHECK_EQ(R"(Type '"bar"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'string' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[1]));
CHECK_EQ(R"(Type '"bar" | "baz"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
toString(result.errors[1]));
CHECK_EQ(R"(Type '"bar" | "baz"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
Not all union options are compatible. Type '"bar"' could not be converted into 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}'
caused by:
The former's metatable does not satisfy the requirements. Table type 'string' not compatible with type 't1 where t1 = {- absolutely_no_scalar_has_this_method: (t1) -> (a...) -}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[2]));
toString(result.errors[2]));
}
}
TEST_CASE_FIXTURE(Fixture, "a_free_shape_can_turn_into_a_scalar_if_it_is_compatible")
{
ScopedFastFlag sff{"LuauScalarShapeSubtyping", true};
ScopedFastFlag luauScalarShapeUnifyToMtOwner{"LuauScalarShapeUnifyToMtOwner", true}; // Changes argument from table type to primitive
CheckResult result = check(R"(
local function f(s): string
@ -3235,6 +3277,7 @@ TEST_CASE_FIXTURE(Fixture, "a_free_shape_can_turn_into_a_scalar_if_it_is_compati
TEST_CASE_FIXTURE(Fixture, "a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible")
{
ScopedFastFlag sff{"LuauScalarShapeSubtyping", true};
ScopedFastFlag luauNewLibraryTypeNames{"LuauNewLibraryTypeNames", true};
CheckResult result = check(R"(
local function f(s): string
@ -3244,11 +3287,42 @@ TEST_CASE_FIXTURE(Fixture, "a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(R"(Type 't1 where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}' could not be converted into 'string'
if (FFlag::LuauNoMoreGlobalSingletonTypes)
{
CHECK_EQ(R"(Type 't1 where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}' could not be converted into 'string'
caused by:
The former's metatable does not satisfy the requirements. Table type 'typeof(string)' not compatible with type 't1 where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[0]));
CHECK_EQ("<a, b...>(t1) -> string where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}", toString(requireType("f")));
}
else
{
CHECK_EQ(R"(Type 't1 where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}' could not be converted into 'string'
caused by:
The former's metatable does not satisfy the requirements. Table type 'string' not compatible with type 't1 where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}' because the former is missing field 'absolutely_no_scalar_has_this_method')",
toString(result.errors[0]));
CHECK_EQ("<a, b...>(t1) -> string where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}", toString(requireType("f")));
toString(result.errors[0]));
CHECK_EQ("<a, b...>(t1) -> string where t1 = {+ absolutely_no_scalar_has_this_method: (t1) -> (a, b...) +}", toString(requireType("f")));
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "a_free_shape_can_turn_into_a_scalar_directly")
{
ScopedFastFlag luauScalarShapeSubtyping{"LuauScalarShapeSubtyping", true};
ScopedFastFlag luauScalarShapeUnifyToMtOwner{"LuauScalarShapeUnifyToMtOwner", true};
CheckResult result = check(R"(
local function stringByteList(str)
local out = {}
for i = 1, #str do
table.insert(out, string.byte(str, i))
end
return table.concat(out, ",")
end
local x = stringByteList("xoo")
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "invariant_table_properties_means_instantiating_tables_in_call_is_unsound")
@ -3272,11 +3346,12 @@ TEST_CASE_FIXTURE(Fixture, "invariant_table_properties_means_instantiating_table
LUAU_REQUIRE_NO_ERRORS(result);
// TODO: test behavior is wrong until we can re-enable the covariant requirement for instantiation in subtyping
// LUAU_REQUIRE_ERRORS(result);
// CHECK_EQ(toString(result.errors[0]), R"(Type 't' could not be converted into '{| m: (number) -> number |}'
// caused by:
// Property 'm' is not compatible. Type '<a>(a) -> a' could not be converted into '(number) -> number'; different number of generic type parameters)");
// // this error message is not great since the underlying issue is that the context is invariant,
// LUAU_REQUIRE_ERRORS(result);
// CHECK_EQ(toString(result.errors[0]), R"(Type 't' could not be converted into '{| m: (number) -> number |}'
// caused by:
// Property 'm' is not compatible. Type '<a>(a) -> a' could not be converted into '(number) -> number'; different number of generic type
// parameters)");
// // this error message is not great since the underlying issue is that the context is invariant,
// and `(number) -> number` cannot be a subtype of `<a>(a) -> a`.
}
@ -3335,10 +3410,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "tables_should_be_fully_populated")
ToStringOptions opts;
opts.exhaustive = true;
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("{ x: *error-type*, y: number }", toString(requireType("t"), opts));
else
CHECK_EQ("{ x: <error-type>, y: number }", toString(requireType("t"), opts));
CHECK_EQ("{ x: *error-type*, y: number }", toString(requireType("t"), opts));
}
TEST_SUITE_END();

View file

@ -17,7 +17,6 @@
LUAU_FASTFLAG(LuauFixLocationSpanTableIndexExpr);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauInstantiateInSubtyping);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
using namespace Luau;
@ -238,20 +237,10 @@ TEST_CASE_FIXTURE(Fixture, "type_errors_infer_types")
// TODO: Should we assert anything about these tests when DCR is being used?
if (!FFlag::DebugLuauDeferredConstraintResolution)
{
if (FFlag::LuauSpecialTypesAsterisked)
{
CHECK_EQ("*error-type*", toString(requireType("c")));
CHECK_EQ("*error-type*", toString(requireType("d")));
CHECK_EQ("*error-type*", toString(requireType("e")));
CHECK_EQ("*error-type*", toString(requireType("f")));
}
else
{
CHECK_EQ("<error-type>", toString(requireType("c")));
CHECK_EQ("<error-type>", toString(requireType("d")));
CHECK_EQ("<error-type>", toString(requireType("e")));
CHECK_EQ("<error-type>", toString(requireType("f")));
}
CHECK_EQ("*error-type*", toString(requireType("c")));
CHECK_EQ("*error-type*", toString(requireType("d")));
CHECK_EQ("*error-type*", toString(requireType("e")));
CHECK_EQ("*error-type*", toString(requireType("f")));
}
}
@ -662,10 +651,7 @@ TEST_CASE_FIXTURE(Fixture, "no_stack_overflow_from_isoptional")
std::optional<TypeFun> t0 = getMainModule()->getModuleScope()->lookupType("t0");
REQUIRE(t0);
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(t0->type));
else
CHECK_EQ("<error-type>", toString(t0->type));
CHECK_EQ("*error-type*", toString(t0->type));
auto it = std::find_if(result.errors.begin(), result.errors.end(), [](TypeError& err) {
return get<OccursCheckFailed>(err);
@ -1139,43 +1125,6 @@ TEST_CASE_FIXTURE(Fixture, "bidirectional_checking_of_higher_order_function")
CHECK(location.end.line == 4);
}
TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint")
{
ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true},
};
CheckResult result = check(R"(
local function hasDivisors(value: number)
end
function prime_iter(state, index)
hasDivisors(index)
index += 1
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// Solving this requires recognizing that we can't dispatch a constraint
// like this without doing further work:
//
// (*blocked*) -> () <: (number) -> (b...)
//
// We solve this by searching both types for BlockedTypeVars and block the
// constraint on any we find. It also gets the job done, but I'm worried
// about the efficiency of doing so many deep type traversals and it may
// make us more prone to getting stuck on constraint cycles.
//
// If this doesn't pan out, a possible solution is to go further down the
// path of supporting partial constraint dispatch. The way it would work is
// that we'd dispatch the above constraint by binding b... to (), but we
// would append a new constraint, `number <: *blocked*`, to the constraint set
// to be solved later. This should be faster and theoretically less prone
// to cyclic constraint dependencies.
CHECK("<a>(a, number) -> ()" == toString(requireType("prime_iter")));
}
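// A minimal, self-contained sketch of the blocking strategy the comment above
// describes: walk both sides of a subtype constraint and, if any still-blocked
// types are found, park the constraint on them instead of dispatching it. All
// names below (ToyType, ToyConstraint, ToySolver, collectBlocked) are invented
// for illustration only; they are not Luau APIs or part of this commit.
#include <unordered_map>
#include <vector>

struct ToyType
{
    bool blocked = false;             // stands in for a BlockedTypeVar
    std::vector<ToyType*> components; // e.g. a function type's parameters and returns
};

// Deep traversal collecting every blocked type reachable from ty.
// (Assumes acyclic toy types; real Luau types can be cyclic.)
void collectBlocked(ToyType* ty, std::vector<ToyType*>& out)
{
    if (ty->blocked)
        out.push_back(ty);

    for (ToyType* part : ty->components)
        collectBlocked(part, out);
}

struct ToyConstraint
{
    ToyType* subType;
    ToyType* superType;
};

struct ToySolver
{
    // Constraints parked until the keyed type is unblocked.
    std::unordered_map<ToyType*, std::vector<ToyConstraint*>> blockedOn;

    // Returns true if the constraint was dispatched, false if it had to be blocked.
    bool tryDispatch(ToyConstraint* c)
    {
        std::vector<ToyType*> blocked;
        collectBlocked(c->subType, blocked);
        collectBlocked(c->superType, blocked);

        if (!blocked.empty())
        {
            for (ToyType* ty : blocked)
                blockedOn[ty].push_back(c); // revisit once ty gets a real type

            return false;
        }

        // ... the actual subtyping work would happen here ...
        return true;
    }
};

// When a blocked type is later resolved, a solver built this way would clear its
// blocked flag and re-queue every constraint stored in blockedOn[thatType].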
TEST_CASE_FIXTURE(BuiltinsFixture, "it_is_ok_to_have_inconsistent_number_of_return_values_in_nonstrict")
{
CheckResult result = check(R"(

View file

@ -9,8 +9,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
struct TryUnifyFixture : Fixture
{
TypeArena arena;
@ -124,10 +122,7 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "members_of_failed_typepack_unification_are_u
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("a", toString(requireType("a")));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("b")));
else
CHECK_EQ("<error-type>", toString(requireType("b")));
CHECK_EQ("*error-type*", toString(requireType("b")));
}
TEST_CASE_FIXTURE(TryUnifyFixture, "result_of_failed_typepack_unification_is_constrained")
@ -142,10 +137,7 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "result_of_failed_typepack_unification_is_con
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("a", toString(requireType("a")));
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("b")));
else
CHECK_EQ("<error-type>", toString(requireType("b")));
CHECK_EQ("*error-type*", toString(requireType("b")));
CHECK_EQ("number", toString(requireType("c")));
}

View file

@ -1002,8 +1002,6 @@ TEST_CASE_FIXTURE(Fixture, "unify_variadic_tails_in_arguments_free")
TEST_CASE_FIXTURE(BuiltinsFixture, "type_packs_with_tails_in_vararg_adjustment")
{
ScopedFastFlag luauFixVarargExprHeadType{"LuauFixVarargExprHeadType", true};
CheckResult result = check(R"(
local function wrapReject<TArg, TResult>(fn: (self: any, ...TArg) -> ...TResult): (self: any, ...TArg) -> ...TResult
return function(self, ...)

View file

@ -6,8 +6,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
using namespace Luau;
TEST_SUITE_BEGIN("UnionTypes");
@ -199,10 +197,7 @@ TEST_CASE_FIXTURE(Fixture, "index_on_a_union_type_with_missing_property")
CHECK_EQ(mup->missing[0], *bTy);
CHECK_EQ(mup->key, "x");
if (FFlag::LuauSpecialTypesAsterisked)
CHECK_EQ("*error-type*", toString(requireType("r")));
else
CHECK_EQ("<error-type>", toString(requireType("r")));
CHECK_EQ("*error-type*", toString(requireType("r")));
}
TEST_CASE_FIXTURE(Fixture, "index_on_a_union_type_with_one_property_of_type_any")
@ -400,6 +395,23 @@ local e = a.z
CHECK_EQ("Type 'A | B | C | D' does not have key 'z'", toString(result.errors[3]));
}
TEST_CASE_FIXTURE(Fixture, "optional_iteration")
{
ScopedFastFlag luauNilIterator{"LuauNilIterator", true};
CheckResult result = check(R"(
function foo(values: {number}?)
local s = 0
for _, value in values do
s += value
end
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Value of type '{number}?' could be nil", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "unify_unsealed_table_union_check")
{
CheckResult result = check(R"(

View file

@ -284,6 +284,7 @@ TEST_CASE_FIXTURE(Fixture, "dont_unify_operands_if_one_of_the_operand_is_never_i
ScopedFastFlag sff[]{
{"LuauUnknownAndNeverType", true},
{"LuauNeverTypesAndOperatorsInference", true},
{"LuauTryhardAnd", true},
};
CheckResult result = check(R"(
@ -293,7 +294,8 @@ TEST_CASE_FIXTURE(Fixture, "dont_unify_operands_if_one_of_the_operand_is_never_i
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("<a>(nil, a) -> boolean", toString(requireType("ord")));
// Widening doesn't normalize yet, so the result is a bit strange
CHECK_EQ("<a>(nil, a) -> boolean | boolean", toString(requireType("ord")));
}
TEST_CASE_FIXTURE(Fixture, "math_operators_and_never")

View file

@ -217,4 +217,35 @@ TEST_CASE("Visit")
CHECK(r3 == "1231147");
}
struct MoveOnly
{
MoveOnly() = default;
MoveOnly(const MoveOnly&) = delete;
MoveOnly& operator=(const MoveOnly&) = delete;
MoveOnly(MoveOnly&&) = default;
MoveOnly& operator=(MoveOnly&&) = default;
};
TEST_CASE("Move")
{
Variant<MoveOnly> v1 = MoveOnly{};
Variant<MoveOnly> v2 = std::move(v1);
}
TEST_CASE("MoveWithCopyableAlternative")
{
Variant<std::string, MoveOnly> v1 = std::string{"Hello, world! I am longer than a normal hello world string to avoid SSO."};
Variant<std::string, MoveOnly> v2 = std::move(v1);
std::string* s1 = get_if<std::string>(&v1);
REQUIRE(s1);
CHECK(*s1 == "");
std::string* s2 = get_if<std::string>(&v2);
REQUIRE(s2);
CHECK(*s2 == "Hello, world! I am longer than a normal hello world string to avoid SSO.");
}
TEST_SUITE_END();

View file

@ -10,7 +10,9 @@ AnnotationTests.too_many_type_params
AnnotationTests.two_type_params
AnnotationTests.unknown_type_reference_generates_error
AstQuery.last_argument_function_call_type
AstQuery::getDocumentationSymbolAtPosition.overloaded_class_method
AstQuery::getDocumentationSymbolAtPosition.overloaded_fn
AstQuery::getDocumentationSymbolAtPosition.table_overloaded_function_prop
AutocompleteTest.autocomplete_first_function_arg_expected_type
AutocompleteTest.autocomplete_interpolated_string
AutocompleteTest.autocomplete_oop_implicit_self
@ -30,10 +32,8 @@ AutocompleteTest.type_correct_expected_argument_type_suggestion_optional
AutocompleteTest.type_correct_expected_argument_type_suggestion_self
AutocompleteTest.type_correct_expected_return_type_pack_suggestion
AutocompleteTest.type_correct_expected_return_type_suggestion
AutocompleteTest.type_correct_full_type_suggestion
AutocompleteTest.type_correct_function_no_parenthesis
AutocompleteTest.type_correct_function_return_types
AutocompleteTest.type_correct_function_type_suggestion
AutocompleteTest.type_correct_keywords
AutocompleteTest.type_correct_suggestion_for_overloads
AutocompleteTest.type_correct_suggestion_in_argument
@ -51,12 +51,10 @@ BuiltinTests.dont_add_definitions_to_persistent_types
BuiltinTests.find_capture_types
BuiltinTests.find_capture_types2
BuiltinTests.find_capture_types3
BuiltinTests.gmatch_capture_types
BuiltinTests.gmatch_capture_types2
BuiltinTests.gmatch_capture_types_balanced_escaped_parens
BuiltinTests.gmatch_capture_types_default_capture
BuiltinTests.gmatch_capture_types_parens_in_sets_are_ignored
BuiltinTests.gmatch_capture_types_set_containing_lbracket
BuiltinTests.gmatch_definition
BuiltinTests.ipairs_iterator_should_infer_types_and_type_check
BuiltinTests.match_capture_types
@ -72,13 +70,12 @@ BuiltinTests.set_metatable_needs_arguments
BuiltinTests.setmetatable_should_not_mutate_persisted_types
BuiltinTests.sort_with_bad_predicate
BuiltinTests.string_format_arg_count_mismatch
BuiltinTests.string_format_arg_types_inference
BuiltinTests.string_format_as_method
BuiltinTests.string_format_correctly_ordered_types
BuiltinTests.string_format_report_all_type_errors_at_correct_positions
BuiltinTests.string_format_use_correct_argument
BuiltinTests.string_format_use_correct_argument2
BuiltinTests.string_format_use_correct_argument3
BuiltinTests.strings_have_methods
BuiltinTests.table_freeze_is_generic
BuiltinTests.table_insert_correctly_infers_type_of_array_2_args_overload
BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload
@ -106,17 +103,13 @@ GenericsTests.correctly_instantiate_polymorphic_member_functions
GenericsTests.do_not_infer_generic_functions
GenericsTests.duplicate_generic_type_packs
GenericsTests.duplicate_generic_types
GenericsTests.factories_of_generics
GenericsTests.generic_argument_count_too_few
GenericsTests.generic_argument_count_too_many
GenericsTests.generic_factories
GenericsTests.generic_functions_in_types
GenericsTests.generic_functions_should_be_memory_safe
GenericsTests.generic_table_method
GenericsTests.generic_type_pack_parentheses
GenericsTests.generic_type_pack_unification1
GenericsTests.generic_type_pack_unification2
GenericsTests.generic_type_pack_unification3
GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments
GenericsTests.infer_generic_function_function_argument
GenericsTests.infer_generic_function_function_argument_overloaded
@ -172,61 +165,39 @@ ProvisionalTests.table_insert_with_a_singleton_argument
ProvisionalTests.typeguard_inference_incomplete
ProvisionalTests.weirditer_should_not_loop_forever
ProvisionalTests.while_body_are_also_refined
RefinementTest.and_constraint
RefinementTest.apply_refinements_on_astexprindexexpr_whose_subscript_expr_is_constant_string
RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number
RefinementTest.assert_non_binary_expressions_actually_resolve_constraints
RefinementTest.call_a_more_specific_function_using_typeguard
RefinementTest.call_an_incompatible_function_after_using_typeguard
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined2
RefinementTest.discriminate_from_isa_of_x
RefinementTest.discriminate_from_truthiness_of_x
RefinementTest.discriminate_on_properties_of_disjoint_tables_where_that_property_is_true_or_false
RefinementTest.discriminate_tag
RefinementTest.either_number_or_string
RefinementTest.eliminate_subclasses_of_instance
RefinementTest.else_with_no_explicit_expression_should_also_refine_the_tagged_union
RefinementTest.falsiness_of_TruthyPredicate_narrows_into_nil
RefinementTest.index_on_a_refined_property
RefinementTest.invert_is_truthy_constraint
RefinementTest.invert_is_truthy_constraint_ifelse_expression
RefinementTest.is_truthy_constraint
RefinementTest.is_truthy_constraint_ifelse_expression
RefinementTest.lvalue_is_not_nil
RefinementTest.merge_should_be_fully_agnostic_of_hashmap_ordering
RefinementTest.narrow_boolean_to_true_or_false
RefinementTest.narrow_property_of_a_bounded_variable
RefinementTest.narrow_this_large_union
RefinementTest.nonoptional_type_can_narrow_to_nil_if_sense_is_true
RefinementTest.not_a_and_not_b
RefinementTest.not_a_and_not_b2
RefinementTest.not_and_constraint
RefinementTest.not_t_or_some_prop_of_t
RefinementTest.or_predicate_with_truthy_predicates
RefinementTest.parenthesized_expressions_are_followed_through
RefinementTest.refine_a_property_not_to_be_nil_through_an_intersection_table
RefinementTest.refine_the_correct_types_opposite_of_when_a_is_not_number_or_string
RefinementTest.refine_unknowns
RefinementTest.term_is_equal_to_an_lvalue
RefinementTest.truthy_constraint_on_properties
RefinementTest.type_assertion_expr_carry_its_constraints
RefinementTest.type_comparison_ifelse_expression
RefinementTest.type_guard_can_filter_for_intersection_of_tables
RefinementTest.type_guard_can_filter_for_overloaded_function
RefinementTest.type_guard_narrowed_into_nothingness
RefinementTest.type_narrow_for_all_the_userdata
RefinementTest.type_narrow_to_vector
RefinementTest.typeguard_cast_free_table_to_vector
RefinementTest.typeguard_cast_instance_or_vector3_to_vector
RefinementTest.typeguard_doesnt_leak_to_elseif
RefinementTest.typeguard_in_assert_position
RefinementTest.typeguard_in_if_condition_position
RefinementTest.typeguard_narrows_for_functions
RefinementTest.typeguard_narrows_for_table
RefinementTest.typeguard_not_to_be_string
RefinementTest.what_nonsensical_condition
RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table
RefinementTest.x_is_not_instance_or_else_not_part
RuntimeLimits.typescript_port_of_Result_type
TableTests.a_free_shape_can_turn_into_a_scalar_directly
TableTests.a_free_shape_can_turn_into_a_scalar_if_it_is_compatible
TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible
TableTests.access_index_metamethod_that_returns_variadic
@ -262,7 +233,6 @@ TableTests.generic_table_instantiation_potential_regression
TableTests.getmetatable_returns_pointer_to_metatable
TableTests.give_up_after_one_metatable_index_look_up
TableTests.hide_table_error_properties
TableTests.indexer_fn
TableTests.indexer_on_sealed_table_must_unify_with_free_table
TableTests.indexing_from_a_table_should_prefer_properties_when_possible
TableTests.inequality_operators_imply_exactly_matching_types
@ -271,10 +241,10 @@ TableTests.infer_indexer_from_value_property_in_literal
TableTests.inferred_return_type_of_free_table
TableTests.inferring_crazy_table_should_also_be_quick
TableTests.instantiate_table_cloning_3
TableTests.invariant_table_properties_means_instantiating_tables_in_assignment_is_unsound
TableTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
TableTests.leaking_bad_metatable_errors
TableTests.less_exponential_blowup_please
TableTests.meta_add
TableTests.meta_add_both_ways
TableTests.meta_add_inferred
TableTests.metatable_mismatch_should_fail
@ -315,7 +285,6 @@ TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors
TableTests.tables_get_names_from_their_locals
TableTests.tc_member_function
TableTests.tc_member_function_2
TableTests.top_table_type
TableTests.type_mismatch_on_massive_table_is_cut_short
TableTests.unification_of_unions_in_a_self_referential_type
TableTests.unifying_tables_shouldnt_uaf2
@ -402,7 +371,6 @@ TypeInferFunctions.improved_function_arg_mismatch_error_nonstrict
TypeInferFunctions.improved_function_arg_mismatch_errors
TypeInferFunctions.infer_anonymous_function_arguments
TypeInferFunctions.infer_return_type_from_selected_overload
TypeInferFunctions.infer_return_value_type
TypeInferFunctions.infer_that_function_does_not_return_a_table
TypeInferFunctions.list_all_overloads_if_no_overload_takes_given_argument_count
TypeInferFunctions.list_only_alternative_overloads_that_match_argument_count
@ -417,20 +385,21 @@ TypeInferFunctions.too_few_arguments_variadic
TypeInferFunctions.too_few_arguments_variadic_generic
TypeInferFunctions.too_few_arguments_variadic_generic2
TypeInferFunctions.too_many_arguments
TypeInferFunctions.too_many_arguments_error_location
TypeInferFunctions.too_many_return_values
TypeInferFunctions.too_many_return_values_in_parentheses
TypeInferFunctions.too_many_return_values_no_function
TypeInferFunctions.vararg_function_is_quantified
TypeInferLoops.for_in_loop
TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_values
TypeInferLoops.for_in_loop_with_custom_iterator
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_generic_next
TypeInferLoops.for_in_with_just_one_iterator_is_ok
TypeInferLoops.loop_iter_metamethod_ok_with_inference
TypeInferLoops.loop_iter_no_indexer_nonstrict
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.unreachable_code_after_infinite_loop
TypeInferLoops.varlist_declared_by_for_in_loop_should_be_free
TypeInferModules.bound_free_table_export_is_ok
TypeInferModules.custom_require_global
TypeInferModules.do_not_modify_imported_types
TypeInferModules.module_type_conflict
@ -443,8 +412,7 @@ TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_not_defined_with_colon
TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
TypeInferOOP.methods_are_topologically_sorted
TypeInferOperators.and_or_ternary
TypeInferOperators.CallAndOrOfFunctions
TypeInferOOP.object_constructor_can_refer_to_method_of_self
TypeInferOperators.cannot_compare_tables_that_do_not_have_the_same_metatable
TypeInferOperators.cannot_indirectly_compare_types_that_do_not_have_a_metatable
TypeInferOperators.cannot_indirectly_compare_types_that_do_not_offer_overloaded_ordering_operators
@ -530,6 +498,7 @@ UnionTypes.optional_assignment_errors
UnionTypes.optional_call_error
UnionTypes.optional_field_access_error
UnionTypes.optional_index_error
UnionTypes.optional_iteration
UnionTypes.optional_length_error
UnionTypes.optional_missing_key_error_details
UnionTypes.optional_union_follow

View file

@ -32,6 +32,9 @@ source = """// This file is part of the Luau programming language and is license
"""
function = ""
signature = ""
includeInsts = ["LOP_NEWCLOSURE", "LOP_NAMECALL", "LOP_FORGPREP", "LOP_GETVARARGS", "LOP_DUPCLOSURE", "LOP_PREPVARARGS", "LOP_COVERAGE", "LOP_BREAK", "LOP_GETGLOBAL", "LOP_SETGLOBAL", "LOP_GETTABLEKS", "LOP_SETTABLEKS"]
state = 0
@ -44,7 +47,6 @@ for line in input:
if match:
inst = match[1]
signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, StkId base, TValue* k)"
header += signature + ";\n"
function = signature + "\n"
function += "{\n"
function += " [[maybe_unused]] Closure* cl = clvalue(L->ci->func);\n"
@ -84,7 +86,10 @@ for line in input:
function = function[:-len(finalline)]
function += " return pc;\n}\n"
source += function + "\n"
if inst in includeInsts:
header += signature + ";\n"
source += function + "\n"
state = 0
# skip LUA_CUSTOM_EXECUTION code blocks

View file

@ -0,0 +1,173 @@
#!/usr/bin/python
# This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
# The purpose of this script is to analyze disassembly generated by objdump or
# dumpbin to print (or to compare) the stack usage of functions/methods.
# This is a quickly written script, so it is quite possible it may not handle
# all code properly.
#
# The script expects the user to create a text assembly dump to be passed to
# the script.
#
# objdump Example
# objdump --demangle --disassemble objfile.o > objfile.s
#
# dumpbin Example
# dumpbin /disasm objfile.obj > objfile.s
#
# If the script is passed a single file, then all stack size information that
# is found is printed. If two files are passed, then the script compares the
# stack usage of the two files (useful for A/B comparisons).
# Currently more than two input files are not supported. (But adding support shouldn't
# be very difficult.)
#
# Note: The script only handles x64 disassembly. Supporting x86 is likely
# trivial, but ARM support could be difficult.
# Thus far the script has been tested with MSVC on Win64 and clang on OSX.
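#
# Example invocations (the script file name here is illustrative and not taken
# from this commit; the flags correspond to the argparse options defined below):
#
#   python stack_usage.py --format objdump --input objfile.s
#   python stack_usage.py --format dumpbin --input before.s --input after.s --only-diffs --md-output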
import argparse
import re
blank_re = re.compile('\s*')
class LineReader:
    def __init__(self, lines):
        self.lines = list(reversed(lines))

    def get_line(self):
        return self.lines.pop(-1)

    def peek_line(self):
        return self.lines[-1]

    def consume_blank_lines(self):
        while blank_re.fullmatch(self.peek_line()):
            self.get_line()

    def is_empty(self):
        return len(self.lines) == 0


def parse_objdump_assembly(in_file):
    results = {}

    text_section_re = re.compile('Disassembly of section __TEXT,__text:\s*')
    symbol_re = re.compile('[^<]*<(.*)>:\s*')
    stack_alloc = re.compile('.*subq\s*\$(\d*), %rsp\s*')

    lr = LineReader(in_file.readlines())

    def find_stack_alloc_size():
        while True:
            if lr.is_empty():
                return None
            if blank_re.fullmatch(lr.peek_line()):
                return None
            line = lr.get_line()
            mo = stack_alloc.fullmatch(line)
            if mo:
                lr.consume_blank_lines()
                return int(mo.group(1))

    # Find beginning of disassembly
    while not text_section_re.fullmatch(lr.get_line()):
        pass

    # Scan for symbols
    while not lr.is_empty():
        lr.consume_blank_lines()
        if lr.is_empty():
            break
        line = lr.get_line()
        mo = symbol_re.fullmatch(line)
        # Found a symbol
        if mo:
            symbol = mo.group(1)
            stack_size = find_stack_alloc_size()
            if stack_size != None:
                results[symbol] = stack_size

    return results


def parse_dumpbin_assembly(in_file):
    results = {}

    file_type_re = re.compile('File Type: COFF OBJECT\s*')
    symbol_re = re.compile('[^(]*\((.*)\):\s*')
    summary_re = re.compile('\s*Summary\s*')
    stack_alloc = re.compile('.*sub\s*rsp,([A-Z0-9]*)h\s*')

    lr = LineReader(in_file.readlines())

    def find_stack_alloc_size():
        while True:
            if lr.is_empty():
                return None
            if blank_re.fullmatch(lr.peek_line()):
                return None
            line = lr.get_line()
            mo = stack_alloc.fullmatch(line)
            if mo:
                lr.consume_blank_lines()
                return int(mo.group(1), 16) # return value in decimal

    # Find beginning of disassembly
    while not file_type_re.fullmatch(lr.get_line()):
        pass

    # Scan for symbols
    while not lr.is_empty():
        lr.consume_blank_lines()
        if lr.is_empty():
            break
        line = lr.get_line()
        if summary_re.fullmatch(line):
            break
        mo = symbol_re.fullmatch(line)
        # Found a symbol
        if mo:
            symbol = mo.group(1)
            stack_size = find_stack_alloc_size()
            if stack_size != None:
                results[symbol] = stack_size

    return results


def main():
    parser = argparse.ArgumentParser(description='Tool used for reporting or comparing the stack usage of functions/methods')
    parser.add_argument('--format', choices=['dumpbin', 'objdump'], required=True, help='Specifies the program used to generate the input files')
    parser.add_argument('--input', action='append', required=True, help='Input assembly file. This option may be specified multiple times.')
    parser.add_argument('--md-output', action='store_true', help='Show table output in markdown format')
    parser.add_argument('--only-diffs', action='store_true', help='Only show stack info when it differs between the input files')
    args = parser.parse_args()

    parsers = {'dumpbin': parse_dumpbin_assembly, 'objdump' : parse_objdump_assembly}
    parse_func = parsers[args.format]

    input_results = []
    for input_name in args.input:
        with open(input_name) as in_file:
            results = parse_func(in_file)
            input_results.append(results)

    if len(input_results) == 1:
        # Print out the results sorted by size
        size_sorted = sorted([(size, symbol) for symbol, size in results.items()], reverse=True)
        print(input_name)
        for size, symbol in size_sorted:
            print(f'{size:10}\t{symbol}')
        print()
    elif len(input_results) == 2:
        common_symbols = set(input_results[0].keys()).intersection(set(input_results[1].keys()))
        print(f'Found {len(common_symbols)} common symbols')

        stack_sizes = sorted([(input_results[0][sym], input_results[1][sym], sym) for sym in common_symbols], reverse=True)

        if args.md_output:
            print('Before | After | Symbol')
            print('-- | -- | --')

        for size0, size1, symbol in stack_sizes:
            if args.only_diffs and size0 == size1:
                continue
            if args.md_output:
                print(f'{size0} | {size1} | {symbol}')
            else:
                print(f'{size0:10}\t{size1:10}\t{symbol}')
    else:
        print("TODO support more than 2 inputs")


if __name__ == '__main__':
    main()