mirror of https://github.com/luau-lang/luau.git, synced 2024-12-12 13:00:38 +00:00

Sync to upstream/release/578

This commit is contained in:
    parent eb7106016e
    commit b8e9d07b20

82 changed files with 1616 additions and 789 deletions

@@ -81,6 +81,9 @@ struct IterableConstraint
 {
     TypePackId iterator;
     TypePackId variables;
+
+    const AstNode* nextAstFragment;
+    DenseHashMap<const AstNode*, TypeId>* astOverloadResolvedTypes;
 };

 // name(namedType) = name

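Note: the two new fields let the solver record, per AST fragment, which `next` overload it settled on for a for-in loop. A minimal consumer-side sketch, assuming a solved module and the lookup pattern visible later in this diff (DenseHashMap::find returns a pointer in Luau's container API):

    // Hypothetical consumer: after solving, ask which function type was
    // chosen for the first value expression of a for-in statement.
    if (TypeId* resolved = module->astOverloadResolvedTypes.find(forIn->values.data[0]))
        printf("iterator resolved to %s\n", Luau::toString(*resolved).c_str());
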
@@ -201,7 +201,7 @@ struct ConstraintSolver
      * @param subType the sub-type to unify.
      * @param superType the super-type to unify.
      */
-    void unify(TypeId subType, TypeId superType, NotNull<Scope> scope);
+    ErrorVec unify(TypeId subType, TypeId superType, NotNull<Scope> scope);

     /**
      * Creates a new Unifier and performs a single unification operation. Commits

@@ -209,7 +209,7 @@ struct ConstraintSolver
      * @param subPack the sub-type pack to unify.
      * @param superPack the super-type pack to unify.
      */
-    void unify(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope);
+    ErrorVec unify(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope);

     /** Pushes a new solver constraint to the solver.
      * @param cv the body of the constraint.

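Returning an ErrorVec instead of void lets call sites branch on whether a unification succeeded. A minimal sketch of the new calling pattern (the surrounding solver state and the recordResolution helper are assumptions, not part of this diff):

    // Inside some constraint-dispatch method; subTy/superTy are assumed TypeIds.
    ErrorVec errors = unify(subTy, superTy, constraint->scope);

    // An empty vector means the unification committed cleanly, so dependent
    // state (such as an overload-resolution record) can be updated safely.
    if (errors.empty())
        recordResolution(constraint, superTy); // hypothetical helper
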
@@ -343,13 +343,27 @@ struct UninhabitedTypePackFamily
     bool operator==(const UninhabitedTypePackFamily& rhs) const;
 };

+struct WhereClauseNeeded
+{
+    TypeId ty;
+
+    bool operator==(const WhereClauseNeeded& rhs) const;
+};
+
+struct PackWhereClauseNeeded
+{
+    TypePackId tp;
+
+    bool operator==(const PackWhereClauseNeeded& rhs) const;
+};
+
 using TypeErrorData =
     Variant<TypeMismatch, UnknownSymbol, UnknownProperty, NotATable, CannotExtendTable, OnlyTablesCanHaveMethods, DuplicateTypeDefinition,
         CountMismatch, FunctionDoesNotTakeSelf, FunctionRequiresSelf, OccursCheckFailed, UnknownRequire, IncorrectGenericParameterCount, SyntaxError,
         CodeTooComplex, UnificationTooComplex, UnknownPropButFoundLikeProp, GenericError, InternalError, CannotCallNonFunction, ExtraInformation,
         DeprecatedApiUsed, ModuleHasCyclicDependency, IllegalRequire, FunctionExitsWithoutReturning, DuplicateGenericParameter,
         CannotInferBinaryOperation, MissingProperties, SwappedGenericTypeParameter, OptionalValueAccess, MissingUnionProperty, TypesAreUnrelated,
-        NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe, UninhabitedTypeFamily, UninhabitedTypePackFamily>;
+        NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe, UninhabitedTypeFamily, UninhabitedTypePackFamily, WhereClauseNeeded, PackWhereClauseNeeded>;

 struct TypeErrorSummary
 {

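Because TypeErrorData is a Variant, every exhaustive visitor over it must learn the two new alternatives. A sketch of a conforming visitor (only the new handlers are shown; the visitor name is hypothetical and the remaining operator() overloads are elided):

    struct ErrorKindName // hypothetical visitor, not part of this diff
    {
        std::string operator()(const WhereClauseNeeded& e) const
        {
            return "WhereClauseNeeded: " + Luau::toString(e.ty);
        }

        std::string operator()(const PackWhereClauseNeeded& e) const
        {
            return "PackWhereClauseNeeded: " + Luau::toString(e.tp);
        }

        // ... one operator() per remaining TypeErrorData alternative ...
    };
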
@@ -182,8 +182,6 @@ struct Frontend
     std::optional<CheckResult> getCheckResult(const ModuleName& name, bool accumulateNested, bool forAutocomplete = false);

 private:
-    CheckResult check_DEPRECATED(const ModuleName& name, std::optional<FrontendOptions> optionOverride = {});
-
     struct TypeCheckLimits
     {
         std::optional<double> finishTime;

@@ -285,14 +285,19 @@ class Normalizer
     std::unordered_map<const TypeIds*, TypeId> cachedIntersections;
     std::unordered_map<const TypeIds*, TypeId> cachedUnions;
     std::unordered_map<const TypeIds*, std::unique_ptr<TypeIds>> cachedTypeIds;

+    DenseHashMap<TypeId, bool> cachedIsInhabited{nullptr};
+    DenseHashMap<std::pair<TypeId, TypeId>, bool, TypeIdPairHash> cachedIsInhabitedIntersection{{nullptr, nullptr}};
+
     bool withinResourceLimits();

 public:
     TypeArena* arena;
     NotNull<BuiltinTypes> builtinTypes;
     NotNull<UnifierSharedState> sharedState;
+    bool cacheInhabitance = false;

-    Normalizer(TypeArena* arena, NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> sharedState);
+    Normalizer(TypeArena* arena, NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> sharedState, bool cacheInhabitance = false);
     Normalizer(const Normalizer&) = delete;
     Normalizer(Normalizer&&) = delete;
     Normalizer() = delete;

@@ -355,8 +360,10 @@ public:
     bool normalizeIntersections(const std::vector<TypeId>& intersections, NormalizedType& outType);

     // Check for inhabitance
-    bool isInhabited(TypeId ty, std::unordered_set<TypeId> seen = {});
+    bool isInhabited(TypeId ty);
+    bool isInhabited(TypeId ty, std::unordered_set<TypeId> seen);
     bool isInhabited(const NormalizedType* norm, std::unordered_set<TypeId> seen = {});

     // Check for intersections being inhabited
     bool isIntersectionInhabited(TypeId left, TypeId right);

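The caching behavior is opt-in via the new constructor parameter, so existing call sites keep the old allocation profile. A construction sketch, assuming an available NotNull<BuiltinTypes> and a query type:

    InternalErrorReporter ice;
    UnifierSharedState sharedState{&ice};
    TypeArena arena;

    // true enables cachedIsInhabited / cachedIsInhabitedIntersection, trading
    // memory for cheap repeat inhabitance queries on the same TypeIds.
    Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}, /*cacheInhabitance*/ true};

    bool inhabited = normalizer.isInhabited(someType); // cached on later calls
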
@@ -113,6 +113,13 @@ struct Tarjan
     void visitChild(TypeId ty);
     void visitChild(TypePackId ty);

+    template<typename Ty>
+    void visitChild(std::optional<Ty> ty)
+    {
+        if (ty)
+            visitChild(*ty);
+    }
+
     // Visit the root vertex.
     TarjanResult visitRoot(TypeId ty);
     TarjanResult visitRoot(TypePackId ty);

@@ -127,10 +134,22 @@ struct Tarjan
     {
         return false;
     }

     virtual bool ignoreChildren(TypePackId ty)
     {
         return false;
     }

+    // Some subclasses might ignore children visit, but not other actions like replacing the children
+    virtual bool ignoreChildrenVisit(TypeId ty)
+    {
+        return ignoreChildren(ty);
+    }
+
+    virtual bool ignoreChildrenVisit(TypePackId ty)
+    {
+        return ignoreChildren(ty);
+    }
 };

 // We use Tarjan to calculate dirty bits. We set `dirty[i]` true

@@ -186,8 +205,10 @@ public:

     TypeId replace(TypeId ty);
     TypePackId replace(TypePackId tp);

     void replaceChildren(TypeId ty);
     void replaceChildren(TypePackId tp);

+    TypeId clone(TypeId ty);
+    TypePackId clone(TypePackId tp);
+
@@ -211,6 +232,16 @@ public:
     {
         return arena->addTypePack(TypePackVar{tp});
     }

+private:
+    template<typename Ty>
+    std::optional<Ty> replace(std::optional<Ty> ty)
+    {
+        if (ty)
+            return replace(*ty);
+        else
+            return std::nullopt;
+    }
 };

 } // namespace Luau

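The split between ignoreChildren and ignoreChildrenVisit lets a subclass prune Tarjan's visit phase without also disabling replacement. A sketch of the intended override pattern (pure-virtual members of Substitution are elided, so this will not compile as-is, and internalArena is an assumed field):

    struct SkipForeignVisit : Substitution // sketch only
    {
        TypeArena* internalArena = nullptr; // assumed: the arena this pass owns

        bool ignoreChildrenVisit(TypeId ty) override
        {
            // Skip descending into types another arena owns during the visit...
            return ty->owningArena != internalArena;
        }
        // ...while ignoreChildren keeps its default, so replaceChildren still
        // recurses where replacement is actually required.
    };
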
@@ -388,7 +388,13 @@ struct Property
     static Property writeonly(TypeId ty);
     static Property rw(TypeId ty);                 // Shared read-write type.
     static Property rw(TypeId read, TypeId write); // Separate read-write type.
-    static std::optional<Property> create(std::optional<TypeId> read, std::optional<TypeId> write);
+
+    // Invariant: at least one of the two optionals are not nullopt!
+    // If the read type is not nullopt, but the write type is, then the property is readonly.
+    // If the read type is nullopt, but the write type is not, then the property is writeonly.
+    // If the read and write types are not nullopt, then the property is read and write.
+    // Otherwise, an assertion where read and write types are both nullopt will be tripped.
+    static Property create(std::optional<TypeId> read, std::optional<TypeId> write);

     bool deprecated = false;
     std::string deprecatedSuggestion;

@@ -414,6 +420,8 @@ struct Property
     std::optional<TypeId> readType() const;
     std::optional<TypeId> writeType() const;

+    bool isShared() const;
+
 private:
     std::optional<TypeId> readTy;
     std::optional<TypeId> writeTy;

@@ -614,30 +622,26 @@ struct IntersectionType
 struct LazyType
 {
     LazyType() = default;
-    LazyType(std::function<TypeId()> thunk_DEPRECATED, std::function<void(LazyType&)> unwrap)
-        : thunk_DEPRECATED(thunk_DEPRECATED)
-        , unwrap(unwrap)
+    LazyType(std::function<void(LazyType&)> unwrap)
+        : unwrap(unwrap)
     {
     }

     // std::atomic is sad and requires a manual copy
     LazyType(const LazyType& rhs)
-        : thunk_DEPRECATED(rhs.thunk_DEPRECATED)
-        , unwrap(rhs.unwrap)
+        : unwrap(rhs.unwrap)
         , unwrapped(rhs.unwrapped.load())
     {
     }

     LazyType(LazyType&& rhs) noexcept
-        : thunk_DEPRECATED(std::move(rhs.thunk_DEPRECATED))
-        , unwrap(std::move(rhs.unwrap))
+        : unwrap(std::move(rhs.unwrap))
         , unwrapped(rhs.unwrapped.load())
     {
     }

     LazyType& operator=(const LazyType& rhs)
     {
-        thunk_DEPRECATED = rhs.thunk_DEPRECATED;
         unwrap = rhs.unwrap;
         unwrapped = rhs.unwrapped.load();

@@ -646,15 +650,12 @@ struct LazyType

     LazyType& operator=(LazyType&& rhs) noexcept
     {
-        thunk_DEPRECATED = std::move(rhs.thunk_DEPRECATED);
         unwrap = std::move(rhs.unwrap);
         unwrapped = rhs.unwrapped.load();

         return *this;
     }

-    std::function<TypeId()> thunk_DEPRECATED;
-
     std::function<void(LazyType&)> unwrap;
     std::atomic<TypeId> unwrapped = nullptr;
 };

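The reworked Property::create always yields a Property and asserts instead of returning nullopt when both optionals are empty. A usage sketch (the TypeId values are assumed):

    Property r  = Property::create(someReadTy, std::nullopt);  // readonly
    Property w  = Property::create(std::nullopt, someWriteTy); // writeonly
    Property rw = Property::create(someReadTy, someWriteTy);   // separate read/write

    // Property::create(std::nullopt, std::nullopt) now trips a LUAU_ASSERT
    // rather than producing std::nullopt as the old optional-returning
    // overload did.
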
@@ -55,4 +55,13 @@ std::vector<TypeId> reduceUnion(const std::vector<TypeId>& types);
  */
 TypeId stripNil(NotNull<BuiltinTypes> builtinTypes, TypeArena& arena, TypeId ty);

+template<typename T, typename Ty>
+const T* get(std::optional<Ty> ty)
+{
+    if (ty)
+        return get<T>(*ty);
+    else
+        return nullptr;
+}
+
 } // namespace Luau

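This overload collapses the nullopt case into the nullptr case, so probing an optional TypeId no longer needs manual unwrapping. A usage sketch (prop is an assumed Property):

    // Previously: if (prop.readType() && get<TableType>(*prop.readType())) ...
    if (const TableType* ttv = Luau::get<TableType>(prop.readType()))
    {
        // the property has a read type, and that type is a table
    }
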
@@ -54,7 +54,6 @@ struct Unifier
     TypeArena* const types;
     NotNull<BuiltinTypes> builtinTypes;
     NotNull<Normalizer> normalizer;
-    Mode mode;

     NotNull<Scope> scope; // const Scope maybe
     TxnLog log;

@@ -78,7 +77,7 @@ struct Unifier
     std::vector<TypePackId> blockedTypePacks;

     Unifier(
-        NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);
+        NotNull<Normalizer> normalizer, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);

     // Configure the Unifier to test for scope subsumption via embedded Scope
     // pointers rather than TypeLevels.

@@ -154,7 +153,6 @@ public:
     LUAU_NOINLINE void reportError(Location location, TypeErrorData data);

 private:
-    bool isNonstrictMode() const;
     TypeMismatch::Context mismatchContext();

     void checkChildUnifierTypeMismatch(const ErrorVec& innerErrors, TypeId wantedType, TypeId givenType);

@@ -10,6 +10,7 @@

 LUAU_FASTINT(LuauVisitRecursionLimit)
 LUAU_FASTFLAG(LuauBoundLazyTypes2)
+LUAU_FASTFLAG(DebugLuauReadWriteProperties)

 namespace Luau
 {

@@ -250,7 +251,18 @@ struct GenericTypeVisitor
         else
         {
             for (auto& [_name, prop] : ttv->props)
-                traverse(prop.type());
+            {
+                if (FFlag::DebugLuauReadWriteProperties)
+                {
+                    if (auto ty = prop.readType())
+                        traverse(*ty);
+
+                    if (auto ty = prop.writeType())
+                        traverse(*ty);
+                }
+                else
+                    traverse(prop.type());
+            }

             if (ttv->indexer)
             {

@@ -273,7 +285,18 @@ struct GenericTypeVisitor
         if (visit(ty, *ctv))
         {
             for (const auto& [name, prop] : ctv->props)
-                traverse(prop.type());
+            {
+                if (FFlag::DebugLuauReadWriteProperties)
+                {
+                    if (auto ty = prop.readType())
+                        traverse(*ty);
+
+                    if (auto ty = prop.writeType())
+                        traverse(*ty);
+                }
+                else
+                    traverse(prop.type());
+            }

             if (ctv->parent)
                 traverse(*ctv->parent);

@@ -311,11 +334,9 @@ struct GenericTypeVisitor
         }
         else if (auto ltv = get<LazyType>(ty))
         {
-            if (FFlag::LuauBoundLazyTypes2)
-            {
-                if (TypeId unwrapped = ltv->unwrapped)
-                    traverse(unwrapped);
-            }
+            if (TypeId unwrapped = ltv->unwrapped)
+                traverse(unwrapped);

             // Visiting into LazyType that hasn't been unwrapped may necessarily cause infinite expansion, so we don't do that on purpose.
             // Asserting also makes no sense, because the type _will_ happen here, most likely as a property of some ClassType
             // that doesn't need to be expanded.

@@ -11,6 +11,8 @@

 #include <algorithm>

+LUAU_FASTFLAG(DebugLuauReadWriteProperties)
+
 namespace Luau
 {

@@ -501,12 +503,28 @@ std::optional<DocumentationSymbol> getDocumentationSymbolAtPosition(const Source
     if (const TableType* ttv = get<TableType>(parentTy))
     {
         if (auto propIt = ttv->props.find(indexName->index.value); propIt != ttv->props.end())
-            return checkOverloadedDocumentationSymbol(module, propIt->second.type(), parentExpr, propIt->second.documentationSymbol);
+        {
+            if (FFlag::DebugLuauReadWriteProperties)
+            {
+                if (auto ty = propIt->second.readType())
+                    return checkOverloadedDocumentationSymbol(module, *ty, parentExpr, propIt->second.documentationSymbol);
+            }
+            else
+                return checkOverloadedDocumentationSymbol(module, propIt->second.type(), parentExpr, propIt->second.documentationSymbol);
+        }
     }
     else if (const ClassType* ctv = get<ClassType>(parentTy))
     {
         if (auto propIt = ctv->props.find(indexName->index.value); propIt != ctv->props.end())
-            return checkOverloadedDocumentationSymbol(module, propIt->second.type(), parentExpr, propIt->second.documentationSymbol);
+        {
+            if (FFlag::DebugLuauReadWriteProperties)
+            {
+                if (auto ty = propIt->second.readType())
+                    return checkOverloadedDocumentationSymbol(module, *ty, parentExpr, propIt->second.documentationSymbol);
+            }
+            else
+                return checkOverloadedDocumentationSymbol(module, propIt->second.type(), parentExpr, propIt->second.documentationSymbol);
+        }
     }
 }

@@ -12,6 +12,8 @@
 #include <unordered_set>
 #include <utility>

+LUAU_FASTFLAG(DebugLuauReadWriteProperties)
+
 static const std::unordered_set<std::string> kStatementStartingKeywords = {
     "while", "if", "local", "repeat", "function", "do", "for", "return", "break", "continue", "type", "export"};

@@ -138,7 +140,7 @@ static bool checkTypeMatch(TypeId subTy, TypeId superTy, NotNull<Scope> scope, T
     InternalErrorReporter iceReporter;
     UnifierSharedState unifierState(&iceReporter);
     Normalizer normalizer{typeArena, builtinTypes, NotNull{&unifierState}};
-    Unifier unifier(NotNull<Normalizer>{&normalizer}, Mode::Strict, scope, Location(), Variance::Covariant);
+    Unifier unifier(NotNull<Normalizer>{&normalizer}, scope, Location(), Variance::Covariant);

     // Cost of normalization can be too high for autocomplete response time requirements
     unifier.normalize = false;

@@ -259,10 +261,22 @@ static void autocompleteProps(const Module& module, TypeArena* typeArena, NotNul
         // already populated, it takes precedence over the property we found just now.
         if (result.count(name) == 0 && name != kParseNameError)
         {
-            Luau::TypeId type = Luau::follow(prop.type());
+            Luau::TypeId type;
+
+            if (FFlag::DebugLuauReadWriteProperties)
+            {
+                if (auto ty = prop.readType())
+                    type = follow(*ty);
+                else
+                    continue;
+            }
+            else
+                type = follow(prop.type());
+
             TypeCorrectKind typeCorrect = indexType == PropIndexType::Key
                                               ? TypeCorrectKind::Correct
                                               : checkTypeCorrectKind(module, typeArena, builtinTypes, nodes.back(), {{}, {}}, type);

             ParenthesesRecommendation parens =
                 indexType == PropIndexType::Key ? ParenthesesRecommendation::None : getParenRecommendation(type, nodes, typeCorrect);

@@ -755,8 +755,8 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatForIn* f
     // It is always ok to provide too few variables, so we give this pack a free tail.
     TypePackId variablePack = arena->addTypePack(std::move(variableTypes), arena->addTypePack(FreeTypePack{loopScope.get()}));

-    addConstraint(loopScope, getLocation(forIn->values), IterableConstraint{iterator, variablePack});
-
+    addConstraint(
+        loopScope, getLocation(forIn->values), IterableConstraint{iterator, variablePack, forIn->values.data[0], &module->astOverloadResolvedTypes});
     visit(loopScope, forIn->body);

     return ControlFlow::None;

@@ -20,7 +20,6 @@
 #include "Luau/VisitType.h"

 LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
-LUAU_FASTFLAG(LuauRequirePathTrueModuleName)

 namespace Luau
 {

@@ -252,6 +251,11 @@ struct InstantiationQueuer : TypeOnceVisitor
         solver->pushConstraint(scope, location, ReduceConstraint{ty});
         return true;
     }
+
+    bool visit(TypeId ty, const ClassType& ctv) override
+    {
+        return false;
+    }
 };

 ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, std::vector<NotNull<Constraint>> constraints,

@@ -749,7 +753,10 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
             mmResult = builtinTypes->booleanType;
             break;
         default:
-            mmResult = first(ftv->retTypes).value_or(errorRecoveryType());
+            if (get<NeverType>(leftType) || get<NeverType>(rightType))
+                mmResult = builtinTypes->neverType;
+            else
+                mmResult = first(ftv->retTypes).value_or(errorRecoveryType());
         }

         asMutable(resultType)->ty.emplace<BoundType>(mmResult);

@@ -785,6 +792,13 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
             unblock(resultType);
             return true;
         }
+        else if (get<NeverType>(leftType) || get<NeverType>(rightType))
+        {
+            unify(leftType, rightType, constraint->scope);
+            asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->neverType);
+            unblock(resultType);
+            return true;
+        }

         break;
     }

@@ -800,6 +814,13 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
             unblock(resultType);
             return true;
         }
+        else if (get<NeverType>(leftType) || get<NeverType>(rightType))
+        {
+            unify(leftType, rightType, constraint->scope);
+            asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->neverType);
+            unblock(resultType);
+            return true;
+        }

         break;
     // Inexact comparisons require that the types be both numbers or both

@@ -808,7 +829,8 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
     case AstExprBinary::Op::CompareGt:
     case AstExprBinary::Op::CompareLe:
     case AstExprBinary::Op::CompareLt:
-        if ((isNumber(leftType) && isNumber(rightType)) || (isString(leftType) && isString(rightType)))
+        if ((isNumber(leftType) && isNumber(rightType)) || (isString(leftType) && isString(rightType)) || get<NeverType>(leftType) ||
+            get<NeverType>(rightType))
         {
             asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->booleanType);
             unblock(resultType);

@@ -1291,7 +1313,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
         return true;
     }

-    Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
+    Unifier u{normalizer, constraint->scope, Location{}, Covariant};
     u.enableScopeTests();

     u.tryUnify(*instantiated, inferredTy, /* isFunctionCall */ true);

@@ -1344,7 +1366,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
     }

     // We found no matching overloads.
-    Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
+    Unifier u{normalizer, constraint->scope, Location{}, Covariant};
     u.enableScopeTests();

     u.tryUnify(inferredTy, builtinTypes->anyType);

@@ -1746,6 +1768,11 @@ struct FindRefineConstraintBlockers : TypeOnceVisitor
         found.insert(ty);
         return false;
     }
+
+    bool visit(TypeId ty, const ClassType&) override
+    {
+        return false;
+    }
 };

 }

@@ -1932,6 +1959,15 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
         unify(*errorified, ty, constraint->scope);
     };

+    auto neverify = [&](auto ty) {
+        Anyification anyify{arena, constraint->scope, builtinTypes, &iceReporter, builtinTypes->neverType, builtinTypes->neverTypePack};
+        std::optional neverified = anyify.substitute(ty);
+        if (!neverified)
+            reportError(CodeTooComplex{}, constraint->location);
+        else
+            unify(*neverified, ty, constraint->scope);
+    };
+
     if (get<AnyType>(iteratorTy))
     {
         anyify(c.variables);

@@ -1944,6 +1980,12 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
         return true;
     }

+    if (get<NeverType>(iteratorTy))
+    {
+        neverify(c.variables);
+        return true;
+    }
+
     // Irksome: I don't think we have any way to guarantee that this table
     // type never has a metatable.

@@ -2072,7 +2114,11 @@ bool ConstraintSolver::tryDispatchIterableFunction(
     const TypePackId nextRetPack = arena->addTypePack(TypePack{{retIndex}, valueTailTy});

     const TypeId expectedNextTy = arena->addType(FunctionType{TypeLevel{}, constraint->scope, nextArgPack, nextRetPack});
-    unify(nextTy, expectedNextTy, constraint->scope);
+    ErrorVec errors = unify(nextTy, expectedNextTy, constraint->scope);
+
+    // if there are no errors from unifying the two, we can pass forward the expected type as our selected resolution.
+    if (errors.empty())
+        (*c.astOverloadResolvedTypes)[c.nextAstFragment] = expectedNextTy;

     auto it = begin(nextRetPack);
     std::vector<TypeId> modifiedNextRetHead;

@@ -2122,7 +2168,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
     else if (auto ttv = getMutable<TableType>(subjectType))
     {
         if (auto prop = ttv->props.find(propName); prop != ttv->props.end())
-            return {{}, prop->second.type()};
+            return {{}, FFlag::DebugLuauReadWriteProperties ? prop->second.readType() : prop->second.type()};
         else if (ttv->indexer && maybeString(ttv->indexer->indexType))
             return {{}, ttv->indexer->indexResultType};
         else if (ttv->state == TableState::Free)

@@ -2275,7 +2321,7 @@ static TypePackId getErrorType(NotNull<BuiltinTypes> builtinTypes, TypePackId)
 template <typename TID>
 bool ConstraintSolver::tryUnify(NotNull<const Constraint> constraint, TID subTy, TID superTy)
 {
-    Unifier u{normalizer, Mode::Strict, constraint->scope, constraint->location, Covariant};
+    Unifier u{normalizer, constraint->scope, constraint->location, Covariant};
     u.enableScopeTests();

     u.tryUnify(subTy, superTy);

@@ -2379,12 +2425,17 @@ struct Blocker : TypeOnceVisitor
     {
     }

-    bool visit(TypeId ty, const PendingExpansionType&)
+    bool visit(TypeId ty, const PendingExpansionType&) override
     {
         blocked = true;
         solver->block(ty, constraint);
         return false;
     }
+
+    bool visit(TypeId ty, const ClassType&) override
+    {
+        return false;
+    }
 };

 bool ConstraintSolver::blockOnPendingTypes(TypeId target, NotNull<const Constraint> constraint)

@@ -2492,9 +2543,9 @@ bool ConstraintSolver::isBlocked(NotNull<const Constraint> constraint)
     return blockedIt != blockedConstraints.end() && blockedIt->second > 0;
 }

-void ConstraintSolver::unify(TypeId subType, TypeId superType, NotNull<Scope> scope)
+ErrorVec ConstraintSolver::unify(TypeId subType, TypeId superType, NotNull<Scope> scope)
 {
-    Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{normalizer, scope, Location{}, Covariant};
     u.enableScopeTests();

     u.tryUnify(subType, superType);

@@ -2512,12 +2563,14 @@ void ConstraintSolver::unify(TypeId subType, TypeId superType, NotNull<Scope> sc

     unblock(changedTypes);
     unblock(changedPacks);
+
+    return std::move(u.errors);
 }

-void ConstraintSolver::unify(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope)
+ErrorVec ConstraintSolver::unify(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope)
 {
     UnifierSharedState sharedState{&iceReporter};
-    Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{normalizer, scope, Location{}, Covariant};
     u.enableScopeTests();

     u.tryUnify(subPack, superPack);

@@ -2528,6 +2581,8 @@ void ConstraintSolver::unify(TypePackId subPack, TypePackId superPack, NotNull<S

     unblock(changedTypes);
     unblock(changedPacks);
+
+    return std::move(u.errors);
 }

 NotNull<Constraint> ConstraintSolver::pushConstraint(NotNull<Scope> scope, const Location& location, ConstraintV cv)

@@ -2550,7 +2605,7 @@ TypeId ConstraintSolver::resolveModule(const ModuleInfo& info, const Location& l

     for (const auto& [location, path] : requireCycles)
     {
-        if (!path.empty() && path.front() == (FFlag::LuauRequirePathTrueModuleName ? info.name : moduleResolver->getHumanReadableModuleName(info.name)))
+        if (!path.empty() && path.front() == info.name)
             return builtinTypes->anyType;
     }

@@ -2612,7 +2667,7 @@ TypeId ConstraintSolver::unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope,

     if (unifyFreeTypes && (get<FreeType>(a) || get<FreeType>(b)))
     {
-        Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
+        Unifier u{normalizer, scope, Location{}, Covariant};
         u.enableScopeTests();
         u.tryUnify(b, a);

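The added never-type cases follow one rule: comparisons on never still produce a boolean, while every other binary operation involving never collapses to never. A standalone sketch of that rule in isolation (binaryOpResultType is a hypothetical helper, not part of Luau's API):

    TypeId binaryOpResultType(NotNull<BuiltinTypes> builtinTypes, TypeId left, TypeId right, AstExprBinary::Op op)
    {
        if (get<NeverType>(follow(left)) || get<NeverType>(follow(right)))
        {
            switch (op)
            {
            case AstExprBinary::Op::CompareEq:
            case AstExprBinary::Op::CompareNe:
            case AstExprBinary::Op::CompareGt:
            case AstExprBinary::Op::CompareGe:
            case AstExprBinary::Op::CompareLt:
            case AstExprBinary::Op::CompareLe:
                return builtinTypes->booleanType; // comparing never is still a boolean
            default:
                return builtinTypes->neverType;   // arithmetic etc. on never is never
            }
        }
        // non-never operands proceed to metamethod/overload resolution as before
        return builtinTypes->errorRecoveryType();
    }
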
@@ -11,7 +11,6 @@
 #include <type_traits>

 LUAU_FASTFLAGVARIABLE(LuauTypeMismatchInvarianceInError, false)
-LUAU_FASTFLAGVARIABLE(LuauRequirePathTrueModuleName, false)

 static std::string wrongNumberOfArgsString(
     size_t expectedCount, std::optional<size_t> maximumCount, size_t actualCount, const char* argPrefix = nullptr, bool isVariadic = false)

@@ -350,7 +349,7 @@ struct ErrorConverter
         else
             s += " -> ";

-        if (FFlag::LuauRequirePathTrueModuleName && fileResolver != nullptr)
+        if (fileResolver != nullptr)
             s += fileResolver->getHumanReadableModuleName(name);
         else
             s += name;

@@ -494,6 +493,16 @@ struct ErrorConverter
     {
         return "Type pack family instance " + Luau::toString(e.tp) + " is uninhabited";
     }

+    std::string operator()(const WhereClauseNeeded& e) const
+    {
+        return "Type family instance " + Luau::toString(e.ty) + " depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time";
+    }
+
+    std::string operator()(const PackWhereClauseNeeded& e) const
+    {
+        return "Type pack family instance " + Luau::toString(e.tp) + " depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time";
+    }
 };

 struct InvalidNameChecker

@@ -806,6 +815,16 @@ bool UninhabitedTypePackFamily::operator==(const UninhabitedTypePackFamily& rhs)
     return tp == rhs.tp;
 }

+bool WhereClauseNeeded::operator==(const WhereClauseNeeded& rhs) const
+{
+    return ty == rhs.ty;
+}
+
+bool PackWhereClauseNeeded::operator==(const PackWhereClauseNeeded& rhs) const
+{
+    return tp == rhs.tp;
+}
+
 std::string toString(const TypeError& error)
 {
     return toString(error, TypeErrorToStringOptions{});

@@ -968,6 +987,10 @@ void copyError(T& e, TypeArena& destArena, CloneState cloneState)
         e.ty = clone(e.ty);
     else if constexpr (std::is_same_v<T, UninhabitedTypePackFamily>)
         e.tp = clone(e.tp);
+    else if constexpr (std::is_same_v<T, WhereClauseNeeded>)
+        e.ty = clone(e.ty);
+    else if constexpr (std::is_same_v<T, PackWhereClauseNeeded>)
+        e.tp = clone(e.tp);
     else
         static_assert(always_false_v<T>, "Non-exhaustive type switch");
 }

@@ -34,9 +34,7 @@ LUAU_FASTFLAGVARIABLE(LuauKnowsTheDataModel3, false)
 LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
 LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
 LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
-LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
 LUAU_FASTFLAGVARIABLE(DebugLuauReadWriteProperties, false)
-LUAU_FASTFLAGVARIABLE(LuauSplitFrontendProcessing, false)
 LUAU_FASTFLAGVARIABLE(LuauTypeCheckerUseCorrectScope, false)

 namespace Luau

@@ -349,9 +347,9 @@ std::vector<RequireCycle> getRequireCycles(const FileResolver* resolver,
             if (top == start)
             {
                 for (const SourceNode* node : path)
-                    cycle.push_back(FFlag::LuauRequirePathTrueModuleName ? node->name : node->humanReadableName);
+                    cycle.push_back(node->name);

-                cycle.push_back(FFlag::LuauRequirePathTrueModuleName ? top->name : top->humanReadableName);
+                cycle.push_back(top->name);
                 break;
             }
         }

@@ -419,9 +417,6 @@ Frontend::Frontend(FileResolver* fileResolver, ConfigResolver* configResolver, c

 CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOptions> optionOverride)
 {
-    if (!FFlag::LuauSplitFrontendProcessing)
-        return check_DEPRECATED(name, optionOverride);
-
     LUAU_TIMETRACE_SCOPE("Frontend::check", "Frontend");
     LUAU_TIMETRACE_ARGUMENT("name", name.c_str());

@@ -463,200 +458,6 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
     return checkResult;
 }

-CheckResult Frontend::check_DEPRECATED(const ModuleName& name, std::optional<FrontendOptions> optionOverride)
-{
-    LUAU_TIMETRACE_SCOPE("Frontend::check", "Frontend");
-    LUAU_TIMETRACE_ARGUMENT("name", name.c_str());
-
-    FrontendOptions frontendOptions = optionOverride.value_or(options);
-    CheckResult checkResult;
-
-    FrontendModuleResolver& resolver = frontendOptions.forAutocomplete ? moduleResolverForAutocomplete : moduleResolver;
-
-    auto it = sourceNodes.find(name);
-    if (it != sourceNodes.end() && !it->second->hasDirtyModule(frontendOptions.forAutocomplete))
-    {
-        // No recheck required.
-        ModulePtr module = resolver.getModule(name);
-
-        if (!module)
-            throw InternalCompilerError("Frontend::modules does not have data for " + name, name);
-
-        checkResult.errors = accumulateErrors(sourceNodes, resolver, name);
-
-        // Get lint result only for top checked module
-        checkResult.lintResult = module->lintResult;
-
-        return checkResult;
-    }
-
-    std::vector<ModuleName> buildQueue;
-    bool cycleDetected = parseGraph(buildQueue, name, frontendOptions.forAutocomplete);
-
-    for (const ModuleName& moduleName : buildQueue)
-    {
-        LUAU_ASSERT(sourceNodes.count(moduleName));
-        SourceNode& sourceNode = *sourceNodes[moduleName];
-
-        if (!sourceNode.hasDirtyModule(frontendOptions.forAutocomplete))
-            continue;
-
-        LUAU_ASSERT(sourceModules.count(moduleName));
-        SourceModule& sourceModule = *sourceModules[moduleName];
-
-        const Config& config = configResolver->getConfig(moduleName);
-
-        Mode mode = sourceModule.mode.value_or(config.mode);
-
-        ScopePtr environmentScope = getModuleEnvironment(sourceModule, config, frontendOptions.forAutocomplete);
-
-        double timestamp = getTimestamp();
-
-        std::vector<RequireCycle> requireCycles;
-
-        // in NoCheck mode we only need to compute the value of .cyclic for typeck
-        // in the future we could replace toposort with an algorithm that can flag cyclic nodes by itself
-        // however, for now getRequireCycles isn't expensive in practice on the cases we care about, and long term
-        // all correct programs must be acyclic so this code triggers rarely
-        if (cycleDetected)
-            requireCycles = getRequireCycles(fileResolver, sourceNodes, &sourceNode, mode == Mode::NoCheck);
-
-        // This is used by the type checker to replace the resulting type of cyclic modules with any
-        sourceModule.cyclic = !requireCycles.empty();
-
-        if (frontendOptions.forAutocomplete)
-        {
-            double autocompleteTimeLimit = FInt::LuauAutocompleteCheckTimeoutMs / 1000.0;
-
-            // The autocomplete typecheck is always in strict mode with DM awareness
-            // to provide better type information for IDE features
-            TypeCheckLimits typeCheckLimits;
-
-            if (autocompleteTimeLimit != 0.0)
-                typeCheckLimits.finishTime = TimeTrace::getClock() + autocompleteTimeLimit;
-            else
-                typeCheckLimits.finishTime = std::nullopt;
-
-            // TODO: This is a dirty ad hoc solution for autocomplete timeouts
-            // We are trying to dynamically adjust our existing limits to lower total typechecking time under the limit
-            // so that we'll have type information for the whole file at lower quality instead of a full abort in the middle
-            if (FInt::LuauTarjanChildLimit > 0)
-                typeCheckLimits.instantiationChildLimit = std::max(1, int(FInt::LuauTarjanChildLimit * sourceNode.autocompleteLimitsMult));
-            else
-                typeCheckLimits.instantiationChildLimit = std::nullopt;
-
-            if (FInt::LuauTypeInferIterationLimit > 0)
-                typeCheckLimits.unifierIterationLimit = std::max(1, int(FInt::LuauTypeInferIterationLimit * sourceNode.autocompleteLimitsMult));
-            else
-                typeCheckLimits.unifierIterationLimit = std::nullopt;
-
-            ModulePtr moduleForAutocomplete = check(sourceModule, Mode::Strict, requireCycles, environmentScope, /*forAutocomplete*/ true,
-                /*recordJsonLog*/ false, typeCheckLimits);
-
-            resolver.setModule(moduleName, moduleForAutocomplete);
-
-            double duration = getTimestamp() - timestamp;
-
-            if (moduleForAutocomplete->timeout)
-            {
-                checkResult.timeoutHits.push_back(moduleName);
-
-                sourceNode.autocompleteLimitsMult = sourceNode.autocompleteLimitsMult / 2.0;
-            }
-            else if (duration < autocompleteTimeLimit / 2.0)
-            {
-                sourceNode.autocompleteLimitsMult = std::min(sourceNode.autocompleteLimitsMult * 2.0, 1.0);
-            }
-
-            stats.timeCheck += duration;
-            stats.filesStrict += 1;
-
-            sourceNode.dirtyModuleForAutocomplete = false;
-            continue;
-        }
-
-        const bool recordJsonLog = FFlag::DebugLuauLogSolverToJson && moduleName == name;
-        ModulePtr module = check(sourceModule, mode, requireCycles, environmentScope, /*forAutocomplete*/ false, recordJsonLog, {});
-
-        stats.timeCheck += getTimestamp() - timestamp;
-        stats.filesStrict += mode == Mode::Strict;
-        stats.filesNonstrict += mode == Mode::Nonstrict;
-
-        if (module == nullptr)
-            throw InternalCompilerError("Frontend::check produced a nullptr module for " + moduleName, moduleName);
-
-        if (FFlag::DebugLuauDeferredConstraintResolution && mode == Mode::NoCheck)
-            module->errors.clear();
-
-        if (frontendOptions.runLintChecks)
-        {
-            LUAU_TIMETRACE_SCOPE("lint", "Frontend");
-
-            LintOptions lintOptions = frontendOptions.enabledLintWarnings.value_or(config.enabledLint);
-            filterLintOptions(lintOptions, sourceModule.hotcomments, mode);
-
-            double timestamp = getTimestamp();
-
-            std::vector<LintWarning> warnings =
-                Luau::lint(sourceModule.root, *sourceModule.names, environmentScope, module.get(), sourceModule.hotcomments, lintOptions);
-
-            stats.timeLint += getTimestamp() - timestamp;
-
-            module->lintResult = classifyLints(warnings, config);
-        }
-
-        if (!frontendOptions.retainFullTypeGraphs)
-        {
-            // copyErrors needs to allocate into interfaceTypes as it copies
-            // types out of internalTypes, so we unfreeze it here.
-            unfreeze(module->interfaceTypes);
-            copyErrors(module->errors, module->interfaceTypes);
-            freeze(module->interfaceTypes);
-
-            module->internalTypes.clear();
-
-            module->astTypes.clear();
-            module->astTypePacks.clear();
-            module->astExpectedTypes.clear();
-            module->astOriginalCallTypes.clear();
-            module->astOverloadResolvedTypes.clear();
-            module->astResolvedTypes.clear();
-            module->astResolvedTypePacks.clear();
-            module->astScopes.clear();
-
-            module->scopes.clear();
-        }
-
-        if (mode != Mode::NoCheck)
-        {
-            for (const RequireCycle& cyc : requireCycles)
-            {
-                TypeError te{cyc.location, moduleName, ModuleHasCyclicDependency{cyc.path}};
-
-                module->errors.push_back(te);
-            }
-        }
-
-        ErrorVec parseErrors;
-
-        for (const ParseError& pe : sourceModule.parseErrors)
-            parseErrors.push_back(TypeError{pe.getLocation(), moduleName, SyntaxError{pe.what()}});
-
-        module->errors.insert(module->errors.begin(), parseErrors.begin(), parseErrors.end());
-
-        checkResult.errors.insert(checkResult.errors.end(), module->errors.begin(), module->errors.end());
-
-        resolver.setModule(moduleName, std::move(module));
-        sourceNode.dirtyModule = false;
-    }
-
-    // Get lint result only for top checked module
-    if (ModulePtr module = resolver.getModule(name))
-        checkResult.lintResult = module->lintResult;
-
-    return checkResult;
-}
-
 void Frontend::queueModuleCheck(const std::vector<ModuleName>& names)
 {
     moduleQueue.insert(moduleQueue.end(), names.begin(), names.end());

@@ -996,8 +797,6 @@ bool Frontend::parseGraph(
 void Frontend::addBuildQueueItems(std::vector<BuildQueueItem>& items, std::vector<ModuleName>& buildQueue, bool cycleDetected,
     std::unordered_set<Luau::ModuleName>& seen, const FrontendOptions& frontendOptions)
 {
-    LUAU_ASSERT(FFlag::LuauSplitFrontendProcessing);
-
     for (const ModuleName& moduleName : buildQueue)
     {
         if (seen.count(moduleName))

@@ -1038,8 +837,6 @@ void Frontend::addBuildQueueItems(std::vector<BuildQueueItem>& items, std::vecto

 void Frontend::checkBuildQueueItem(BuildQueueItem& item)
 {
-    LUAU_ASSERT(FFlag::LuauSplitFrontendProcessing);
-
     SourceNode& sourceNode = *item.sourceNode;
     const SourceModule& sourceModule = *item.sourceModule;
     const Config& config = item.config;

@@ -1139,7 +936,8 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
         module->astResolvedTypePacks.clear();
         module->astScopes.clear();

-        module->scopes.clear();
+        if (!FFlag::DebugLuauDeferredConstraintResolution)
+            module->scopes.clear();
     }

     if (mode != Mode::NoCheck)

@@ -1164,8 +962,6 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)

 void Frontend::checkBuildQueueItems(std::vector<BuildQueueItem>& items)
 {
-    LUAU_ASSERT(FFlag::LuauSplitFrontendProcessing);
-
     for (BuildQueueItem& item : items)
     {
         checkBuildQueueItem(item);

@@ -196,6 +196,10 @@ static void errorToString(std::ostream& stream, const T& err)
         stream << "UninhabitedTypeFamily { " << toString(err.ty) << " }";
     else if constexpr (std::is_same_v<T, UninhabitedTypePackFamily>)
         stream << "UninhabitedTypePackFamily { " << toString(err.tp) << " }";
+    else if constexpr (std::is_same_v<T, WhereClauseNeeded>)
+        stream << "WhereClauseNeeded { " << toString(err.ty) << " }";
+    else if constexpr (std::is_same_v<T, PackWhereClauseNeeded>)
+        stream << "PackWhereClauseNeeded { " << toString(err.tp) << " }";
     else
         static_assert(always_false_v<T>, "Non-exhaustive type switch");
 }

@@ -19,6 +19,7 @@ LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false);
 LUAU_FASTFLAG(LuauSubstitutionReentrant);
 LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution);
 LUAU_FASTFLAG(LuauSubstitutionFixMissingFields);
+LUAU_FASTFLAGVARIABLE(LuauCloneSkipNonInternalVisit, false);

 namespace Luau
 {

@@ -98,6 +99,22 @@ struct ClonePublicInterface : Substitution
         return tp->owningArena == &module->internalTypes;
     }

+    bool ignoreChildrenVisit(TypeId ty) override
+    {
+        if (FFlag::LuauCloneSkipNonInternalVisit && ty->owningArena != &module->internalTypes)
+            return true;
+
+        return false;
+    }
+
+    bool ignoreChildrenVisit(TypePackId tp) override
+    {
+        if (FFlag::LuauCloneSkipNonInternalVisit && tp->owningArena != &module->internalTypes)
+            return true;
+
+        return false;
+    }
+
     TypeId clean(TypeId ty) override
     {
         TypeId result = clone(ty);

@@ -21,6 +21,7 @@ LUAU_FASTFLAGVARIABLE(LuauNormalizeBlockedTypes, false);
 LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
 LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
 LUAU_FASTFLAG(LuauTransitiveSubtyping)
+LUAU_FASTFLAG(DebugLuauReadWriteProperties)

 namespace Luau
 {

@@ -277,6 +278,22 @@ bool Normalizer::isInhabited(const NormalizedType* norm, std::unordered_set<Type
     return false;
 }

+bool Normalizer::isInhabited(TypeId ty)
+{
+    if (cacheInhabitance)
+    {
+        if (bool* result = cachedIsInhabited.find(ty))
+            return *result;
+    }
+
+    bool result = isInhabited(ty, {});
+
+    if (cacheInhabitance)
+        cachedIsInhabited[ty] = result;
+
+    return result;
+}
+
 bool Normalizer::isInhabited(TypeId ty, std::unordered_set<TypeId> seen)
 {
     // TODO: use log.follow(ty), CLI-64291

@@ -297,8 +314,18 @@ bool Normalizer::isInhabited(TypeId ty, std::unordered_set<TypeId> seen)
     {
         for (const auto& [_, prop] : ttv->props)
         {
-            if (!isInhabited(prop.type(), seen))
-                return false;
+            if (FFlag::DebugLuauReadWriteProperties)
+            {
+                // A table enclosing a read property whose type is uninhabitable is also itself uninhabitable,
+                // but not its write property. That just means the write property doesn't exist, and so is readonly.
+                if (auto ty = prop.readType(); ty && !isInhabited(*ty, seen))
+                    return false;
+            }
+            else
+            {
+                if (!isInhabited(prop.type(), seen))
+                    return false;
+            }
         }
         return true;
     }

@@ -314,14 +341,32 @@ bool Normalizer::isIntersectionInhabited(TypeId left, TypeId right)
 {
     left = follow(left);
     right = follow(right);

+    if (cacheInhabitance)
+    {
+        if (bool* result = cachedIsInhabitedIntersection.find({left, right}))
+            return *result;
+    }
+
     std::unordered_set<TypeId> seen = {};
     seen.insert(left);
     seen.insert(right);

     NormalizedType norm{builtinTypes};
     if (!normalizeIntersections({left, right}, norm))
+    {
+        if (cacheInhabitance)
+            cachedIsInhabitedIntersection[{left, right}] = false;
+
         return false;
+    }

-    return isInhabited(&norm, seen);
+    bool result = isInhabited(&norm, seen);
+
+    if (cacheInhabitance)
+        cachedIsInhabitedIntersection[{left, right}] = result;
+
+    return result;
 }

 static int tyvarIndex(TypeId ty)

@@ -568,10 +613,11 @@ static void assertInvariant(const NormalizedType& norm)
 #endif
 }

-Normalizer::Normalizer(TypeArena* arena, NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> sharedState)
+Normalizer::Normalizer(TypeArena* arena, NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> sharedState, bool cacheInhabitance)
     : arena(arena)
     , builtinTypes(builtinTypes)
     , sharedState(sharedState)
+    , cacheInhabitance(cacheInhabitance)
 {
 }

@@ -1315,7 +1361,8 @@ bool Normalizer::withinResourceLimits()
     // If cache is too large, clear it
     if (FInt::LuauNormalizeCacheLimit > 0)
     {
-        size_t cacheUsage = cachedNormals.size() + cachedIntersections.size() + cachedUnions.size() + cachedTypeIds.size();
+        size_t cacheUsage = cachedNormals.size() + cachedIntersections.size() + cachedUnions.size() + cachedTypeIds.size() +
+                            cachedIsInhabited.size() + cachedIsInhabitedIntersection.size();
         if (cacheUsage > size_t(FInt::LuauNormalizeCacheLimit))
         {
             clearCaches();

@@ -2726,7 +2773,7 @@ bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<Built
     UnifierSharedState sharedState{&ice};
     TypeArena arena;
     Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
-    Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{NotNull{&normalizer}, scope, Location{}, Covariant};

     u.tryUnify(subTy, superTy);
     return !u.failure;

@@ -2739,7 +2786,7 @@ bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, N
     UnifierSharedState sharedState{&ice};
     TypeArena arena;
     Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
-    Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{NotNull{&normalizer}, scope, Location{}, Covariant};

     u.tryUnify(subPack, superPack);
     return !u.failure;

@@ -2750,7 +2797,7 @@ bool isConsistentSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, Not
     UnifierSharedState sharedState{&ice};
     TypeArena arena;
     Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
-    Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{NotNull{&normalizer}, scope, Location{}, Covariant};

     u.tryUnify(subTy, superTy);
     const bool ok = u.errors.empty() && u.log.empty();

@@ -2763,7 +2810,7 @@ bool isConsistentSubtype(
     UnifierSharedState sharedState{&ice};
     TypeArena arena;
     Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
-    Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
+    Unifier u{NotNull{&normalizer}, scope, Location{}, Covariant};

     u.tryUnify(subPack, superPack);
     const bool ok = u.errors.empty() && u.log.empty();

@ -13,6 +13,8 @@ LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
|
|||
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
|
||||
LUAU_FASTFLAGVARIABLE(LuauClassTypeVarsInSubstitution, false)
|
||||
LUAU_FASTFLAGVARIABLE(LuauSubstitutionReentrant, false)
|
||||
LUAU_FASTFLAG(DebugLuauReadWriteProperties)
|
||||
LUAU_FASTFLAG(LuauCloneSkipNonInternalVisit)
|
||||
|
||||
namespace Luau
|
||||
{
|
||||
|
@ -214,7 +216,7 @@ void Tarjan::visitChildren(TypeId ty, int index)
|
|||
{
|
||||
LUAU_ASSERT(ty == log->follow(ty));
|
||||
|
||||
if (ignoreChildren(ty))
|
||||
if (FFlag::LuauCloneSkipNonInternalVisit ? ignoreChildrenVisit(ty) : ignoreChildren(ty))
|
||||
return;
|
||||
|
||||
if (auto pty = log->pending(ty))
|
||||
|
@ -237,7 +239,16 @@ void Tarjan::visitChildren(TypeId ty, int index)
|
|||
{
|
||||
LUAU_ASSERT(!ttv->boundTo);
|
||||
for (const auto& [name, prop] : ttv->props)
|
||||
visitChild(prop.type());
|
||||
{
|
||||
if (FFlag::DebugLuauReadWriteProperties)
|
||||
{
|
||||
visitChild(prop.readType());
|
||||
visitChild(prop.writeType());
|
||||
}
|
||||
else
|
||||
visitChild(prop.type());
|
||||
}
|
||||
|
||||
if (ttv->indexer)
|
||||
{
|
||||
visitChild(ttv->indexer->indexType);
|
||||
|
@ -311,7 +322,7 @@ void Tarjan::visitChildren(TypePackId tp, int index)
|
|||
{
|
||||
LUAU_ASSERT(tp == log->follow(tp));
|
||||
|
||||
if (ignoreChildren(tp))
|
||||
if (FFlag::LuauCloneSkipNonInternalVisit ? ignoreChildrenVisit(tp) : ignoreChildren(tp))
|
||||
return;
|
||||
|
||||
if (auto ptp = log->pending(tp))
|
||||
|
@ -793,7 +804,13 @@ void Substitution::replaceChildren(TypeId ty)
|
|||
{
|
||||
LUAU_ASSERT(!ttv->boundTo);
|
||||
for (auto& [name, prop] : ttv->props)
|
||||
prop.setType(replace(prop.type()));
|
||||
{
|
||||
if (FFlag::DebugLuauReadWriteProperties)
|
||||
prop = Property::create(replace(prop.readType()), replace(prop.writeType()));
|
||||
else
|
||||
prop.setType(replace(prop.type()));
|
||||
}
|
||||
|
||||
if (ttv->indexer)
|
||||
{
|
||||
ttv->indexer->indexType = replace(ttv->indexer->indexType);
|
||||
|
|
|
@@ -335,6 +335,44 @@ struct TypeStringifier
             tv->ty);
     }

+    void stringify(const std::string& name, const Property& prop)
+    {
+        if (isIdentifier(name))
+            state.emit(name);
+        else
+        {
+            state.emit("[\"");
+            state.emit(escape(name));
+            state.emit("\"]");
+        }
+        state.emit(": ");
+
+        if (FFlag::DebugLuauReadWriteProperties)
+        {
+            // We special case the stringification if the property's read and write types are shared.
+            if (prop.isShared())
+                return stringify(*prop.readType());
+
+            // Otherwise emit them separately.
+            if (auto ty = prop.readType())
+            {
+                state.emit("read ");
+                stringify(*ty);
+            }
+
+            if (prop.readType() && prop.writeType())
+                state.emit(" + ");
+
+            if (auto ty = prop.writeType())
+            {
+                state.emit("write ");
+                stringify(*ty);
+            }
+        }
+        else
+            stringify(prop.type());
+    }
+
     void stringify(TypePackId tp);
     void stringify(TypePackId tpid, const std::vector<std::optional<FunctionArgument>>& names);

@@ -672,16 +710,8 @@ struct TypeStringifier
                 break;
             }

-            if (isIdentifier(name))
-                state.emit(name);
-            else
-            {
-                state.emit("[\"");
-                state.emit(escape(name));
-                state.emit("\"]");
-            }
-            state.emit(": ");
-            stringify(prop.type());
+            stringify(name, prop);

             comma = true;
             ++index;
         }

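With divergent read and write types, the stringifier now emits explicit annotations. Illustrative (assumed) renderings of the new syntax:

    // For a property with read type string and write type number:
    //     { p: read string + write number }
    // For a readonly property:
    //     { p: read string }
    // When read and write types are shared, the familiar form is kept:
    //     { p: string }
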
@@ -27,7 +27,6 @@ LUAU_FASTINT(LuauTypeInferRecursionLimit)
 LUAU_FASTFLAG(LuauInstantiateInSubtyping)
 LUAU_FASTFLAG(LuauNormalizeBlockedTypes)
 LUAU_FASTFLAG(DebugLuauReadWriteProperties)
-LUAU_FASTFLAGVARIABLE(LuauBoundLazyTypes2, false)

 namespace Luau
 {

@@ -78,65 +77,31 @@ TypeId follow(TypeId t)
 TypeId follow(TypeId t, const void* context, TypeId (*mapper)(const void*, TypeId))
 {
     auto advance = [context, mapper](TypeId ty) -> std::optional<TypeId> {
-        if (FFlag::LuauBoundLazyTypes2)
-        {
-            TypeId mapped = mapper(context, ty);
-
-            if (auto btv = get<Unifiable::Bound<TypeId>>(mapped))
-                return btv->boundTo;
-
-            if (auto ttv = get<TableType>(mapped))
-                return ttv->boundTo;
-
-            if (auto ltv = getMutable<LazyType>(mapped))
-                return unwrapLazy(ltv);
-
-            return std::nullopt;
-        }
-        else
-        {
-            if (auto btv = get<Unifiable::Bound<TypeId>>(mapper(context, ty)))
-                return btv->boundTo;
-            else if (auto ttv = get<TableType>(mapper(context, ty)))
-                return ttv->boundTo;
-            else
-                return std::nullopt;
-        }
-    };
-
-    auto force = [context, mapper](TypeId ty) {
-        TypeId mapped = mapper(context, ty);
+        TypeId mapped = mapper(context, ty);

-        if (auto ltv = get_if<LazyType>(&mapped->ty))
-        {
-            TypeId res = ltv->thunk_DEPRECATED();
-            if (get<LazyType>(res))
-                throw InternalCompilerError("Lazy Type cannot resolve to another Lazy Type");
+        if (auto btv = get<Unifiable::Bound<TypeId>>(mapped))
+            return btv->boundTo;

-            *asMutable(ty) = BoundType(res);
-        }
-    };
+        if (auto ttv = get<TableType>(mapped))
+            return ttv->boundTo;

-    if (!FFlag::LuauBoundLazyTypes2)
-        force(t);
+        if (auto ltv = getMutable<LazyType>(mapped))
+            return unwrapLazy(ltv);
+
+        return std::nullopt;
+    };

     TypeId cycleTester = t; // Null once we've determined that there is no cycle
     if (auto a = advance(cycleTester))
         cycleTester = *a;
     else
         return t;

-    if (FFlag::LuauBoundLazyTypes2)
-    {
-        if (!advance(cycleTester)) // Short circuit traversal for the rather common case when advance(advance(t)) == null
-            return cycleTester;
-    }
+    if (!advance(cycleTester)) // Short circuit traversal for the rather common case when advance(advance(t)) == null
+        return cycleTester;

     while (true)
     {
-        if (!FFlag::LuauBoundLazyTypes2)
-            force(t);
-
         auto a1 = advance(t);
         if (a1)
             t = *a1;

@@ -684,16 +649,17 @@ Property Property::rw(TypeId read, TypeId write)
     return p;
 }

-std::optional<Property> Property::create(std::optional<TypeId> read, std::optional<TypeId> write)
+Property Property::create(std::optional<TypeId> read, std::optional<TypeId> write)
 {
     if (read && !write)
         return Property::readonly(*read);
     else if (!read && write)
         return Property::writeonly(*write);
-    else if (read && write)
-        return Property::rw(*read, *write);
     else
-        return std::nullopt;
+    {
+        LUAU_ASSERT(read && write);
+        return Property::rw(*read, *write);
+    }
 }

 TypeId Property::type() const

@@ -705,6 +671,7 @@ TypeId Property::type() const

 void Property::setType(TypeId ty)
 {
+    LUAU_ASSERT(!FFlag::DebugLuauReadWriteProperties);
     readTy = ty;
 }

@@ -722,6 +689,11 @@ std::optional<TypeId> Property::writeType() const
     return writeTy;
 }

+bool Property::isShared() const
+{
+    return readTy && writeTy && readTy == writeTy;
+}
+
 TableType::TableType(TableState state, TypeLevel level, Scope* scope)
     : state(state)
     , level(level)

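With thunk_DEPRECATED removed, every LazyType must be constructed with an unwrap callback that publishes its result into the atomic unwrapped field, which follow() then reads via unwrapLazy. A construction sketch (the arena and the resolved type are assumptions, not taken from this diff):

    TypeArena arena;
    TypeId lazy = arena.addType(LazyType{[&arena](LazyType& self) {
        // The callback must store the final type into `unwrapped`; returning
        // a value (the old thunk contract) is no longer part of the interface.
        self.unwrapped = arena.addType(PrimitiveType{PrimitiveType::String});
    }});
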
@@ -13,9 +13,11 @@
 #include "Luau/ToString.h"
 #include "Luau/TxnLog.h"
 #include "Luau/Type.h"
+#include "Luau/TypePack.h"
 #include "Luau/TypeUtils.h"
 #include "Luau/Unifier.h"
+#include "Luau/TypeFamily.h"
 #include "Luau/VisitType.h"

 #include <algorithm>

@@ -81,6 +83,146 @@ static std::optional<std::string> getIdentifierOfBaseVar(AstExpr* node)
     return std::nullopt;
 }

+template<typename T>
+bool areEquivalent(const T& a, const T& b)
+{
+    if (a.family != b.family)
+        return false;
+
+    if (a.typeArguments.size() != b.typeArguments.size() || a.packArguments.size() != b.packArguments.size())
+        return false;
+
+    for (size_t i = 0; i < a.typeArguments.size(); ++i)
+    {
+        if (follow(a.typeArguments[i]) != follow(b.typeArguments[i]))
+            return false;
+    }
+
+    for (size_t i = 0; i < a.packArguments.size(); ++i)
+    {
+        if (follow(a.packArguments[i]) != follow(b.packArguments[i]))
+            return false;
+    }
+
+    return true;
+}
+
+struct FamilyFinder : TypeOnceVisitor
+{
+    DenseHashSet<TypeId> mentionedFamilies{nullptr};
+    DenseHashSet<TypePackId> mentionedFamilyPacks{nullptr};
+
+    bool visit(TypeId ty, const TypeFamilyInstanceType&) override
+    {
+        mentionedFamilies.insert(ty);
+        return true;
+    }
+
+    bool visit(TypePackId tp, const TypeFamilyInstanceTypePack&) override
+    {
+        mentionedFamilyPacks.insert(tp);
+        return true;
+    }
+};
+
+struct InternalFamilyFinder : TypeOnceVisitor
+{
+    DenseHashSet<TypeId> internalFamilies{nullptr};
+    DenseHashSet<TypePackId> internalPackFamilies{nullptr};
+    DenseHashSet<TypeId> mentionedFamilies{nullptr};
+    DenseHashSet<TypePackId> mentionedFamilyPacks{nullptr};
+
+    InternalFamilyFinder(std::vector<TypeId>& declStack)
+    {
+        FamilyFinder f;
+        for (TypeId fn : declStack)
+            f.traverse(fn);
+
+        mentionedFamilies = std::move(f.mentionedFamilies);
+        mentionedFamilyPacks = std::move(f.mentionedFamilyPacks);
+    }
+
+    bool visit(TypeId ty, const TypeFamilyInstanceType& tfit) override
+    {
+        bool hasGeneric = false;
+
+        for (TypeId p : tfit.typeArguments)
+        {
+            if (get<GenericType>(follow(p)))
+            {
+                hasGeneric = true;
+                break;
+            }
+        }
+
+        for (TypePackId p : tfit.packArguments)
+        {
+            if (get<GenericTypePack>(follow(p)))
+            {
+                hasGeneric = true;
+                break;
+            }
+        }
+
+        if (hasGeneric)
+        {
+            for (TypeId mentioned : mentionedFamilies)
+            {
+                const TypeFamilyInstanceType* mentionedTfit = get<TypeFamilyInstanceType>(mentioned);
+                LUAU_ASSERT(mentionedTfit);
+                if (areEquivalent(tfit, *mentionedTfit))
+                {
+                    return true;
+                }
+            }
+
+            internalFamilies.insert(ty);
+        }
+
+        return true;
+    }
+
+    bool visit(TypePackId tp, const TypeFamilyInstanceTypePack& tfitp) override
+    {
+        bool hasGeneric = false;
+
+        for (TypeId p : tfitp.typeArguments)
+        {
+            if (get<GenericType>(follow(p)))
+            {
+                hasGeneric = true;
+                break;
+            }
+        }
+
+        for (TypePackId p : tfitp.packArguments)
+        {
+            if (get<GenericTypePack>(follow(p)))
+            {
+                hasGeneric = true;
+                break;
+            }
+        }
+
+        if (hasGeneric)
+        {
+            for (TypePackId mentioned : mentionedFamilyPacks)
+            {
+                const TypeFamilyInstanceTypePack* mentionedTfitp = get<TypeFamilyInstanceTypePack>(mentioned);
+                LUAU_ASSERT(mentionedTfitp);
+                if (areEquivalent(tfitp, *mentionedTfitp))
+                {
+                    return true;
+                }
+            }
+
+            internalPackFamilies.insert(tp);
+        }
+
+        return true;
+    }
+};
+
 struct TypeChecker2
 {
     NotNull<BuiltinTypes> builtinTypes;

@@ -91,16 +233,20 @@ struct TypeChecker2
     TypeArena testArena;

     std::vector<NotNull<Scope>> stack;
+    std::vector<TypeId> functionDeclStack;
+
+    DenseHashSet<TypeId> noTypeFamilyErrors{nullptr};

     Normalizer normalizer;

-    TypeChecker2(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule* sourceModule, Module* module)
+    TypeChecker2(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule* sourceModule,
+        Module* module)
         : builtinTypes(builtinTypes)
         , logger(logger)
         , ice(unifierState->iceHandler)
         , sourceModule(sourceModule)
||||
, module(module)
|
||||
, normalizer{&testArena, builtinTypes, unifierState}
|
||||
, normalizer{&testArena, builtinTypes, unifierState, /* cacheInhabitance */ true}
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -112,10 +258,31 @@ struct TypeChecker2
|
|||
return std::nullopt;
|
||||
}
|
||||
|
||||
void checkForInternalFamily(TypeId ty, Location location)
|
||||
{
|
||||
InternalFamilyFinder finder(functionDeclStack);
|
||||
finder.traverse(ty);
|
||||
|
||||
for (TypeId internal : finder.internalFamilies)
|
||||
reportError(WhereClauseNeeded{internal}, location);
|
||||
|
||||
for (TypePackId internal : finder.internalPackFamilies)
|
||||
reportError(PackWhereClauseNeeded{internal}, location);
|
||||
}
|
||||
|
||||
TypeId checkForFamilyInhabitance(TypeId instance, Location location)
|
||||
{
|
||||
if (noTypeFamilyErrors.find(instance))
|
||||
return instance;
|
||||
|
||||
TxnLog fake{};
|
||||
reportErrors(reduceFamilies(instance, location, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true).errors);
|
||||
ErrorVec errors =
|
||||
reduceFamilies(instance, location, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true).errors;
|
||||
|
||||
if (errors.empty())
|
||||
noTypeFamilyErrors.insert(instance);
|
||||
|
||||
reportErrors(std::move(errors));
|
||||
return instance;
|
||||
}
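The hunk above adds a small success cache around type family reduction: once an instance reduces without errors it is recorded in noTypeFamilyErrors, so later checks of the same instance return immediately. A minimal sketch of the same memoization shape (reduce() is a hypothetical stand-in for the reduceFamilies call, not part of this commit):

    // Sketch: cache only the clean reductions; errors are always recomputed.
    DenseHashSet<TypeId> noErrors{nullptr};

    ErrorVec checkCached(TypeId instance)
    {
        if (noErrors.find(instance))
            return {}; // previously reduced cleanly, skip the expensive work

        ErrorVec errors = reduce(instance);
        if (errors.empty())
            noErrors.insert(instance); // only the success path is cached

        return errors;
    }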

@@ -316,7 +483,7 @@ struct TypeChecker2
    TypeArena* arena = &testArena;
    TypePackId actualRetType = reconstructPack(ret->list, *arena);

    Unifier u{NotNull{&normalizer}, Mode::Strict, stack.back(), ret->location, Covariant};
    Unifier u{NotNull{&normalizer}, stack.back(), ret->location, Covariant};
    u.hideousFixMeGenericsAreActuallyFree = true;

    u.tryUnify(actualRetType, expectedRetType);

@@ -466,12 +633,47 @@ struct TypeChecker2
        variableTypes.emplace_back(*ty);
    }

    // ugh. There's nothing in the AST to hang a whole type pack on for the
    // set of iteratees, so we have to piece it back together by hand.
    AstExpr* firstValue = forInStatement->values.data[0];

    // we need to build up a typepack for the iterators/values portion of the for-in statement.
    std::vector<TypeId> valueTypes;
    for (size_t i = 0; i < forInStatement->values.size - 1; ++i)
    std::optional<TypePackId> iteratorTail;

    // since the first value may be the only iterator (e.g. if it is a call), we want to
    // look to see if it has a resulting typepack as our iterators.
    TypePackId* retPack = module->astTypePacks.find(firstValue);
    if (retPack)
    {
        auto [head, tail] = flatten(*retPack);
        valueTypes = head;
        iteratorTail = tail;
    }
    else
    {
        valueTypes.emplace_back(lookupType(firstValue));
    }

    // if the initial and expected types from the iterator unified during constraint solving,
    // we'll have a resolved type to use here, but we'll only use it if either the iterator is
    // directly present in the for-in statement or if we have an iterator state constraining us
    TypeId* resolvedTy = module->astOverloadResolvedTypes.find(firstValue);
    if (resolvedTy && (!retPack || valueTypes.size() > 1))
        valueTypes[0] = *resolvedTy;

    for (size_t i = 1; i < forInStatement->values.size - 1; ++i)
    {
        valueTypes.emplace_back(lookupType(forInStatement->values.data[i]));
        TypePackId iteratorTail = lookupPack(forInStatement->values.data[forInStatement->values.size - 1]);
    }

    // if we had more than one value, the tail from the first value is no longer appropriate to use.
    if (forInStatement->values.size > 1)
    {
        auto [head, tail] = flatten(lookupPack(forInStatement->values.data[forInStatement->values.size - 1]));
        valueTypes.insert(valueTypes.end(), head.begin(), head.end());
        iteratorTail = tail;
    }

    // and now we can put everything together to get the actual typepack of the iterators.
    TypePackId iteratorPack = arena.addTypePack(valueTypes, iteratorTail);

    // ... and then expand it out to 3 values (if possible)

@@ -518,26 +720,16 @@ struct TypeChecker2
    // This depends on the types in iterateePack and therefore
    // iteratorTypes.

    // If the iteratee is an error type, then we can't really say anything else about iteration over it.
    // After all, it _could've_ been a table.
    if (get<ErrorType>(follow(flattenPack(iterFtv->argTypes))))
        return;

    // If iteratorTypes is too short to be a valid call to nextFn, we have to report a count mismatch error.
    // If 2 is too short to be a valid call to nextFn, we have to report a count mismatch error.
    // If 2 is too long to be a valid call to nextFn, we have to report a count mismatch error.
    auto [minCount, maxCount] = getParameterExtents(TxnLog::empty(), iterFtv->argTypes, /*includeHiddenVariadics*/ true);

    if (minCount > 2)
    {
        if (isMm)
            reportError(GenericError{"__iter metamethod must return (next[, table[, state]])"}, getLocation(forInStatement->values));
        else
            reportError(GenericError{"for..in loops must be passed (next[, table[, state]])"}, getLocation(forInStatement->values));
    }
    if (maxCount && *maxCount < 2)
    {
        if (isMm)
            reportError(GenericError{"__iter metamethod must return (next[, table[, state]])"}, getLocation(forInStatement->values));
        else
            reportError(GenericError{"for..in loops must be passed (next[, table[, state]])"}, getLocation(forInStatement->values));
    }

    TypePack flattenedArgTypes = extendTypePack(arena, builtinTypes, iterFtv->argTypes, 2);
    size_t firstIterationArgCount = iterTys.empty() ? 0 : iterTys.size() - 1;
    size_t actualArgCount = expectedVariableTypes.head.size();

@@ -546,7 +738,7 @@ struct TypeChecker2
    if (isMm)
        reportError(GenericError{"__iter metamethod must return (next[, table[, state]])"}, getLocation(forInStatement->values));
    else
        reportError(CountMismatch{2, std::nullopt, firstIterationArgCount, CountMismatch::Arg}, forInStatement->vars.data[0]->location);
        reportError(CountMismatch{2, std::nullopt, firstIterationArgCount, CountMismatch::Arg}, forInStatement->values.data[0]->location);
}

else if (actualArgCount < minCount)

@@ -554,7 +746,7 @@ struct TypeChecker2
    if (isMm)
        reportError(GenericError{"__iter metamethod must return (next[, table[, state]])"}, getLocation(forInStatement->values));
    else
        reportError(CountMismatch{2, std::nullopt, firstIterationArgCount, CountMismatch::Arg}, forInStatement->vars.data[0]->location);
        reportError(CountMismatch{2, std::nullopt, firstIterationArgCount, CountMismatch::Arg}, forInStatement->values.data[0]->location);
}


@@ -1211,6 +1403,7 @@ struct TypeChecker2
    visitGenerics(fn->generics, fn->genericPacks);

    TypeId inferredFnTy = lookupType(fn);
    functionDeclStack.push_back(inferredFnTy);

    const NormalizedType* normalizedFnTy = normalizer.normalize(inferredFnTy);
    if (!normalizedFnTy)

@@ -1260,6 +1453,8 @@ struct TypeChecker2
    }

    visit(fn->body);

    functionDeclStack.pop_back();
}

void visit(AstExprTable* expr)

@@ -1370,7 +1565,10 @@ struct TypeChecker2
    TypeId expectedResult = lookupType(expr);

    if (get<TypeFamilyInstanceType>(expectedResult))
    {
        checkForInternalFamily(expectedResult, expr->location);
        return expectedResult;
    }

    if (expr->op == AstExprBinary::Op::Or)
    {

@@ -1379,9 +1577,9 @@ struct TypeChecker2

    bool isStringOperation = isString(leftType) && isString(rightType);

    if (get<AnyType>(leftType) || get<ErrorType>(leftType))
    if (get<AnyType>(leftType) || get<ErrorType>(leftType) || get<NeverType>(leftType))
        return leftType;
    else if (get<AnyType>(rightType) || get<ErrorType>(rightType))
    else if (get<AnyType>(rightType) || get<ErrorType>(rightType) || get<NeverType>(rightType))
        return rightType;

    if ((get<BlockedType>(leftType) || get<FreeType>(leftType) || get<GenericType>(leftType)) && !isEquality && !isLogical)

@@ -1982,7 +2180,7 @@ struct TypeChecker2
    bool isSubtype(TID subTy, TID superTy, NotNull<Scope> scope, bool genericsOkay = false)
    {
        TypeArena arena;
        Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
        Unifier u{NotNull{&normalizer}, scope, Location{}, Covariant};
        u.hideousFixMeGenericsAreActuallyFree = genericsOkay;
        u.enableScopeTests();

@@ -1995,7 +2193,7 @@ struct TypeChecker2
    ErrorVec tryUnify(NotNull<Scope> scope, const Location& location, TID subTy, TID superTy, CountMismatch::Context context = CountMismatch::Arg,
        bool genericsOkay = false)
    {
        Unifier u{NotNull{&normalizer}, Mode::Strict, scope, location, Covariant};
        Unifier u{NotNull{&normalizer}, scope, location, Covariant};
        u.ctx = context;
        u.hideousFixMeGenericsAreActuallyFree = genericsOkay;
        u.enableScopeTests();

@@ -301,6 +301,9 @@ FamilyGraphReductionResult reduceFamilies(TypeId entrypoint, Location location,
        return FamilyGraphReductionResult{};
    }

    if (collector.tys.empty() && collector.tps.empty())
        return {};

    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, scope, normalizer, log, force);
}

@@ -318,6 +321,9 @@ FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location locati
        return FamilyGraphReductionResult{};
    }

    if (collector.tys.empty() && collector.tps.empty())
        return {};

    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, scope, normalizer, log, force);
}

@@ -338,8 +344,10 @@ TypeFamilyReductionResult<TypeId> addFamilyFn(std::vector<TypeId> typeParams, st

    TypeId lhsTy = log->follow(typeParams.at(0));
    TypeId rhsTy = log->follow(typeParams.at(1));
    const NormalizedType* normLhsTy = normalizer->normalize(lhsTy);
    const NormalizedType* normRhsTy = normalizer->normalize(rhsTy);

    if (isNumber(lhsTy) && isNumber(rhsTy))
    if (normLhsTy && normRhsTy && normLhsTy->isNumber() && normRhsTy->isNumber())
    {
        return {builtins->numberType, false, {}, {}};
    }
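The number fast path of the add family now consults the normalizer instead of calling isNumber on the raw types. A hedged illustration of why this matters (the intersection example is an assumption about the motivation, not taken from this commit):

    // A type such as `number & unknown` is not a PrimitiveType, so isNumber()
    // on the raw TypeId misses it, but its normalized form is plain `number`.
    const NormalizedType* norm = normalizer->normalize(lhsTy);
    bool lhsIsNumber = norm && norm->isNumber();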

@@ -398,7 +406,7 @@ TypeFamilyReductionResult<TypeId> addFamilyFn(std::vector<TypeId> typeParams, st
        inferredArgs = {rhsTy, lhsTy};

    TypePackId inferredArgPack = arena->addTypePack(std::move(inferredArgs));
    Unifier u{normalizer, Mode::Strict, scope, Location{}, Variance::Covariant, log.get()};
    Unifier u{normalizer, scope, Location{}, Variance::Covariant, log.get()};
    u.tryUnify(inferredArgPack, instantiatedMmFtv->argTypes);

    if (std::optional<TypeId> ret = first(instantiatedMmFtv->retTypes); ret && u.errors.empty())

@@ -39,7 +39,6 @@ LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure)
LUAU_FASTFLAGVARIABLE(LuauTypecheckTypeguards, false)
LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false)
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
LUAU_FASTFLAGVARIABLE(LuauTypecheckClassTypeIndexers, false)

namespace Luau

@@ -2769,8 +2768,9 @@ TypeId TypeChecker::checkRelationalOperation(

    std::string metamethodName = opToMetaTableEntry(expr.op);

    std::optional<TypeId> leftMetatable = isString(lhsType) ? std::nullopt : getMetatable(follow(lhsType), builtinTypes);
    std::optional<TypeId> rightMetatable = isString(rhsType) ? std::nullopt : getMetatable(follow(rhsType), builtinTypes);
    std::optional<TypeId> stringNoMT = std::nullopt; // works around gcc false positive "maybe uninitialized" warnings
    std::optional<TypeId> leftMetatable = isString(lhsType) ? stringNoMT : getMetatable(follow(lhsType), builtinTypes);
    std::optional<TypeId> rightMetatable = isString(rhsType) ? stringNoMT : getMetatable(follow(rhsType), builtinTypes);

    if (leftMetatable != rightMetatable)
    {

@@ -4676,7 +4676,7 @@ TypeId TypeChecker::checkRequire(const ScopePtr& scope, const ModuleInfo& module
    // Types of requires that transitively refer to current module have to be replaced with 'any'
    for (const auto& [location, path] : requireCycles)
    {
        if (!path.empty() && path.front() == (FFlag::LuauRequirePathTrueModuleName ? moduleInfo.name : resolver->getHumanReadableModuleName(moduleInfo.name)))
        if (!path.empty() && path.front() == moduleInfo.name)
            return anyType;
    }

@@ -5043,7 +5043,7 @@ void TypeChecker::merge(RefinementMap& l, const RefinementMap& r)

Unifier TypeChecker::mkUnifier(const ScopePtr& scope, const Location& location)
{
    return Unifier{NotNull{&normalizer}, currentModule->mode, NotNull{scope.get()}, location, Variance::Covariant};
    return Unifier{NotNull{&normalizer}, NotNull{scope.get()}, location, Variance::Covariant};
}

TypeId TypeChecker::freshType(const ScopePtr& scope)

@@ -396,11 +396,10 @@ TypeMismatch::Context Unifier::mismatchContext()
    }
}

Unifier::Unifier(NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog)
Unifier::Unifier(NotNull<Normalizer> normalizer, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog)
    : types(normalizer->arena)
    , builtinTypes(normalizer->builtinTypes)
    , normalizer(normalizer)
    , mode(mode)
    , scope(scope)
    , log(parentLog)
    , location(location)

@@ -423,6 +422,12 @@ static bool isBlocked(const TxnLog& log, TypeId ty)
    return get<BlockedType>(ty) || get<PendingExpansionType>(ty);
}

static bool isBlocked(const TxnLog& log, TypePackId tp)
{
    tp = log.follow(tp);
    return get<BlockedTypePack>(tp);
}

void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool isIntersection)
{
    RecursionLimiter _ra(&sharedState.counters.recursionCount, sharedState.counters.recursionLimit);

@@ -1761,6 +1766,19 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
    if (log.haveSeen(superTp, subTp))
        return;

    if (isBlocked(log, subTp) && isBlocked(log, superTp))
    {
        blockedTypePacks.push_back(subTp);
        blockedTypePacks.push_back(superTp);
    }
    else if (isBlocked(log, subTp))
    {
        blockedTypePacks.push_back(subTp);
    }
    else if (isBlocked(log, superTp))
    {
        blockedTypePacks.push_back(superTp);
    }
    if (log.getMutable<FreeTypePack>(superTp))
    {
        if (!occursCheck(superTp, subTp, /* reversed = */ true))

@@ -2795,7 +2813,12 @@ void Unifier::tryUnifyVariadics(TypePackId subTp, TypePackId superTp, bool rever
    if (std::optional<TypePackId> maybeTail = subIter.tail())
    {
        TypePackId tail = follow(*maybeTail);
        if (get<FreeTypePack>(tail))

        if (isBlocked(log, tail))
        {
            blockedTypePacks.push_back(tail);
        }
        else if (get<FreeTypePack>(tail))
        {
            log.replace(tail, BoundTypePack(superTp));
        }

@@ -3094,7 +3117,7 @@ bool Unifier::occursCheck(DenseHashSet<TypePackId>& seen, TypePackId needle, Typ

Unifier Unifier::makeChildUnifier()
{
    Unifier u = Unifier{normalizer, mode, scope, location, variance, &log};
    Unifier u = Unifier{normalizer, scope, location, variance, &log};
    u.normalize = normalize;
    u.checkInhabited = checkInhabited;

@@ -3125,12 +3148,6 @@ void Unifier::reportError(TypeError err)
    failure = true;
}


bool Unifier::isNonstrictMode() const
{
    return (mode == Mode::Nonstrict) || (mode == Mode::NoCheck);
}

void Unifier::checkChildUnifierTypeMismatch(const ErrorVec& innerErrors, TypeId wantedType, TypeId givenType)
{
    if (auto e = hasUnificationTooComplex(innerErrors))

@@ -157,6 +157,8 @@ public:
    void fcmpz(RegisterA64 src);
    void fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);

    void udf();

    // Run final checks
    bool finalize();

@@ -99,6 +99,7 @@ public:
    void call(OperandX64 op);

    void int3();
    void ud2();

    void bsr(RegisterX64 dst, OperandX64 src);
    void bsf(RegisterX64 dst, OperandX64 src);

@@ -38,7 +38,6 @@ struct IrBuilder

    IrOp undef();

    IrOp constBool(bool value);
    IrOp constInt(int value);
    IrOp constUint(unsigned value);
    IrOp constDouble(double value);

@@ -283,7 +283,7 @@ enum class IrCmd : uint8_t
    // A: builtin
    // B: Rn (result start)
    // C: Rn (argument start)
    // D: Rn or Kn or a boolean that's false (optional second argument)
    // D: Rn or Kn or undef (optional second argument)
    // E: int (argument count)
    // F: int (result count)
    FASTCALL,

@@ -292,7 +292,7 @@ enum class IrCmd : uint8_t
    // A: builtin
    // B: Rn (result start)
    // C: Rn (argument start)
    // D: Rn or Kn or a boolean that's false (optional second argument)
    // D: Rn or Kn or undef (optional second argument)
    // E: int (argument count or -1 to use all arguments up to stack top)
    // F: int (result count or -1 to preserve all results and adjust stack top)
    INVOKE_FASTCALL,

@@ -360,39 +360,46 @@ enum class IrCmd : uint8_t

    // Guard against tag mismatch
    // A, B: tag
    // C: block
    // C: block/undef
    // In final x64 lowering, A can also be Rn
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_TAG,

    // Guard against readonly table
    // A: pointer (Table)
    // B: block
    // B: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_READONLY,

    // Guard against table having a metatable
    // A: pointer (Table)
    // B: block
    // B: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_NO_METATABLE,

    // Guard against executing in unsafe environment
    // A: block
    // A: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_SAFE_ENV,

    // Guard against index overflowing the table array size
    // A: pointer (Table)
    // B: int (index)
    // C: block
    // C: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_ARRAY_SIZE,

    // Guard against cached table node slot not matching the actual table node slot for a key
    // A: pointer (LuaNode)
    // B: Kn
    // C: block
    // C: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_SLOT_MATCH,

    // Guard against table node with a linked next node to ensure that our lookup hits the main position of the key
    // A: pointer (LuaNode)
    // B: block
    // B: block/undef
    // When undef is specified instead of a block, execution is aborted on check failure
    CHECK_NODE_NO_NEXT,

    // Special operations
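Every CHECK_* guard above now accepts undef in place of its target block; when a guard with an undef target fails, the generated code aborts instead of branching to a fallback block. A hedged sketch of emitting such a guard with IrBuilder::undef() from this same commit (the surrounding setup is illustrative only, not code from the diff):

    // Tag check that aborts on failure rather than jumping to a fallback block.
    IrBuilder build;
    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(0));
    build.inst(IrCmd::CHECK_TAG, tag, build.constTag(LUA_TNUMBER), build.undef());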

@@ -428,7 +435,7 @@ enum class IrCmd : uint8_t

    // While capture is a no-op right now, it might be useful to track register/upvalue lifetimes
    // A: Rn or UPn
    // B: boolean (true for reference capture, false for value capture)
    // B: unsigned int (1 for reference capture, 0 for value capture)
    CAPTURE,

    // Operations that don't have an IR representation yet
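With IrConstKind::Bool removed later in this commit, CAPTURE's second operand becomes an unsigned constant: 1 for reference capture, 0 for value capture. A short sketch of the producer and consumer sides after the change (the consumer mirrors the computeBlockLiveInRegSet hunk further down):

    // Producer: capture register 2 by reference.
    build.inst(IrCmd::CAPTURE, build.vmReg(2), build.constUint(1));

    // Consumer: boolOp is gone, so readers compare the unsigned payload.
    if (function.uintOp(inst.b) == 1)
        capturedRegs.set(vmRegOp(inst.a), true);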

@@ -581,7 +588,6 @@ enum class IrCmd : uint8_t

enum class IrConstKind : uint8_t
{
    Bool,
    Int,
    Uint,
    Double,

@@ -867,27 +873,6 @@ struct IrFunction
    return value.valueTag;
}

bool boolOp(IrOp op)
{
    IrConst& value = constOp(op);

    LUAU_ASSERT(value.kind == IrConstKind::Bool);
    return value.valueBool;
}

std::optional<bool> asBoolOp(IrOp op)
{
    if (op.kind != IrOpKind::Constant)
        return std::nullopt;

    IrConst& value = constOp(op);

    if (value.kind != IrConstKind::Bool)
        return std::nullopt;

    return value.valueBool;
}

int intOp(IrOp op)
{
    IrConst& value = constOp(op);

@@ -687,6 +687,11 @@ void AssemblyBuilderA64::fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 sr
    placeCS("fcsel", dst, src1, src2, cond, 0b11110'01'1, 0b11);
}

void AssemblyBuilderA64::udf()
{
    place0("udf", 0);
}

bool AssemblyBuilderA64::finalize()
{
    code.resize(codePos - code.data());

@@ -472,6 +472,15 @@ void AssemblyBuilderX64::int3()
    commit();
}

void AssemblyBuilderX64::ud2()
{
    if (logText)
        log("ud2");

    place(0x0f);
    place(0x0b);
}

void AssemblyBuilderX64::bsr(RegisterX64 dst, OperandX64 src)
{
    if (logText)

@@ -51,13 +51,13 @@ static void makePagesExecutable(uint8_t* mem, size_t size)

    DWORD oldProtect;
    if (VirtualProtect(mem, size, PAGE_EXECUTE_READ, &oldProtect) == 0)
        LUAU_ASSERT(!"failed to change page protection");
        LUAU_ASSERT(!"Failed to change page protection");
}

static void flushInstructionCache(uint8_t* mem, size_t size)
{
    if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0)
        LUAU_ASSERT(!"failed to flush instruction cache");
        LUAU_ASSERT(!"Failed to flush instruction cache");
}
#else
static uint8_t* allocatePages(size_t size)

@@ -68,7 +68,7 @@ static uint8_t* allocatePages(size_t size)
static void freePages(uint8_t* mem, size_t size)
{
    if (munmap(mem, alignToPageSize(size)) != 0)
        LUAU_ASSERT(!"failed to deallocate block memory");
        LUAU_ASSERT(!"Failed to deallocate block memory");
}

static void makePagesExecutable(uint8_t* mem, size_t size)

@@ -77,7 +77,7 @@ static void makePagesExecutable(uint8_t* mem, size_t size)
    LUAU_ASSERT(size == alignToPageSize(size));

    if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0)
        LUAU_ASSERT(!"failed to change page protection");
        LUAU_ASSERT(!"Failed to change page protection");
}

static void flushInstructionCache(uint8_t* mem, size_t size)

@@ -79,7 +79,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
#if defined(_WIN32) && defined(_M_X64)
    if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block)))
    {
        LUAU_ASSERT(!"failed to allocate function table");
        LUAU_ASSERT(!"Failed to allocate function table");
        return nullptr;
    }
#elif defined(__linux__) || defined(__APPLE__)

@@ -94,7 +94,7 @@ void destroyBlockUnwindInfo(void* context, void* unwindData)
{
#if defined(_WIN32) && defined(_M_X64)
    if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
        LUAU_ASSERT(!"failed to deallocate function table");
        LUAU_ASSERT(!"Failed to deallocate function table");
#elif defined(__linux__) || defined(__APPLE__)
    visitFdeEntries((char*)unwindData, __deregister_frame);
#endif

@@ -18,7 +18,6 @@
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/AssemblyBuilderX64.h"

#include "CustomExecUtils.h"
#include "NativeState.h"

#include "CodeGenA64.h"

@@ -59,6 +58,8 @@ namespace Luau
namespace CodeGen
{

static const Instruction kCodeEntryInsn = LOP_NATIVECALL;

static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;

@@ -332,9 +333,15 @@ static std::optional<NativeProto> assembleFunction(AssemblyBuilder& build, Nativ
    return createNativeProto(proto, ir);
}

static NativeState* getNativeState(lua_State* L)
{
    return static_cast<NativeState*>(L->global->ecb.context);
}

static void onCloseState(lua_State* L)
{
    destroyNativeState(L);
    delete getNativeState(L);
    L->global->ecb = lua_ExecutionCallbacks();
}

static void onDestroyFunction(lua_State* L, Proto* proto)

@@ -342,6 +349,7 @@ static void onDestroyFunction(lua_State* L, Proto* proto)
    destroyExecData(proto->execdata);
    proto->execdata = nullptr;
    proto->exectarget = 0;
    proto->codeentry = proto->code;
}

static int onEnter(lua_State* L, Proto* proto)

@@ -362,7 +370,7 @@ static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
    if (!proto->execdata)
        return;

    LUAU_ASSERT(!"native breakpoints are not implemented");
    LUAU_ASSERT(!"Native breakpoints are not implemented");
}

#if defined(__aarch64__)

@@ -430,39 +438,34 @@ void create(lua_State* L)
{
    LUAU_ASSERT(isSupported());

    NativeState& data = *createNativeState(L);
    std::unique_ptr<NativeState> data = std::make_unique<NativeState>();

#if defined(_WIN32)
    data.unwindBuilder = std::make_unique<UnwindBuilderWin>();
    data->unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
    data.unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
    data->unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif

    data.codeAllocator.context = data.unwindBuilder.get();
    data.codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    data.codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
    data->codeAllocator.context = data->unwindBuilder.get();
    data->codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    data->codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;

    initFunctions(data);
    initFunctions(*data);

#if defined(__x86_64__) || defined(_M_X64)
    if (!X64::initHeaderFunctions(data))
    {
        destroyNativeState(L);
    if (!X64::initHeaderFunctions(*data))
        return;
    }
#elif defined(__aarch64__)
    if (!A64::initHeaderFunctions(data))
    {
        destroyNativeState(L);
    if (!A64::initHeaderFunctions(*data))
        return;
    }
#endif

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, uintptr_t(data.context.gateEntry), 4096, "<luau gate>");
        gPerfLogFn(gPerfLogContext, uintptr_t(data->context.gateEntry), 4096, "<luau gate>");

    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
    lua_ExecutionCallbacks* ecb = &L->global->ecb;

    ecb->context = data.release();
    ecb->close = onCloseState;
    ecb->destroy = onDestroyFunction;
    ecb->enter = onEnter;
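Taken together, the create/onCloseState hunks move NativeState ownership out of the CustomExecUtils helpers (which are deleted later in this commit): create() builds the state in a unique_ptr, releases it into ecb->context on success, and early returns simply drop it. A condensed restatement of the lifecycle, assembled from the hunks above rather than new API:

    std::unique_ptr<NativeState> data = std::make_unique<NativeState>();
    // ... initialization; a failed init just returns and `data` is destroyed ...
    L->global->ecb.context = data.release(); // the VM now owns the state
    // ... later, when the state is closed:
    delete static_cast<NativeState*>(L->global->ecb.context);
    L->global->ecb = lua_ExecutionCallbacks();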

@@ -490,7 +493,8 @@ void compile(lua_State* L, int idx)
    const TValue* func = luaA_toobject(L, idx);

    // If initialization has failed, do not compile any functions
    if (!getNativeState(L))
    NativeState* data = getNativeState(L);
    if (!data)
        return;

#if defined(__aarch64__)

@@ -499,8 +503,6 @@ void compile(lua_State* L, int idx)
    X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif

    NativeState* data = getNativeState(L);

    std::vector<Proto*> protos;
    gatherFunctions(protos, clvalue(func)->l.p);

@@ -564,6 +566,7 @@ void compile(lua_State* L, int idx)
        // the memory is now managed by VM and will be freed via onDestroyFunction
        result.p->execdata = result.execdata;
        result.p->exectarget = uintptr_t(codeStart) + result.exectarget;
        result.p->codeentry = &kCodeEntryInsn;
    }
}

@@ -5,7 +5,6 @@
#include "Luau/UnwindBuilder.h"

#include "BitUtils.h"
#include "CustomExecUtils.h"
#include "NativeState.h"
#include "EmitCommonA64.h"

@@ -95,13 +94,14 @@ static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
    build.ldr(x2, mem(rState, offsetof(lua_State, ci))); // L->ci

    // We need to check if the new frame can be executed natively
    // TOOD: .flags and .savedpc load below can be fused with ldp
    // TODO: .flags and .savedpc load below can be fused with ldp
    build.ldr(w3, mem(x2, offsetof(CallInfo, flags)));
    build.tbz(x3, countrz(LUA_CALLINFO_CUSTOM), helpers.exitContinueVm);
    build.tbz(x3, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);

    build.mov(rClosure, x0);
    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code

    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code

    // Get instruction index from instruction pointer
    // To get instruction index from instruction pointer, we need to divide byte offset by 4

@@ -145,8 +145,9 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
    build.mov(rNativeContext, x3);

    build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code

    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code

    build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci
    build.ldr(x9, mem(x9, offsetof(CallInfo, func))); // L->ci->func

@@ -194,7 +195,7 @@ bool initHeaderFunctions(NativeState& data)
    if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
            int(build.code.size() * sizeof(build.code[0])), data.gateData, data.gateDataSize, codeStart))
    {
        LUAU_ASSERT(!"failed to create entry function");
        LUAU_ASSERT(!"Failed to create entry function");
        return false;
    }

@@ -1,8 +1,6 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenUtils.h"

#include "CustomExecUtils.h"

#include "lvm.h"

#include "lbuiltins.h"

@@ -268,7 +266,7 @@ Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
    ci->savedpc = p->code;

    if (LUAU_LIKELY(p->execdata != NULL))
        ci->flags = LUA_CALLINFO_CUSTOM;
        ci->flags = LUA_CALLINFO_NATIVE;

    return ccl;
}

@@ -4,7 +4,6 @@
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/UnwindBuilder.h"

#include "CustomExecUtils.h"
#include "NativeState.h"
#include "EmitCommonX64.h"

@@ -160,7 +159,7 @@ bool initHeaderFunctions(NativeState& data)
    if (!data.codeAllocator.allocate(
            build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData, data.gateDataSize, codeStart))
    {
        LUAU_ASSERT(!"failed to create entry function");
        LUAU_ASSERT(!"Failed to create entry function");
        return false;
    }

@@ -1,106 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "NativeState.h"

#include "lobject.h"
#include "lstate.h"

namespace Luau
{
namespace CodeGen
{

// Here we define helper functions to wrap interaction with Luau custom execution API so that it works with or without LUA_CUSTOM_EXECUTION

#if LUA_CUSTOM_EXECUTION

inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
{
    return &L->global->ecb;
}

inline NativeState* getNativeState(lua_State* L)
{
    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
    return (NativeState*)ecb->context;
}

inline void setNativeState(lua_State* L, NativeState* nativeState)
{
    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
    ecb->context = nativeState;
}

inline NativeState* createNativeState(lua_State* L)
{
    NativeState* state = new NativeState();
    setNativeState(L, state);
    return state;
}

inline void destroyNativeState(lua_State* L)
{
    NativeState* state = getNativeState(L);
    setNativeState(L, nullptr);
    delete state;
}

#else

inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
{
    return nullptr;
}

inline NativeState* getNativeState(lua_State* L)
{
    return nullptr;
}

inline void setNativeState(lua_State* L, NativeState* nativeState) {}

inline NativeState* createNativeState(lua_State* L)
{
    return nullptr;
}

inline void destroyNativeState(lua_State* L) {}

#endif

inline int getOpLength(LuauOpcode op)
{
    switch (op)
    {
    case LOP_GETGLOBAL:
    case LOP_SETGLOBAL:
    case LOP_GETIMPORT:
    case LOP_GETTABLEKS:
    case LOP_SETTABLEKS:
    case LOP_NAMECALL:
    case LOP_JUMPIFEQ:
    case LOP_JUMPIFLE:
    case LOP_JUMPIFLT:
    case LOP_JUMPIFNOTEQ:
    case LOP_JUMPIFNOTLE:
    case LOP_JUMPIFNOTLT:
    case LOP_NEWTABLE:
    case LOP_SETLIST:
    case LOP_FORGLOOP:
    case LOP_LOADKX:
    case LOP_FASTCALL2:
    case LOP_FASTCALL2K:
    case LOP_JUMPXEQKNIL:
    case LOP_JUMPXEQKB:
    case LOP_JUMPXEQKN:
    case LOP_JUMPXEQKS:
        return 2;

    default:
        return 1;
    }
}

} // namespace CodeGen
} // namespace Luau

@@ -119,7 +119,6 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r
        return emitBuiltinTypeof(regs, build, ra, arg);
    default:
        LUAU_ASSERT(!"Missing x64 lowering");
        break;
    }
}

@@ -6,7 +6,6 @@
#include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h"

#include "CustomExecUtils.h"
#include "NativeState.h"

#include "lgc.h"

@@ -184,33 +184,6 @@ inline void jumpIfTruthy(AssemblyBuilderX64& build, int ri, Label& target, Label
    build.jcc(ConditionX64::NotEqual, target); // true if boolean value is 'true'
}

inline void jumpIfNodeKeyTagIsNot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, lua_Type tag, Label& label)
{
    tmp.size = SizeX64::dword;

    build.mov(tmp, luauNodeKeyTag(node));
    build.and_(tmp, kTKeyTagMask);
    build.cmp(tmp, tag);
    build.jcc(ConditionX64::NotEqual, label);
}

inline void jumpIfNodeValueTagIs(AssemblyBuilderX64& build, RegisterX64 node, lua_Type tag, Label& label)
{
    build.cmp(dword[node + offsetof(LuaNode, val) + offsetof(TValue, tt)], tag);
    build.jcc(ConditionX64::Equal, label);
}

inline void jumpIfNodeKeyNotInExpectedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, OperandX64 expectedKey, Label& label)
{
    jumpIfNodeKeyTagIsNot(build, tmp, node, LUA_TSTRING, label);

    build.mov(tmp, expectedKey);
    build.cmp(tmp, luauNodeKeyValue(node));
    build.jcc(ConditionX64::NotEqual, label);

    jumpIfNodeValueTagIs(build, node, LUA_TNIL, label);
}

void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, IrCondition cond, Label& label);
void jumpOnAnyCmpFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, IrCondition cond, Label& label);

@@ -4,8 +4,10 @@
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrRegAllocX64.h"

#include "CustomExecUtils.h"
#include "EmitCommonX64.h"
#include "NativeState.h"

#include "lstate.h"

namespace Luau
{

@@ -87,8 +89,8 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
    build.test(rax, rax);
    build.jcc(ConditionX64::Zero, helpers.continueCallInVm);

    // Mark call frame as custom
    build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);
    // Mark call frame as native
    build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);

    // Switch current constants
    build.mov(rConstants, qword[proto + offsetof(Proto, k)]);

@@ -298,7 +300,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, i

    build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);

    build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);
    build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
    build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data

    // Change constants

@@ -113,7 +113,7 @@ uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t s
    }

    // There must be a next use since there is the last use location
    LUAU_ASSERT(!"failed to find next use");
    LUAU_ASSERT(!"Failed to find next use");
    return targetInst.lastUse;
}

@@ -338,7 +338,7 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
    case IrCmd::CAPTURE:
        maybeUse(inst.a);

        if (function.boolOp(inst.b))
        if (function.uintOp(inst.b) == 1)
            capturedRegs.set(vmRegOp(inst.a), true);
        break;
    case IrCmd::SETLIST:

@@ -4,7 +4,6 @@
#include "Luau/IrAnalysis.h"
#include "Luau/IrUtils.h"

#include "CustomExecUtils.h"
#include "IrTranslation.h"

#include "lapi.h"

@@ -19,7 +18,7 @@ namespace CodeGen
constexpr unsigned kNoAssociatedBlockIndex = ~0u;

IrBuilder::IrBuilder()
    : constantMap({IrConstKind::Bool, ~0ull})
    : constantMap({IrConstKind::Tag, ~0ull})
{
}

@@ -410,8 +409,7 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
        break;
    }
    default:
        LUAU_ASSERT(!"unknown instruction");
        break;
        LUAU_ASSERT(!"Unknown instruction");
    }
}

@@ -449,7 +447,7 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
    if (const uint32_t* newIndex = instRedir.find(op.index))
        op.index = *newIndex;
    else
        LUAU_ASSERT(!"values can only be used if they are defined in the same block");
        LUAU_ASSERT(!"Values can only be used if they are defined in the same block");
}
};

@@ -501,14 +499,6 @@ IrOp IrBuilder::undef()
    return {IrOpKind::Undef, 0};
}

IrOp IrBuilder::constBool(bool value)
{
    IrConst constant;
    constant.kind = IrConstKind::Bool;
    constant.valueBool = value;
    return constAny(constant, uint64_t(value));
}

IrOp IrBuilder::constInt(int value)
{
    IrConst constant;

@@ -390,9 +390,6 @@ void toString(std::string& result, IrConst constant)
{
    switch (constant.kind)
    {
    case IrConstKind::Bool:
        append(result, constant.valueBool ? "true" : "false");
        break;
    case IrConstKind::Int:
        append(result, "%di", constant.valueInt);
        break;

@@ -96,6 +96,15 @@ static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA6
    }
}

static void emitAbort(AssemblyBuilderA64& build, Label& abort)
{
    Label skip;
    build.b(skip);
    build.setLabel(abort);
    build.udf();
    build.setLabel(skip);
}
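emitAbort is how A64 guards with an undef target terminate: the guard branches to a local abort label, and the helper plants a udf (permanently undefined) instruction behind a skip branch so the straight-line path falls through unharmed. The usage pattern repeated by the CHECK_* lowerings later in this commit is roughly:

    Label abort; // only bound if some guard targeted it
    Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
    build.cbnz(reg, fail); // guard: branch away on failure
    if (abort.id)          // a guard used the abort path, so materialize it
        emitAbort(build, abort);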

static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos)
{
    // fallback(L, instruction, base, k)

@@ -256,7 +265,11 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
    }
    else if (inst.b.kind == IrOpKind::Constant)
    {
        if (intOp(inst.b) * sizeof(TValue) <= AssemblyBuilderA64::kMaxImmediate)
        if (intOp(inst.b) == 0)
        {
            // no offset required
        }
        else if (intOp(inst.b) * sizeof(TValue) <= AssemblyBuilderA64::kMaxImmediate)
        {
            build.add(inst.regA64, inst.regA64, uint16_t(intOp(inst.b) * sizeof(TValue)));
        }

@@ -562,7 +575,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        break;
    }
    case IrCmd::JUMP_EQ_TAG:
        if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Constant)
    {
        RegisterA64 zr = noreg;

        if (inst.a.kind == IrOpKind::Constant && tagOp(inst.a) == 0)
            zr = regOp(inst.b);
        else if (inst.b.kind == IrOpKind::Constant && tagOp(inst.b) == 0)
            zr = regOp(inst.a);
        else if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Constant)
            build.cmp(regOp(inst.a), tagOp(inst.b));
        else if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Inst)
            build.cmp(regOp(inst.a), regOp(inst.b));

@@ -573,19 +593,33 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)

        if (isFallthroughBlock(blockOp(inst.d), next))
        {
            build.b(ConditionA64::Equal, labelOp(inst.c));
            if (zr != noreg)
                build.cbz(zr, labelOp(inst.c));
            else
                build.b(ConditionA64::Equal, labelOp(inst.c));
            jumpOrFallthrough(blockOp(inst.d), next);
        }
        else
        {
            build.b(ConditionA64::NotEqual, labelOp(inst.d));
            if (zr != noreg)
                build.cbnz(zr, labelOp(inst.d));
            else
                build.b(ConditionA64::NotEqual, labelOp(inst.d));
            jumpOrFallthrough(blockOp(inst.c), next);
        }
        break;
    }
    case IrCmd::JUMP_EQ_INT:
        LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
        build.cmp(regOp(inst.a), uint16_t(intOp(inst.b)));
        build.b(ConditionA64::Equal, labelOp(inst.c));
        if (intOp(inst.b) == 0)
        {
            build.cbz(regOp(inst.a), labelOp(inst.c));
        }
        else
        {
            LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
            build.cmp(regOp(inst.a), uint16_t(intOp(inst.b)));
            build.b(ConditionA64::Equal, labelOp(inst.c));
        }
        jumpOrFallthrough(blockOp(inst.d), next);
        break;
    case IrCmd::JUMP_LT_INT:
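Both jump lowerings above special-case a zero operand: comparing a tag against 0 (LUA_TNIL) or an integer against 0 lets the A64 backend fuse the compare and branch into a single cbz/cbnz. The two equivalent sequences side by side (illustrative; reg holds the value, target is the taken label):

    // Before: two instructions for any constant.
    build.cmp(reg, 0);
    build.b(ConditionA64::Equal, target);

    // After: one compare-and-branch when the constant is zero.
    build.cbz(reg, target);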
|
||||
|
@ -871,7 +905,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
|
|||
build.add(x2, rBase, uint16_t(vmRegOp(inst.c) * sizeof(TValue)));
|
||||
else if (inst.c.kind == IrOpKind::Constant)
|
||||
{
|
||||
TValue n;
|
||||
TValue n = {};
|
||||
setnvalue(&n, uintOp(inst.c));
|
||||
build.adr(x2, &n, sizeof(n));
|
||||
}
|
||||
|
@ -893,7 +927,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
|
|||
build.add(x2, rBase, uint16_t(vmRegOp(inst.c) * sizeof(TValue)));
|
||||
else if (inst.c.kind == IrOpKind::Constant)
|
||||
{
|
||||
TValue n;
|
||||
TValue n = {};
|
||||
setnvalue(&n, uintOp(inst.c));
|
||||
build.adr(x2, &n, sizeof(n));
|
||||
}
|
||||
|
@ -908,25 +942,17 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
|
|||
break;
|
||||
case IrCmd::GET_IMPORT:
|
||||
regs.spill(build, index);
|
||||
// luaV_getimport(L, cl->env, k, aux, /* propagatenil= */ false)
|
||||
// luaV_getimport(L, cl->env, k, ra, aux, /* propagatenil= */ false)
|
||||
build.mov(x0, rState);
|
||||
build.ldr(x1, mem(rClosure, offsetof(Closure, env)));
|
||||
build.mov(x2, rConstants);
|
||||
build.mov(w3, uintOp(inst.b));
|
||||
build.mov(w4, 0);
|
||||
build.ldr(x5, mem(rNativeContext, offsetof(NativeContext, luaV_getimport)));
|
||||
build.blr(x5);
|
||||
build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
|
||||
build.mov(w4, uintOp(inst.b));
|
||||
build.mov(w5, 0);
|
||||
build.ldr(x6, mem(rNativeContext, offsetof(NativeContext, luaV_getimport)));
|
||||
build.blr(x6);
|
||||
|
||||
emitUpdateBase(build);
|
||||
|
||||
// setobj2s(L, ra, L->top - 1)
|
||||
build.ldr(x0, mem(rState, offsetof(lua_State, top)));
|
||||
build.sub(x0, x0, sizeof(TValue));
|
||||
build.ldr(q0, x0);
|
||||
build.str(q0, mem(rBase, vmRegOp(inst.a) * sizeof(TValue)));
|
||||
|
||||
// L->top--
|
||||
build.str(x0, mem(rState, offsetof(lua_State, top)));
|
||||
break;
|
||||
case IrCmd::CONCAT:
|
||||
regs.spill(build, index);
|
||||
|
@ -1003,62 +1029,99 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
|
|||
// note: no emitUpdateBase necessary because prepareFORN does not reallocate stack
|
||||
break;
|
||||
case IrCmd::CHECK_TAG:
|
||||
build.cmp(regOp(inst.a), tagOp(inst.b));
|
||||
build.b(ConditionA64::NotEqual, labelOp(inst.c));
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
|
||||
if (tagOp(inst.b) == 0)
|
||||
{
|
||||
build.cbnz(regOp(inst.a), fail);
|
||||
}
|
||||
else
|
||||
{
|
||||
build.cmp(regOp(inst.a), tagOp(inst.b));
|
||||
build.b(ConditionA64::NotEqual, fail);
|
||||
}
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_READONLY:
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::w);
|
||||
build.ldrb(temp, mem(regOp(inst.a), offsetof(Table, readonly)));
|
||||
build.cbnz(temp, labelOp(inst.b));
|
||||
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_NO_METATABLE:
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::x);
|
||||
build.ldr(temp, mem(regOp(inst.a), offsetof(Table, metatable)));
|
||||
build.cbnz(temp, labelOp(inst.b));
|
||||
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_SAFE_ENV:
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::x);
|
||||
RegisterA64 tempw = castReg(KindA64::w, temp);
|
||||
build.ldr(temp, mem(rClosure, offsetof(Closure, env)));
|
||||
build.ldrb(tempw, mem(temp, offsetof(Table, safeenv)));
|
||||
build.cbz(tempw, labelOp(inst.a));
|
||||
build.cbz(tempw, inst.a.kind == IrOpKind::Undef ? abort : labelOp(inst.a));
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_ARRAY_SIZE:
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
|
||||
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::w);
|
||||
build.ldr(temp, mem(regOp(inst.a), offsetof(Table, sizearray)));
|
||||
|
||||
if (inst.b.kind == IrOpKind::Inst)
|
||||
{
|
||||
build.cmp(temp, regOp(inst.b));
|
||||
build.b(ConditionA64::UnsignedLessEqual, fail);
|
||||
}
|
||||
else if (inst.b.kind == IrOpKind::Constant)
|
||||
{
|
||||
if (size_t(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate)
|
||||
if (intOp(inst.b) == 0)
|
||||
{
|
||||
build.cbz(temp, fail);
|
||||
}
|
||||
else if (size_t(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate)
|
||||
{
|
||||
build.cmp(temp, uint16_t(intOp(inst.b)));
|
||||
build.b(ConditionA64::UnsignedLessEqual, fail);
|
||||
}
|
||||
else
|
||||
{
|
||||
RegisterA64 temp2 = regs.allocTemp(KindA64::w);
|
||||
build.mov(temp2, intOp(inst.b));
|
||||
build.cmp(temp, temp2);
|
||||
build.b(ConditionA64::UnsignedLessEqual, fail);
|
||||
}
|
||||
}
|
||||
else
|
||||
LUAU_ASSERT(!"Unsupported instruction form");
|
||||
|
||||
build.b(ConditionA64::UnsignedLessEqual, labelOp(inst.c));
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::JUMP_SLOT_MATCH:
|
||||
case IrCmd::CHECK_SLOT_MATCH:
|
||||
{
|
||||
Label& mismatch = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? labelOp(inst.d) : labelOp(inst.c);
|
||||
Label abort; // used when guard aborts execution
|
||||
const IrOp& mismatchOp = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? inst.d : inst.c;
|
||||
Label& mismatch = mismatchOp.kind == IrOpKind::Undef ? abort : labelOp(mismatchOp);
|
||||
|
||||
RegisterA64 temp1 = regs.allocTemp(KindA64::x);
|
||||
RegisterA64 temp1w = castReg(KindA64::w, temp1);
|
||||
|
@ -1081,15 +1144,21 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
|
|||
|
||||
if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
|
||||
jumpOrFallthrough(blockOp(inst.c), next);
|
||||
else if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_NODE_NO_NEXT:
|
||||
{
|
||||
Label abort; // used when guard aborts execution
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::w);
|
||||
|
||||
build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTagNext));
|
||||
build.lsr(temp, temp, kTKeyTagBits);
|
||||
build.cbnz(temp, labelOp(inst.b));
|
||||
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
|
||||
|
||||
if (abort.id)
|
||||
emitAbort(build, abort);
|
||||
break;
|
||||
}
|
||||
case IrCmd::INTERRUPT:
|
||||
|
@ -1762,11 +1831,6 @@ uint8_t IrLoweringA64::tagOp(IrOp op) const
|
|||
return function.tagOp(op);
|
||||
}
|
||||
|
||||
bool IrLoweringA64::boolOp(IrOp op) const
|
||||
{
|
||||
return function.boolOp(op);
|
||||
}
|
||||
|
||||
int IrLoweringA64::intOp(IrOp op) const
|
||||
{
|
||||
return function.intOp(op);
|
||||
|
|
|
@ -48,7 +48,6 @@ struct IrLoweringA64
|
|||
// Operand data lookup helpers
|
||||
IrConst constOp(IrOp op) const;
|
||||
uint8_t tagOp(IrOp op) const;
|
||||
bool boolOp(IrOp op) const;
|
||||
int intOp(IrOp op) const;
|
||||
unsigned uintOp(IrOp op) const;
|
||||
double doubleOp(IrOp op) const;
|
||||
|
|
|
@@ -575,14 +575,6 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
jumpOnAnyCmpFallback(regs, build, vmRegOp(inst.a), vmRegOp(inst.b), conditionOp(inst.c), labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.e), next);
break;
case IrCmd::JUMP_SLOT_MATCH:
{
ScopedRegX64 tmp{regs, SizeX64::qword};

jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.c), next);
break;
}
case IrCmd::TABLE_LEN:
{
IrCallWrapperX64 callWrap(regs, build, index);

@@ -782,7 +774,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
}
else if (inst.c.kind == IrOpKind::Constant)
{
TValue n;
TValue n = {};
setnvalue(&n, uintOp(inst.c));
callGetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
}

@@ -798,7 +790,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
}
else if (inst.c.kind == IrOpKind::Constant)
{
TValue n;
TValue n = {};
setnvalue(&n, uintOp(inst.c));
callSetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
}

@@ -817,24 +809,12 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, qword[tmp1.release() + offsetof(Closure, env)]);
callWrap.addArgument(SizeX64::qword, rConstants);
callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
callWrap.addArgument(SizeX64::dword, uintOp(inst.b));
callWrap.addArgument(SizeX64::dword, 0);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_getimport)]);

emitUpdateBase(build);

ScopedRegX64 tmp2{regs, SizeX64::qword};

// setobj2s(L, ra, L->top - 1)
build.mov(tmp2.reg, qword[rState + offsetof(lua_State, top)]);
build.sub(tmp2.reg, sizeof(TValue));

ScopedRegX64 tmp3{regs, SizeX64::xmmword};
build.vmovups(tmp3.reg, xmmword[tmp2.reg]);
build.vmovups(luauReg(vmRegOp(inst.a)), tmp3.reg);

// L->top--
build.mov(qword[rState + offsetof(lua_State, top)], tmp2.reg);
break;
}
case IrCmd::CONCAT:

@@ -897,15 +877,15 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break;
case IrCmd::CHECK_TAG:
build.cmp(memRegTagOp(inst.a), tagOp(inst.b));
build.jcc(ConditionX64::NotEqual, labelOp(inst.c));
jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.c);
break;
case IrCmd::CHECK_READONLY:
build.cmp(byte[regOp(inst.a) + offsetof(Table, readonly)], 0);
build.jcc(ConditionX64::NotEqual, labelOp(inst.b));
jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.b);
break;
case IrCmd::CHECK_NO_METATABLE:
build.cmp(qword[regOp(inst.a) + offsetof(Table, metatable)], 0);
build.jcc(ConditionX64::NotEqual, labelOp(inst.b));
jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.b);
break;
case IrCmd::CHECK_SAFE_ENV:
{

@@ -914,7 +894,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.mov(tmp.reg, sClosure);
build.mov(tmp.reg, qword[tmp.reg + offsetof(Closure, env)]);
build.cmp(byte[tmp.reg + offsetof(Table, safeenv)], 0);
build.jcc(ConditionX64::Equal, labelOp(inst.a));
jumpOrAbortOnUndef(ConditionX64::Equal, ConditionX64::NotEqual, inst.a);
break;
}
case IrCmd::CHECK_ARRAY_SIZE:

@@ -925,13 +905,44 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
else
LUAU_ASSERT(!"Unsupported instruction form");

build.jcc(ConditionX64::BelowEqual, labelOp(inst.c));
jumpOrAbortOnUndef(ConditionX64::BelowEqual, ConditionX64::NotBelowEqual, inst.c);
break;
case IrCmd::JUMP_SLOT_MATCH:
case IrCmd::CHECK_SLOT_MATCH:
{
Label abort; // Used when guard aborts execution
const IrOp& mismatchOp = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? inst.d : inst.c;
Label& mismatch = mismatchOp.kind == IrOpKind::Undef ? abort : labelOp(mismatchOp);

ScopedRegX64 tmp{regs, SizeX64::qword};

jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.c));
// Check if node key tag is a string
build.mov(dwordReg(tmp.reg), luauNodeKeyTag(regOp(inst.a)));
build.and_(dwordReg(tmp.reg), kTKeyTagMask);
build.cmp(dwordReg(tmp.reg), LUA_TSTRING);
build.jcc(ConditionX64::NotEqual, mismatch);

// Check that node key value matches the expected one
build.mov(tmp.reg, luauConstantValue(vmConstOp(inst.b)));
build.cmp(tmp.reg, luauNodeKeyValue(regOp(inst.a)));
build.jcc(ConditionX64::NotEqual, mismatch);

// Check that node value is not nil
build.cmp(dword[regOp(inst.a) + offsetof(LuaNode, val) + offsetof(TValue, tt)], LUA_TNIL);
build.jcc(ConditionX64::Equal, mismatch);

if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
{
jumpOrFallthrough(blockOp(inst.c), next);
}
else if (mismatchOp.kind == IrOpKind::Undef)
{
Label skip;
build.jmp(skip);
build.setLabel(abort);
build.ud2();
build.setLabel(skip);
}
break;
}
case IrCmd::CHECK_NODE_NO_NEXT:

@@ -940,7 +951,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)

build.mov(tmp.reg, dword[regOp(inst.a) + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext]);
build.shr(tmp.reg, kTKeyTagBits);
build.jcc(ConditionX64::NotZero, labelOp(inst.b));
jumpOrAbortOnUndef(ConditionX64::NotZero, ConditionX64::Zero, inst.b);
break;
}
case IrCmd::INTERRUPT:

@@ -1356,6 +1367,21 @@ void IrLoweringX64::jumpOrFallthrough(IrBlock& target, IrBlock& next)
build.jmp(target.label);
}

void IrLoweringX64::jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInverse, IrOp targetOrUndef)
{
if (targetOrUndef.kind == IrOpKind::Undef)
{
Label skip;
build.jcc(condInverse, skip);
build.ud2();
build.setLabel(skip);
}
else
{
build.jcc(cond, labelOp(targetOrUndef));
}
}

OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op)
{
switch (op.kind)

@@ -1428,11 +1454,6 @@ uint8_t IrLoweringX64::tagOp(IrOp op) const
return function.tagOp(op);
}

bool IrLoweringX64::boolOp(IrOp op) const
{
return function.boolOp(op);
}

int IrLoweringX64::intOp(IrOp op) const
{
return function.intOp(op);

@@ -34,6 +34,7 @@ struct IrLoweringX64

bool isFallthroughBlock(IrBlock target, IrBlock next);
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
void jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInverse, IrOp targetOrUndef);

void storeDoubleAsFloat(OperandX64 dst, IrOp src);

@@ -45,7 +46,6 @@ struct IrLoweringX64

IrConst constOp(IrOp op) const;
uint8_t tagOp(IrOp op) const;
bool boolOp(IrOp op) const;
int intOp(IrOp op) const;
unsigned uintOp(IrOp op) const;
double doubleOp(IrOp op) const;
@@ -194,7 +194,7 @@ void IrRegAllocX64::preserve(IrInst& inst)
else if (spill.valueKind == IrValueKind::Tag || spill.valueKind == IrValueKind::Int)
build.mov(dword[sSpillArea + i * 8], inst.regX64);
else
LUAU_ASSERT(!"unsupported value kind");
LUAU_ASSERT(!"Unsupported value kind");

usedSpillSlots.set(i);

@@ -318,7 +318,7 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)
return i;
}

LUAU_ASSERT(!"nowhere to spill");
LUAU_ASSERT(!"Nowhere to spill");
return ~0u;
}
@@ -5,10 +5,10 @@
#include "Luau/IrBuilder.h"
#include "Luau/IrUtils.h"

#include "CustomExecUtils.h"
#include "IrTranslateBuiltins.h"

#include "lobject.h"
#include "lstate.h"
#include "ltm.h"

namespace Luau

@@ -366,7 +366,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
result = build.inst(IrCmd::INVOKE_LIBM, build.constUint(LBF_MATH_POW), vb, vc);
break;
default:
LUAU_ASSERT(!"unsupported binary op");
LUAU_ASSERT(!"Unsupported binary op");
}
}

@@ -1068,13 +1068,13 @@ void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos)
switch (type)
{
case LCT_VAL:
build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constBool(false));
build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(0));
break;
case LCT_REF:
build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constBool(true));
build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(1));
break;
case LCT_UPVAL:
build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constBool(false));
build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constUint(0));
break;
default:
LUAU_ASSERT(!"Unknown upvalue capture type");

@@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "Luau/Bytecode.h"

#include <stdint.h>

#include "ltm.h"

@@ -64,5 +66,38 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstAndX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);

inline int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;

default:
return 1;
}
}

} // namespace CodeGen
} // namespace Luau
@@ -356,7 +356,11 @@ void applySubstitutions(IrFunction& function, IrOp& op)
src.useCount--;

if (src.useCount == 0)
{
src.cmd = IrCmd::NOP;
removeUse(function, src.a);
src.a = {};
}
}
}
}

@@ -396,7 +400,7 @@ bool compare(double a, double b, IrCondition cond)
case IrCondition::NotGreaterEqual:
return !(a >= b);
default:
LUAU_ASSERT(!"unsupported conidtion");
LUAU_ASSERT(!"Unsupported condition");
}

return false;
@@ -4,7 +4,6 @@
#include "Luau/UnwindBuilder.h"

#include "CodeGenUtils.h"
#include "CustomExecUtils.h"

#include "lbuiltins.h"
#include "lgc.h"

@@ -38,7 +38,7 @@ struct NativeContext
void (*luaV_prepareFORN)(lua_State* L, StkId plimit, StkId pstep, StkId pinit) = nullptr;
void (*luaV_gettable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
void (*luaV_settable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
void (*luaV_getimport)(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil) = nullptr;
void (*luaV_getimport)(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil) = nullptr;
void (*luaV_concat)(lua_State* L, int total, int last) = nullptr;

int (*luaH_getn)(Table* t) = nullptr;
@@ -12,6 +12,7 @@
#include <vector>

LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)

namespace Luau
{

@@ -57,6 +58,12 @@ struct ConstPropState
return 0xff;
}

void updateTag(IrOp op, uint8_t tag)
{
if (RegisterInfo* info = tryGetRegisterInfo(op))
info->tag = tag;
}

void saveTag(IrOp op, uint8_t tag)
{
if (RegisterInfo* info = tryGetRegisterInfo(op))

@@ -202,7 +209,7 @@ struct ConstPropState
if (RegisterLink* link = instLink.find(instOp.index))
{
// Check that the target register hasn't changed the value
if (link->version > regs[link->reg].version)
if (link->version < regs[link->reg].version)
return nullptr;

return link;

@@ -619,13 +626,20 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
{
if (tag == b)
kill(function, inst);
{
if (FFlag::DebugLuauAbortingChecks)
replace(function, inst.c, build.undef());
else
kill(function, inst);
}
else
{
replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path
}
}
else
{
state.saveTag(inst.a, b); // We can assume the tag value going forward
state.updateTag(inst.a, b); // We can assume the tag value going forward
}
break;
}

@@ -633,25 +647,46 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
{
if (info->knownNotReadonly)
kill(function, inst);
{
if (FFlag::DebugLuauAbortingChecks)
replace(function, inst.b, build.undef());
else
kill(function, inst);
}
else
{
info->knownNotReadonly = true;
}
}
break;
case IrCmd::CHECK_NO_METATABLE:
if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
{
if (info->knownNoMetatable)
kill(function, inst);
{
if (FFlag::DebugLuauAbortingChecks)
replace(function, inst.b, build.undef());
else
kill(function, inst);
}
else
{
info->knownNoMetatable = true;
}
}
break;
case IrCmd::CHECK_SAFE_ENV:
if (state.inSafeEnv)
kill(function, inst);
{
if (FFlag::DebugLuauAbortingChecks)
replace(function, inst.a, build.undef());
else
kill(function, inst);
}
else
{
state.inSafeEnv = true;
}
break;
case IrCmd::CHECK_GC:
// It is enough to perform a GC check once in a block
@@ -300,8 +300,9 @@ enum LuauOpcode
// A: target register (see FORGLOOP for register layout)
LOP_FORGPREP_NEXT,

// removed in v3
LOP_DEP_FORGLOOP_NEXT,
// NATIVECALL: start executing new function in native code
// this is a pseudo-instruction that is never emitted by bytecode compiler, but can be constructed at runtime to accelerate native code dispatch
LOP_NATIVECALL,

// GETVARARGS: copy variables into the target register from vararg storage for current function
// A: target register
@@ -252,8 +252,7 @@ BuiltinInfo getBuiltinInfo(int bfid)
return {-1, -1};

case LBF_ASSERT:
return {-1, -1};
; // assert() returns all values when first value is truthy
return {-1, -1}; // assert() returns all values when first value is truthy

case LBF_MATH_ABS:
case LBF_MATH_ACOS:
@@ -25,7 +25,7 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25)
LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300)
LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5)

LUAU_FASTFLAGVARIABLE(LuauCompileLimitInsns, false)
LUAU_FASTFLAGVARIABLE(LuauCompileInlineDefer, false)

namespace Luau
{

@@ -250,7 +250,7 @@ struct Compiler

popLocals(0);

if (FFlag::LuauCompileLimitInsns && bytecode.getInstructionCount() > kMaxInstructionCount)
if (bytecode.getInstructionCount() > kMaxInstructionCount)
CompileError::raise(func->location, "Exceeded function instruction limit; split the function into parts to compile");

bytecode.endFunction(uint8_t(stackSize), uint8_t(upvals.size()));

@@ -559,10 +559,19 @@ struct Compiler

size_t oldLocals = localStack.size();

// note that we push the frame early; this is needed to block recursive inline attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
std::vector<InlineArg> args;
if (FFlag::LuauCompileInlineDefer)
{
args.reserve(func->args.size);
}
else
{
// note that we push the frame early; this is needed to block recursive inline attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
}

// evaluate all arguments; note that we don't emit code for constant arguments (relying on constant folding)
// note that compiler state (variable registers/values) does not change here - we defer that to a separate loop below to handle nested calls
for (size_t i = 0; i < func->args.size; ++i)
{
AstLocal* var = func->args.data[i];

@@ -581,8 +590,16 @@ struct Compiler
else
LUAU_ASSERT(!"Unexpected expression type");

for (size_t j = i; j < func->args.size; ++j)
pushLocal(func->args.data[j], uint8_t(reg + (j - i)));
if (FFlag::LuauCompileInlineDefer)
{
for (size_t j = i; j < func->args.size; ++j)
args.push_back({func->args.data[j], uint8_t(reg + (j - i))});
}
else
{
for (size_t j = i; j < func->args.size; ++j)
pushLocal(func->args.data[j], uint8_t(reg + (j - i)));
}

// all remaining function arguments have been allocated and assigned to
break;

@@ -597,17 +614,26 @@ struct Compiler
else
bytecode.emitABC(LOP_LOADNIL, reg, 0, 0);

pushLocal(var, reg);
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, reg});
else
pushLocal(var, reg);
}
else if (arg == nullptr)
{
// since the argument is not mutated, we can simply fold the value into the expressions that need it
locstants[var] = {Constant::Type_Nil};
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, kInvalidReg, {Constant::Type_Nil}});
else
locstants[var] = {Constant::Type_Nil};
}
else if (const Constant* cv = constants.find(arg); cv && cv->type != Constant::Type_Unknown)
{
// since the argument is not mutated, we can simply fold the value into the expressions that need it
locstants[var] = *cv;
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, kInvalidReg, *cv});
else
locstants[var] = *cv;
}
else
{

@@ -617,13 +643,20 @@ struct Compiler
// if the argument is a local that isn't mutated, we will simply reuse the existing register
if (int reg = le ? getExprLocalReg(le) : -1; reg >= 0 && (!lv || !lv->written))
{
pushLocal(var, uint8_t(reg));
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, uint8_t(reg)});
else
pushLocal(var, uint8_t(reg));
}
else
{
uint8_t temp = allocReg(arg, 1);
compileExprTemp(arg, temp);
pushLocal(var, temp);

if (FFlag::LuauCompileInlineDefer)
args.push_back({var, temp});
else
pushLocal(var, temp);
}
}
}

@@ -635,6 +668,20 @@ struct Compiler
compileExprAuto(expr->args.data[i], rsi);
}

if (FFlag::LuauCompileInlineDefer)
{
// apply all evaluated arguments to the compiler state
// note: locals use current startpc for debug info, although some of them have been computed earlier; this is similar to compileStatLocal
for (InlineArg& arg : args)
if (arg.value.type == Constant::Type_Unknown)
pushLocal(arg.local, arg.reg);
else
locstants[arg.local] = arg.value;

// the inline frame will be used to compile return statements as well as to reject recursive inlining attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
}

// fold constant values updated above into expressions in the function body
foldConstants(constants, variables, locstants, builtinsFold, func->body);

@@ -3747,6 +3794,14 @@ struct Compiler
AstExpr* untilCondition;
};

struct InlineArg
{
AstLocal* local;

uint8_t reg;
Constant value;
};

struct InlineFrame
{
AstExprFunction* func;
@@ -113,7 +113,6 @@ target_sources(Luau.CodeGen PRIVATE

CodeGen/src/BitUtils.h
CodeGen/src/ByteUtils.h
CodeGen/src/CustomExecUtils.h
CodeGen/src/CodeGenUtils.h
CodeGen/src/CodeGenA64.h
CodeGen/src/CodeGenX64.h

@@ -404,6 +403,7 @@ if(TARGET Luau.UnitTest)
tests/TypeInfer.primitives.test.cpp
tests/TypeInfer.provisional.test.cpp
tests/TypeInfer.refinements.test.cpp
tests/TypeInfer.rwprops.test.cpp
tests/TypeInfer.singletons.test.cpp
tests/TypeInfer.tables.test.cpp
tests/TypeInfer.test.cpp
@@ -31,7 +31,7 @@ Proto* luaF_newproto(lua_State* L)
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;

f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;

@@ -275,6 +275,7 @@ typedef struct Proto
TString* debugname;
uint8_t* debuginsn; // a copy of code[] array with just opcodes

const Instruction* codeentry;
void* execdata;
uintptr_t exectarget;

@@ -219,9 +219,7 @@ lua_State* lua_newstate(lua_Alloc f, void* ud)

g->cb = lua_Callbacks();

#if LUA_CUSTOM_EXECUTION
g->ecb = lua_ExecutionCallbacks();
#endif

g->gcstats = GCStats();
@@ -69,7 +69,7 @@ typedef struct CallInfo

#define LUA_CALLINFO_RETURN (1 << 0) // should the interpreter return after returning from this callinfo? first frame must have this set
#define LUA_CALLINFO_HANDLE (1 << 1) // should the error thrown during execution get handled by continuation from this callinfo? func must be C
#define LUA_CALLINFO_CUSTOM (1 << 2) // should this function be executed using custom execution callback
#define LUA_CALLINFO_NATIVE (1 << 2) // should this function be executed using execution callback for native code

#define curr_func(L) (clvalue(L->ci->func))
#define ci_func(ci) (clvalue((ci)->func))

@@ -211,9 +211,7 @@ typedef struct global_State

lua_Callbacks cb;

#if LUA_CUSTOM_EXECUTION
lua_ExecutionCallbacks ecb;
#endif

void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); // for each userdata tag, a gc callback to be called immediately before freeing memory

@@ -23,7 +23,8 @@ LUAI_FUNC int luaV_tostring(lua_State* L, StkId obj);
LUAI_FUNC void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId val);
LUAI_FUNC void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val);
LUAI_FUNC void luaV_concat(lua_State* L, int total, int last);
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil);
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil);
LUAI_FUNC void luaV_getimport_dep(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil);
LUAI_FUNC void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit);
LUAI_FUNC void luaV_callTM(lua_State* L, int nparams, int res);
LUAI_FUNC void luaV_tryfuncTM(lua_State* L, StkId func);
@@ -17,6 +17,7 @@
#include <string.h>

LUAU_FASTFLAG(LuauUniformTopHandling)
LUAU_FASTFLAG(LuauGetImportDirect)

// Disable c99-designator to avoid the warning in CGOTO dispatch table
#ifdef __clang__

@@ -101,7 +102,7 @@ LUAU_FASTFLAG(LuauUniformTopHandling)
VM_DISPATCH_OP(LOP_CONCAT), VM_DISPATCH_OP(LOP_NOT), VM_DISPATCH_OP(LOP_MINUS), VM_DISPATCH_OP(LOP_LENGTH), VM_DISPATCH_OP(LOP_NEWTABLE), \
VM_DISPATCH_OP(LOP_DUPTABLE), VM_DISPATCH_OP(LOP_SETLIST), VM_DISPATCH_OP(LOP_FORNPREP), VM_DISPATCH_OP(LOP_FORNLOOP), \
VM_DISPATCH_OP(LOP_FORGLOOP), VM_DISPATCH_OP(LOP_FORGPREP_INEXT), VM_DISPATCH_OP(LOP_DEP_FORGLOOP_INEXT), VM_DISPATCH_OP(LOP_FORGPREP_NEXT), \
VM_DISPATCH_OP(LOP_DEP_FORGLOOP_NEXT), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
VM_DISPATCH_OP(LOP_NATIVECALL), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
VM_DISPATCH_OP(LOP_LOADKX), VM_DISPATCH_OP(LOP_JUMPX), VM_DISPATCH_OP(LOP_FASTCALL), VM_DISPATCH_OP(LOP_COVERAGE), \
VM_DISPATCH_OP(LOP_CAPTURE), VM_DISPATCH_OP(LOP_DEP_JUMPIFEQK), VM_DISPATCH_OP(LOP_DEP_JUMPIFNOTEQK), VM_DISPATCH_OP(LOP_FASTCALL1), \
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K), VM_DISPATCH_OP(LOP_FORGPREP), VM_DISPATCH_OP(LOP_JUMPXEQKNIL), \

@@ -210,7 +211,7 @@ static void luau_execute(lua_State* L)
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black

#if LUA_CUSTOM_EXECUTION
if ((L->ci->flags & LUA_CALLINFO_CUSTOM) && !SingleStep)
if ((L->ci->flags & LUA_CALLINFO_NATIVE) && !SingleStep)
{
Proto* p = clvalue(L->ci->func)->l.p;
LUAU_ASSERT(p->execdata);

@@ -432,12 +433,20 @@ reentry:
{
uint32_t aux = *pc++;

VM_PROTECT(luaV_getimport(L, cl->env, k, aux, /* propagatenil= */ false));
ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack
if (FFlag::LuauGetImportDirect)
{
VM_PROTECT(luaV_getimport(L, cl->env, k, ra, aux, /* propagatenil= */ false));
VM_NEXT();
}
else
{
VM_PROTECT(luaV_getimport_dep(L, cl->env, k, aux, /* propagatenil= */ false));
ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack

setobj2s(L, ra, L->top - 1);
L->top--;
VM_NEXT();
setobj2s(L, ra, L->top - 1);
L->top--;
VM_NEXT();
}
}
}

@@ -954,21 +963,11 @@ reentry:
setnilvalue(argi++); // complete missing arguments
L->top = p->is_vararg ? argi : ci->top;

#if LUA_CUSTOM_EXECUTION
if (LUAU_UNLIKELY(p->execdata && !SingleStep))
{
ci->flags = LUA_CALLINFO_CUSTOM;
ci->savedpc = p->code;

if (L->global->ecb.enter(L, p) == 1)
goto reentry;
else
goto exit;
}
#endif

// reentry
pc = p->code;
// codeentry may point to NATIVECALL instruction when proto is compiled to native code
// this will result in execution continuing in native code, and is equivalent to if (p->execdata) but has no additional overhead
// note that p->codeentry may point *outside* of p->code..p->code+p->sizecode, but that pointer never gets saved to savedpc.
pc = SingleStep ? p->code : p->codeentry;
cl = ccl;
base = L->base;
k = p->k;

@@ -1055,7 +1054,7 @@ reentry:
Proto* nextproto = nextcl->l.p;

#if LUA_CUSTOM_EXECUTION
if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_CUSTOM) && !SingleStep))
if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_NATIVE) && !SingleStep))
{
if (L->global->ecb.enter(L, nextproto) == 1)
goto reentry;

@@ -2380,10 +2379,24 @@ reentry:
VM_NEXT();
}

VM_CASE(LOP_DEP_FORGLOOP_NEXT)
VM_CASE(LOP_NATIVECALL)
{
LUAU_ASSERT(!"Unsupported deprecated opcode");
Proto* p = cl->l.p;
LUAU_ASSERT(p->execdata);

CallInfo* ci = L->ci;
ci->flags = LUA_CALLINFO_NATIVE;
ci->savedpc = p->code;

#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.enter(L, p) == 1)
goto reentry;
else
goto exit;
#else
LUAU_ASSERT(!"Opcode is only valid when LUA_CUSTOM_EXECUTION is defined");
LUAU_UNREACHABLE();
#endif
}

VM_CASE(LOP_GETVARARGS)

@@ -2896,7 +2909,7 @@ int luau_precall(lua_State* L, StkId func, int nresults)

#if LUA_CUSTOM_EXECUTION
if (p->execdata)
ci->flags = LUA_CALLINFO_CUSTOM;
ci->flags = LUA_CALLINFO_NATIVE;
#endif

return PCRLUA;
@@ -13,6 +13,8 @@

#include <string.h>

LUAU_FASTFLAGVARIABLE(LuauGetImportDirect, false)

// TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens
template<typename T>
struct TempBuffer

@@ -40,8 +42,45 @@ struct TempBuffer
}
};

void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil)
void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil)
{
int count = id >> 30;
LUAU_ASSERT(count > 0);

int id0 = int(id >> 20) & 1023;
int id1 = int(id >> 10) & 1023;
int id2 = int(id) & 1023;

// after the first call to luaV_gettable, res may be invalid, and env may (sometimes) be garbage collected
// we take care to not use env again and to restore res before every consecutive use
ptrdiff_t resp = savestack(L, res);

// global lookup for id0
TValue g;
sethvalue(L, &g, env);
luaV_gettable(L, &g, &k[id0], res);

// table lookup for id1
if (count < 2)
return;

res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id1], res);

// table lookup for id2
if (count < 3)
return;

res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id2], res);
}

void luaV_getimport_dep(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil)
{
LUAU_ASSERT(!FFlag::LuauGetImportDirect);

int count = id >> 30;
int id0 = count > 0 ? int(id >> 20) & 1023 : -1;
int id1 = count > 1 ? int(id >> 10) & 1023 : -1;

@@ -114,7 +153,17 @@ static void resolveImportSafe(lua_State* L, Table* env, TValue* k, uint32_t id)
// note: we call getimport with nil propagation which means that accesses to table chains like A.B.C will resolve in nil
// this is technically not necessary but it reduces the number of exceptions when loading scripts that rely on getfenv/setfenv for global
// injection
luaV_getimport(L, L->gt, self->k, self->id, /* propagatenil= */ true);
if (FFlag::LuauGetImportDirect)
{
// allocate a stack slot so that we can do table lookups
luaD_checkstack(L, 1);
setnilvalue(L->top);
L->top++;

luaV_getimport(L, L->gt, self->k, L->top - 1, self->id, /* propagatenil= */ true);
}
else
luaV_getimport_dep(L, L->gt, self->k, self->id, /* propagatenil= */ true);
}
};

@@ -204,6 +253,8 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
for (int j = 0; j < p->sizecode; ++j)
p->code[j] = read<uint32_t>(data, size, offset);

p->codeentry = p->code;

p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);
@@ -455,6 +455,11 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Conditionals")
SINGLE_COMPARE(cset(x1, ConditionA64::Less), 0x9A9FA7E1);
}

TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Undefined")
{
SINGLE_COMPARE(udf(), 0x00000000);
}

TEST_CASE("LogTest")
{
AssemblyBuilderA64 build(/* logText= */ true);

@@ -537,6 +537,7 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXTernaryInstructionForms")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "MiscInstructions")
{
SINGLE_COMPARE(int3(), 0xcc);
SINGLE_COMPARE(ud2(), 0x0f, 0x0b);
SINGLE_COMPARE(bsr(eax, edx), 0x0f, 0xbd, 0xc2);
SINGLE_COMPARE(bsf(eax, edx), 0x0f, 0xbc, 0xc2);
}
@@ -5796,7 +5796,9 @@ RETURN R3 1

TEST_CASE("InlineRecurseArguments")
{
// we can't inline a function if it's used to compute its own arguments
ScopedFastFlag sff("LuauCompileInlineDefer", true);

// the example looks silly but we preserve it verbatim as it was found by fuzzer for a previous version of the compiler
CHECK_EQ("\n" + compileFunction(R"(
local function foo(a, b)
end

@@ -5805,15 +5807,82 @@ foo(foo(foo,foo(foo,foo))[foo])
1, 2),
R"(
DUPCLOSURE R0 K0 ['foo']
MOVE R2 R0
MOVE R3 R0
MOVE R4 R0
MOVE R5 R0
MOVE R6 R0
CALL R4 2 -1
CALL R2 -1 1
LOADNIL R3
LOADNIL R2
GETTABLE R1 R2 R0
RETURN R0 0
)");

// verify that invocations of the inlined function in any position for computing the arguments to itself compile
CHECK_EQ("\n" + compileFunction(R"(
local function foo(a, b)
return a + b
end

local x, y, z = ...

return foo(foo(x, y), foo(z, 1))
)",
1, 2),
R"(
DUPCLOSURE R0 K0 ['foo']
GETVARARGS R1 3
ADD R5 R1 R2
ADDK R6 R3 K1 [1]
ADD R4 R5 R6
RETURN R4 1
)");

// verify that invocations of the inlined function in any position for computing the arguments to itself compile, including constants and locals
// note that foo(k1, k2) doesn't get constant folded, so there's still actual math emitted for some of the calls below
CHECK_EQ("\n" + compileFunction(R"(
local function foo(a, b)
return a + b
end

local x, y, z = ...

return
foo(foo(1, 2), 3),
foo(1, foo(2, 3)),
foo(x, foo(2, 3)),
foo(x, foo(y, 3)),
foo(x, foo(y, z)),
foo(x+0, foo(y, z)),
foo(x+0, foo(y+0, z)),
foo(x+0, foo(y, z+0)),
foo(1, foo(x, y))
)",
1, 2),
R"(
DUPCLOSURE R0 K0 ['foo']
GETVARARGS R1 3
LOADN R5 3
ADDK R4 R5 K1 [3]
LOADN R6 5
LOADN R7 1
ADD R5 R7 R6
LOADN R7 5
ADD R6 R1 R7
ADDK R8 R2 K1 [3]
ADD R7 R1 R8
ADD R9 R2 R3
ADD R8 R1 R9
ADDK R10 R1 K2 [0]
ADD R11 R2 R3
ADD R9 R10 R11
ADDK R11 R1 K2 [0]
ADDK R13 R2 K2 [0]
ADD R12 R13 R3
ADD R10 R11 R12
ADDK R12 R1 K2 [0]
ADDK R14 R3 K2 [0]
ADD R13 R2 R14
ADD R11 R12 R13
ADD R13 R1 R2
LOADN R14 1
ADD R12 R14 R13
RETURN R4 9
)");
}
@@ -80,6 +80,8 @@ public:
static const int tnil = 0;
static const int tboolean = 1;
static const int tnumber = 3;
static const int tstring = 5;
static const int ttable = 6;
};

TEST_SUITE_BEGIN("Optimization");

@@ -1286,8 +1288,8 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "IntEqRemoval")
IrOp falseBlock = build.block(IrBlockKind::Internal);

build.beginBlock(block);
IrOp value = build.inst(IrCmd::LOAD_INT, build.vmReg(1));
build.inst(IrCmd::STORE_INT, build.vmReg(1), build.constInt(5));
IrOp value = build.inst(IrCmd::LOAD_INT, build.vmReg(1));
build.inst(IrCmd::JUMP_EQ_INT, value, build.constInt(5), trueBlock, falseBlock);

build.beginBlock(trueBlock);

@@ -1317,8 +1319,8 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumCmpRemoval")
IrOp falseBlock = build.block(IrBlockKind::Internal);

build.beginBlock(block);
IrOp value = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(1));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(1), build.constDouble(4.0));
IrOp value = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(1));
build.inst(IrCmd::JUMP_CMP_NUM, value, build.constDouble(8.0), build.cond(IrCondition::Greater), trueBlock, falseBlock);

build.beginBlock(trueBlock);

@@ -1551,6 +1553,50 @@ bb_0:
)");
}

TEST_CASE_FIXTURE(IrBuilderFixture, "InvalidateReglinkVersion")
{
IrOp block = build.block(IrBlockKind::Internal);
IrOp fallback = build.block(IrBlockKind::Fallback);

build.beginBlock(block);

build.inst(IrCmd::STORE_TAG, build.vmReg(2), build.constTag(tstring));
IrOp tv2 = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(2));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), tv2);
IrOp ft = build.inst(IrCmd::NEW_TABLE);
build.inst(IrCmd::STORE_POINTER, build.vmReg(2), ft);
build.inst(IrCmd::STORE_TAG, build.vmReg(2), build.constTag(ttable));
IrOp tv1 = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(0), tv1);
IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(0));
build.inst(IrCmd::CHECK_TAG, tag, build.constTag(ttable), fallback);
build.inst(IrCmd::RETURN, build.constUint(0));

build.beginBlock(fallback);
build.inst(IrCmd::RETURN, build.constUint(1));

updateUseCounts(build.function);
constPropInBlockChains(build, true);

CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
STORE_TAG R2, tstring
%1 = LOAD_TVALUE R2
STORE_TVALUE R1, %1
%3 = NEW_TABLE
STORE_POINTER R2, %3
STORE_TAG R2, ttable
STORE_TVALUE R0, %1
%8 = LOAD_TAG R0
CHECK_TAG %8, ttable, bb_fallback_1
RETURN 0u

bb_fallback_1:
RETURN 1u

)");
}

TEST_SUITE_END();

TEST_SUITE_BEGIN("LinearExecutionFlowExtraction");

@@ -2257,7 +2303,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPropagationOfCapturedRegs")
IrOp entry = build.block(IrBlockKind::Internal);

build.beginBlock(entry);
build.inst(IrCmd::CAPTURE, build.vmReg(0), build.constBool(true));
build.inst(IrCmd::CAPTURE, build.vmReg(0), build.constUint(1));
IrOp op1 = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(0));
IrOp op2 = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(0));
IrOp sum = build.inst(IrCmd::ADD_NUM, op1, op2);

@@ -2273,7 +2319,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPropagationOfCapturedRegs")

bb_0:
; in regs: R0
CAPTURE R0, true
CAPTURE R0, 1u
%1 = LOAD_DOUBLE R0
%2 = LOAD_DOUBLE R0
%3 = ADD_NUM %1, %2
@@ -213,4 +213,36 @@ TEST_CASE_FIXTURE(Fixture, "add_family_at_work")
CHECK(toString(result.errors[1]) == "Type family instance Add<string, number> is uninhabited");
}

TEST_CASE_FIXTURE(Fixture, "internal_families_raise_errors")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;

CheckResult result = check(R"(
local function innerSum(a, b)
local _ = a + b
end
)");

LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(toString(result.errors[0]) == "Type family instance Add<a, b> depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time");
}

TEST_CASE_FIXTURE(BuiltinsFixture, "type_families_inhabited_with_normalization")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};

CheckResult result = check(R"(
local useGridConfig : any
local columns = useGridConfig("columns", {}) or 1
local gutter = useGridConfig('gutter', {}) or 0
local margin = useGridConfig('margin', {}) or 0
return function(frameAbsoluteWidth: number)
local cellAbsoluteWidth = (frameAbsoluteWidth - 2 * margin + gutter) / columns - gutter
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_SUITE_END();

@@ -1922,6 +1922,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
{"LuauClonePublicInterfaceLess2", true},
{"LuauSubstitutionReentrant", true},
{"LuauSubstitutionFixMissingFields", true},
{"LuauCloneSkipNonInternalVisit", true},
};

CheckResult result = check(R"(

@@ -1930,13 +1931,10 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
end
)");

LUAU_REQUIRE_ERROR_COUNT(2, result);
LUAU_REQUIRE_ERROR_COUNT(1, result);

CHECK_MESSAGE(get<CodeTooComplex>(result.errors[0]), "Expected CodeTooComplex but got: " << toString(result.errors[0]));
CHECK(Location({1, 17}, {1, 18}) == result.errors[0].location);

CHECK_MESSAGE(get<UnificationTooComplex>(result.errors[1]), "Expected UnificationTooComplex but got: " << toString(result.errors[1]));
CHECK(Location({0, 0}, {4, 4}) == result.errors[1].location);
}

/* We had a bug under DCR where instantiated type packs had a nullptr scope.
@@ -60,23 +60,44 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "iteration_regression_issue_69967")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
type Iterable = typeof(setmetatable(
{},
{}::{
__iter: (self: Iterable) -> () -> (number, string)
}
))

type Iterable = typeof(setmetatable(
{},
{}::{
__iter: (self: Iterable) -> () -> (number, string)
}
))
local t: Iterable

local t: Iterable
for a, b in t do end
)");

for a, b in t do end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}

LUAU_REQUIRE_ERROR_COUNT(1, result);
GenericError* ge = get<GenericError>(result.errors[0]);
REQUIRE(ge);
CHECK_EQ("__iter metamethod must return (next[, table[, state]])", ge->message);
TEST_CASE_FIXTURE(BuiltinsFixture, "iteration_regression_issue_69967_alt")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
type Iterable = typeof(setmetatable(
{},
{}::{
__iter: (self: Iterable) -> () -> (number, string)
}
))

local t: Iterable
local x, y

for a, b in t do
x = a
y = b
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("number", toString(requireType("x")));
CHECK_EQ("string", toString(requireType("y")));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "for_in_loop")

@@ -777,4 +798,130 @@ TEST_CASE_FIXTURE(Fixture, "iterate_over_free_table")
CHECK("Cannot iterate over a table without indexer" == ge->message);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_explore_raycast_minimization")
{
CheckResult result = check(R"(
local testResults = {}
for _, testData in pairs(testResults) do
end

table.insert(testResults, {})
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_minimized_fragmented_keys_1")
{
CheckResult result = check(R"(
local function rawpairs(t)
return next, t, nil
end

local function getFragmentedKeys(tbl)
local _ = rawget(tbl, 0)
for _ in rawpairs(tbl) do
end
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_minimized_fragmented_keys_2")
{
CheckResult result = check(R"(
local function getFragmentedKeys(tbl)
local _ = rawget(tbl, 0)
for _ in next, tbl, nil do
end
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_minimized_fragmented_keys_3")
{
CheckResult result = check(R"(
local function getFragmentedKeys(tbl)
local _ = rawget(tbl, 0)
for _ in pairs(tbl) do
end
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_fragmented_keys")
{
CheckResult result = check(R"(
local function isIndexKey(k, contiguousLength)
return true
end

local function getTableLength(tbl)
local length = 1
local value = rawget(tbl, length)
while value ~= nil do
length += 1
value = rawget(tbl, length)
end
return length - 1
end

local function rawpairs(t)
return next, t, nil
end

local function getFragmentedKeys(tbl)
local keys = {}
local keysLength = 0
local tableLength = getTableLength(tbl)
for key, _ in rawpairs(tbl) do
if not isIndexKey(key, tableLength) then
keysLength = keysLength + 1
keys[keysLength] = key
end
end
return keys, keysLength, tableLength
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_xpath_candidates")
{
CheckResult result = check(R"(
type Instance = {}
local function findCandidates(instances: { Instance }, path: { string })
for _, name in ipairs(path) do
end
return {}
end

local canditates = findCandidates({}, {})
for _, canditate in ipairs(canditates) do end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_on_never_gives_never")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
local iter: never
local ans
for xs in iter do
ans = xs
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
CHECK(toString(requireType("ans")) == "never");
}

TEST_SUITE_END();
@@ -679,10 +679,9 @@ TEST_CASE_FIXTURE(Fixture, "strict_binary_op_where_lhs_unknown")

if (FFlag::DebugLuauDeferredConstraintResolution)
{
// TODO: This will eventually entirely go away, but for now the Add
// family will ensure there's one less error.
LUAU_REQUIRE_ERROR_COUNT(ops.size() - 1, result);
CHECK_EQ("Unknown type used in - operation; consider adding a type annotation to 'a'", toString(result.errors[0]));
LUAU_REQUIRE_ERROR_COUNT(ops.size(), result);
CHECK_EQ("Type family instance Add<a, b> depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time", toString(result.errors[0]));
CHECK_EQ("Unknown type used in - operation; consider adding a type annotation to 'a'", toString(result.errors[1]));
}
else
{
tests/TypeInfer.rwprops.test.cpp (new file, 70 lines)
@@ -0,0 +1,70 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Fixture.h"

#include "doctest.h"

LUAU_FASTFLAG(DebugLuauReadWriteProperties)

using namespace Luau;

namespace
{

struct ReadWriteFixture : Fixture
{
ScopedFastFlag dcr{"DebugLuauDeferredConstraintResolution", true};

ReadWriteFixture()
: Fixture()
{
if (!FFlag::DebugLuauReadWriteProperties)
return;

TypeArena* arena = &frontend.globals.globalTypes;
NotNull<Scope> globalScope{frontend.globals.globalScope.get()};

unfreeze(*arena);

TypeId genericT = arena->addType(GenericType{"T"});

TypeId readonlyX = arena->addType(TableType{TableState::Sealed, TypeLevel{}, globalScope});
getMutable<TableType>(readonlyX)->props["x"] = Property::readonly(genericT);
globalScope->addBuiltinTypeBinding("ReadonlyX", TypeFun{{{genericT}}, readonlyX});

freeze(*arena);
}
};

} // namespace

TEST_SUITE_BEGIN("ReadWriteProperties");

TEST_CASE_FIXTURE(ReadWriteFixture, "read_from_a_readonly_prop")
{
if (!FFlag::DebugLuauReadWriteProperties)
return;

CheckResult result = check(R"(
function f(rx: ReadonlyX<string>)
local x = rx.x
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(ReadWriteFixture, "write_to_a_readonly_prop")
{
if (!FFlag::DebugLuauReadWriteProperties)
return;

CheckResult result = check(R"(
function f(rx: ReadonlyX<string>)
rx.x = "hello!" -- error
end
)");

LUAU_REQUIRE_ERROR_COUNT(1, result);
}

TEST_SUITE_END();
@@ -20,7 +20,7 @@ struct TryUnifyFixture : Fixture
InternalErrorReporter iceHandler;
UnifierSharedState unifierState{&iceHandler};
Normalizer normalizer{&arena, builtinTypes, NotNull{&unifierState}};
Unifier state{NotNull{&normalizer}, Mode::Strict, NotNull{globalScope.get()}, Location{}, Variance::Covariant};
Unifier state{NotNull{&normalizer}, NotNull{globalScope.get()}, Location{}, Variance::Covariant};
};

TEST_SUITE_BEGIN("TryUnifyTests");

@@ -810,7 +810,7 @@ TEST_CASE_FIXTURE(Fixture, "free_options_can_be_unified_together")
InternalErrorReporter iceHandler;
UnifierSharedState sharedState{&iceHandler};
Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, NotNull{scope.get()}, Location{}, Variance::Covariant};
Unifier u{NotNull{&normalizer}, NotNull{scope.get()}, Location{}, Variance::Covariant};

u.tryUnify(option1, option2);

@@ -324,4 +324,16 @@ TEST_CASE_FIXTURE(Fixture, "math_operators_and_never")
CHECK_EQ("<a>(nil, a) -> boolean", toString(requireType("mul")));
}

TEST_CASE_FIXTURE(Fixture, "compare_never")
{
CheckResult result = check(R"(
local function cmp(x: nil, y: number)
return x ~= nil and x > y and x < y -- infers boolean | never, which is normalized into boolean
end
)");

LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("(nil, number) -> boolean", toString(requireType("cmp")));
}

TEST_SUITE_END();
@@ -14,4 +14,27 @@ assert((function(x, y)
return c, b, t, t1, t2
end)(5, 10) == 50)

local function fuzzfail1(...)
repeat
_ = nil
until not {}
for _ in ... do
for l0=_,_ do
end
return
end
end

local function fuzzFail2()
local _
do
repeat
_ = typeof(_),{_=_,}
_ = _(_._)
until _
end
end

assert(pcall(fuzzFail2) == false)

return('OK')
@@ -151,8 +151,6 @@ TypeInferFunctions.too_many_arguments_error_location
TypeInferFunctions.too_many_return_values_in_parentheses
TypeInferFunctions.too_many_return_values_no_function
TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_values
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_generic_next
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.unreachable_code_after_infinite_loop
TypeInferModules.do_not_modify_imported_types_5

@@ -175,7 +173,6 @@ TypeInferOperators.unrelated_classes_cannot_be_compared
TypeInferOperators.unrelated_primitives_cannot_be_compared
TypeInferPrimitives.CheckMethodsOfNumber
TypeInferPrimitives.string_index
TypeInferUnknownNever.dont_unify_operands_if_one_of_the_operand_is_never_in_any_ordering_operators
TypeInferUnknownNever.math_operators_and_never
TypePackTests.detect_cyclic_typepacks2
TypePackTests.pack_tail_unification_check