Sync to upstream/release/591

This commit is contained in:
Andy Friesen 2023-08-18 10:06:29 -07:00
parent 089da9e924
commit 433d966ea8
63 changed files with 1976 additions and 424 deletions

View file

@ -279,8 +279,6 @@ private:
TypeId errorRecoveryType() const;
TypePackId errorRecoveryTypePack() const;
TypeId unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope, bool unifyFreeTypes);
TypePackId anyifyModuleReturnTypePackGenerics(TypePackId tp);
void throwTimeLimitError();

View file

@ -100,6 +100,12 @@ struct FrontendOptions
std::optional<LintOptions> enabledLintWarnings;
std::shared_ptr<FrontendCancellationToken> cancellationToken;
// Time limit for typechecking a single module
std::optional<double> moduleTimeLimitSec;
// When true, some internal complexity limits will be scaled down for modules that miss the limit set by moduleTimeLimitSec
bool applyInternalLimitScaling = false;
};
struct CheckResult
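
These two options act together: moduleTimeLimitSec puts a wall-clock budget on typechecking a single module, and applyInternalLimitScaling lets the Frontend shrink its internal complexity limits for modules that blow through that budget (see applyInternalLimitScaling further down). A minimal usage sketch, assuming only the Frontend API visible in this diff; makeLimitedOptions is a hypothetical helper:

#include "Luau/Frontend.h"

Luau::FrontendOptions makeLimitedOptions()
{
    Luau::FrontendOptions options;
    options.moduleTimeLimitSec = 1.0;         // give a single module ~1 second to typecheck
    options.applyInternalLimitScaling = true; // on a miss, scale internal limits down for the next run
    return options;
}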

View file

@ -111,6 +111,7 @@ struct Module
LintResult lintResult;
Mode mode;
SourceCode::Type type;
double checkDurationSec = 0.0;
bool timeout = false;
bool cancelled = false;

View file

@ -0,0 +1,63 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Type.h"
#include <vector>
#include <optional>
namespace Luau
{
template<typename A, typename B>
struct TryPair;
class Normalizer;
struct NormalizedType;
struct SubtypingGraph
{
// Did the test succeed?
bool isSubtype = false;
bool isErrorSuppressing = false;
bool normalizationTooComplex = false;
// If so, what constraints are implied by this relation?
// If not, what happened?
SubtypingGraph and_(const SubtypingGraph& other);
SubtypingGraph or_(const SubtypingGraph& other);
static SubtypingGraph and_(const std::vector<SubtypingGraph>& results);
static SubtypingGraph or_(const std::vector<SubtypingGraph>& results);
};
struct Subtyping
{
NotNull<BuiltinTypes> builtinTypes;
NotNull<Normalizer> normalizer;
// TODO cache
// TODO cyclic types
// TODO recursion limits
SubtypingGraph isSubtype(TypeId subTy, TypeId superTy);
SubtypingGraph isSubtype(TypePackId subTy, TypePackId superTy);
private:
template<typename SubTy, typename SuperTy>
SubtypingGraph isSubtype(const TryPair<const SubTy*, const SuperTy*>& pair);
SubtypingGraph isSubtype(TypeId subTy, const UnionType* superUnion);
SubtypingGraph isSubtype(const UnionType* subUnion, TypeId superTy);
SubtypingGraph isSubtype(TypeId subTy, const IntersectionType* superIntersection);
SubtypingGraph isSubtype(const IntersectionType* subIntersection, TypeId superTy);
SubtypingGraph isSubtype(const PrimitiveType* subPrim, const PrimitiveType* superPrim);
SubtypingGraph isSubtype(const SingletonType* subSingleton, const PrimitiveType* superPrim);
SubtypingGraph isSubtype(const SingletonType* subSingleton, const SingletonType* superSingleton);
SubtypingGraph isSubtype(const FunctionType* subFunction, const FunctionType* superFunction);
SubtypingGraph isSubtype(const NormalizedType* subNorm, const NormalizedType* superNorm);
};
} // namespace Luau

View file

@ -733,9 +733,17 @@ struct Type final
using SeenSet = std::set<std::pair<const void*, const void*>>;
bool areEqual(SeenSet& seen, const Type& lhs, const Type& rhs);
enum class FollowOption
{
Normal,
DisableLazyTypeThunks,
};
// Follow BoundTypes until we get to something real
TypeId follow(TypeId t);
TypeId follow(TypeId t, FollowOption followOption);
TypeId follow(TypeId t, const void* context, TypeId (*mapper)(const void*, TypeId));
TypeId follow(TypeId t, FollowOption followOption, const void* context, TypeId (*mapper)(const void*, TypeId));
std::vector<TypeId> flattenIntersection(TypeId ty);
@ -818,6 +826,7 @@ public:
const TypeId optionalNumberType;
const TypeId optionalStringType;
const TypePackId emptyTypePack;
const TypePackId anyTypePack;
const TypePackId neverTypePack;
const TypePackId uninhabitableTypePack;

View file

@ -101,6 +101,30 @@ ErrorSuppression shouldSuppressErrors(NotNull<Normalizer> normalizer, TypeId ty1
*/
ErrorSuppression shouldSuppressErrors(NotNull<Normalizer> normalizer, TypePackId tp1, TypePackId tp2);
// Similar to `std::optional<std::pair<A, B>>`, but whose `sizeof()` is the same as `std::pair<A, B>`
// and cooperates with C++'s `if (auto p = ...)` syntax without the extra fatness of `std::optional`.
template<typename A, typename B>
struct TryPair
{
A first;
B second;
operator bool() const
{
return bool(first) && bool(second);
}
};
template<typename A, typename B, typename Ty>
TryPair<const A*, const B*> get2(Ty one, Ty two)
{
const A* a = get<A>(one);
const B* b = get<B>(two);
if (a && b)
return {a, b};
else
return {nullptr, nullptr};
}
template<typename T, typename Ty>
const T* get(std::optional<Ty> ty)
{
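
TryPair exists so that two simultaneous type queries can be tested in a single if-initializer. A standalone sketch of the idiom, using dynamic_cast as a stand-in for Luau's get<T>:

#include <cstdio>

struct Shape { virtual ~Shape() = default; };
struct Circle : Shape { double radius = 1.0; };
struct Square : Shape { double side = 2.0; };

template<typename A, typename B>
struct TryPair
{
    A first;
    B second;
    operator bool() const
    {
        return bool(first) && bool(second);
    }
};

// Pairs two downcasts and succeeds only if both do, mirroring get2 above.
template<typename A, typename B>
TryPair<const A*, const B*> get2(const Shape* one, const Shape* two)
{
    const A* a = dynamic_cast<const A*>(one);
    const B* b = dynamic_cast<const B*>(two);
    if (a && b)
        return {a, b};
    return {nullptr, nullptr};
}

int main()
{
    Circle c;
    Square s;
    // The implicit bool conversion is what makes the if-initializer form work:
    if (auto p = get2<Circle, Square>(&c, &s))
        std::printf("radius %f, side %f\n", p.first->radius, p.second->side);
}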

View file

@ -13,7 +13,6 @@
#include <utility>
LUAU_FASTFLAG(DebugLuauReadWriteProperties)
LUAU_FASTFLAGVARIABLE(LuauDisableCompletionOutsideQuotes, false)
LUAU_FASTFLAGVARIABLE(LuauAnonymousAutofilled1, false);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteLastTypecheck, false)
LUAU_FASTFLAGVARIABLE(LuauAutocompleteHideSelfArg, false)
@ -1345,7 +1344,7 @@ static std::optional<AutocompleteEntryMap> autocompleteStringParams(const Source
return std::nullopt;
}
if (FFlag::LuauDisableCompletionOutsideQuotes && !nodes.back()->is<AstExprError>())
if (!nodes.back()->is<AstExprError>())
{
if (nodes.back()->location.end == position || nodes.back()->location.begin == position)
{

View file

@ -4,6 +4,7 @@
#include "Luau/NotNull.h"
#include "Luau/RecursionCounter.h"
#include "Luau/TxnLog.h"
#include "Luau/Type.h"
#include "Luau/TypePack.h"
#include "Luau/Unifiable.h"
@ -14,7 +15,7 @@ LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTINTVARIABLE(LuauTypeCloneRecursionLimit, 300)
LUAU_FASTFLAGVARIABLE(LuauCloneCyclicUnions, false)
LUAU_FASTFLAGVARIABLE(LuauStacklessTypeClone, false)
LUAU_FASTFLAGVARIABLE(LuauStacklessTypeClone2, false)
LUAU_FASTINTVARIABLE(LuauTypeCloneIterationLimit, 100'000)
namespace Luau
@ -115,6 +116,7 @@ private:
std::optional<TypeId> find(TypeId ty) const
{
ty = follow(ty, FollowOption::DisableLazyTypeThunks);
if (auto it = types->find(ty); it != types->end())
return it->second;
return std::nullopt;
@ -122,6 +124,7 @@ private:
std::optional<TypePackId> find(TypePackId tp) const
{
tp = follow(tp);
if (auto it = packs->find(tp); it != packs->end())
return it->second;
return std::nullopt;
@ -143,24 +146,17 @@ private:
private:
TypeId shallowClone(TypeId ty)
{
// We want to [`Luau::follow`] but without forcing the expansion of [`LazyType`]s.
ty = follow(ty, FollowOption::DisableLazyTypeThunks);
if (auto clone = find(ty))
return *clone;
else if (ty->persistent)
return ty;
// We want to [`Luau::follow`] but without forcing the expansion of [`LazyType`]s.
TypeId target = nullptr;
if (auto bt = get<BoundType>(ty))
target = bt->boundTo;
else if (auto tt = get<TableType>(ty); tt && tt->boundTo)
target = *tt->boundTo;
else
{
target = arena->addType(ty->ty);
asMutable(target)->documentationSymbol = ty->documentationSymbol;
}
TypeId target = arena->addType(ty->ty);
asMutable(target)->documentationSymbol = ty->documentationSymbol;
LUAU_ASSERT(target);
(*types)[ty] = target;
queue.push_back(target);
return target;
@ -168,18 +164,15 @@ private:
TypePackId shallowClone(TypePackId tp)
{
tp = follow(tp);
if (auto clone = find(tp))
return *clone;
else if (tp->persistent)
return tp;
TypePackId target;
if (auto btp = get<BoundTypePack>(tp))
target = btp->boundTo;
else
target = arena->addTypePack(tp->ty);
TypePackId target = arena->addTypePack(tp->ty);
LUAU_ASSERT(target);
(*packs)[tp] = target;
queue.push_back(target);
return target;
@ -883,7 +876,7 @@ TypePackId clone(TypePackId tp, TypeArena& dest, CloneState& cloneState)
if (tp->persistent)
return tp;
if (FFlag::LuauStacklessTypeClone)
if (FFlag::LuauStacklessTypeClone2)
{
TypeCloner2 cloner{NotNull{&dest}, cloneState.builtinTypes, NotNull{&cloneState.seenTypes}, NotNull{&cloneState.seenTypePacks}};
return cloner.clone(tp);
@ -909,7 +902,7 @@ TypeId clone(TypeId typeId, TypeArena& dest, CloneState& cloneState)
if (typeId->persistent)
return typeId;
if (FFlag::LuauStacklessTypeClone)
if (FFlag::LuauStacklessTypeClone2)
{
TypeCloner2 cloner{NotNull{&dest}, cloneState.builtinTypes, NotNull{&cloneState.seenTypes}, NotNull{&cloneState.seenTypePacks}};
return cloner.clone(typeId);
@ -938,7 +931,7 @@ TypeId clone(TypeId typeId, TypeArena& dest, CloneState& cloneState)
TypeFun clone(const TypeFun& typeFun, TypeArena& dest, CloneState& cloneState)
{
if (FFlag::LuauStacklessTypeClone)
if (FFlag::LuauStacklessTypeClone2)
{
TypeCloner2 cloner{NotNull{&dest}, cloneState.builtinTypes, NotNull{&cloneState.seenTypes}, NotNull{&cloneState.seenTypePacks}};

View file

@ -2727,41 +2727,6 @@ TypePackId ConstraintSolver::errorRecoveryTypePack() const
return builtinTypes->errorRecoveryTypePack();
}
TypeId ConstraintSolver::unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope, bool unifyFreeTypes)
{
a = follow(a);
b = follow(b);
if (unifyFreeTypes && (get<FreeType>(a) || get<FreeType>(b)))
{
Unifier u{normalizer, scope, Location{}, Covariant};
u.enableNewSolver();
u.tryUnify(b, a);
if (u.errors.empty())
{
u.log.commit();
return a;
}
else
{
return builtinTypes->errorRecoveryType(builtinTypes->anyType);
}
}
if (*a == *b)
return a;
std::vector<TypeId> types = reduceUnion({a, b});
if (types.empty())
return builtinTypes->neverType;
if (types.size() == 1)
return types[0];
return arena->addType(UnionType{types});
}
TypePackId ConstraintSolver::anyifyModuleReturnTypePackGenerics(TypePackId tp)
{
tp = follow(tp);

View file

@ -148,8 +148,7 @@ declare coroutine: {
resume: <A..., R...>(co: thread, A...) -> (boolean, R...),
running: () -> thread,
status: (co: thread) -> "dead" | "running" | "normal" | "suspended",
-- FIXME: This technically returns a function, but we can't represent this yet.
wrap: <A..., R...>(f: (A...) -> R...) -> any,
wrap: <A..., R...>(f: (A...) -> R...) -> ((A...) -> R...),
yield: <A..., R...>(A...) -> R...,
isyieldable: () -> boolean,
close: (co: thread) -> (boolean, any)

View file

@ -31,10 +31,11 @@ LUAU_FASTINT(LuauTypeInferRecursionLimit)
LUAU_FASTINT(LuauTarjanChildLimit)
LUAU_FASTFLAG(LuauInferInNoCheckMode)
LUAU_FASTFLAGVARIABLE(LuauKnowsTheDataModel3, false)
LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100) // TODO: Remove with FFlagLuauTypecheckLimitControls
LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
LUAU_FASTFLAGVARIABLE(DebugLuauReadWriteProperties, false)
LUAU_FASTFLAGVARIABLE(LuauTypecheckLimitControls, false)
namespace Luau
{
@ -873,6 +874,14 @@ void Frontend::addBuildQueueItems(std::vector<BuildQueueItem>& items, std::vecto
}
}
static void applyInternalLimitScaling(SourceNode& sourceNode, const ModulePtr module, double limit)
{
if (module->timeout)
sourceNode.autocompleteLimitsMult = sourceNode.autocompleteLimitsMult / 2.0;
else if (module->checkDurationSec < limit / 2.0)
sourceNode.autocompleteLimitsMult = std::min(sourceNode.autocompleteLimitsMult * 2.0, 1.0);
}
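
The scaling is a simple multiplicative controller: every timeout halves the limits multiplier, and a run that finishes in under half the budget doubles it again, capped at 1.0. A standalone sketch of the dynamics, with the limit and timings made up for illustration:

#include <algorithm>
#include <cstdio>

int main()
{
    const double limit = 1.0; // stand-in for moduleTimeLimitSec
    double mult = 1.0;        // stand-in for sourceNode.autocompleteLimitsMult

    bool timedOut[] = {true, true, false, false, false};
    double durationSec[] = {1.0, 1.0, 0.3, 0.3, 0.9};

    for (int i = 0; i < 5; ++i)
    {
        if (timedOut[i])
            mult /= 2.0;                      // back off after a miss
        else if (durationSec[i] < limit / 2.0)
            mult = std::min(mult * 2.0, 1.0); // recover, but never above 1x
        std::printf("run %d: mult = %g\n", i, mult); // 0.5, 0.25, 0.5, 1, 1
    }
}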
void Frontend::checkBuildQueueItem(BuildQueueItem& item)
{
SourceNode& sourceNode = *item.sourceNode;
@ -883,43 +892,85 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
double timestamp = getTimestamp();
const std::vector<RequireCycle>& requireCycles = item.requireCycles;
if (item.options.forAutocomplete)
TypeCheckLimits typeCheckLimits;
if (FFlag::LuauTypecheckLimitControls)
{
double autocompleteTimeLimit = FInt::LuauAutocompleteCheckTimeoutMs / 1000.0;
// The autocomplete typecheck is always in strict mode with DM awareness
// to provide better type information for IDE features
TypeCheckLimits typeCheckLimits;
if (autocompleteTimeLimit != 0.0)
typeCheckLimits.finishTime = TimeTrace::getClock() + autocompleteTimeLimit;
if (item.options.moduleTimeLimitSec)
typeCheckLimits.finishTime = TimeTrace::getClock() + *item.options.moduleTimeLimitSec;
else
typeCheckLimits.finishTime = std::nullopt;
// TODO: This is a dirty ad hoc solution for autocomplete timeouts
// We are trying to dynamically adjust our existing limits to lower total typechecking time under the limit
// so that we'll have type information for the whole file at lower quality instead of a full abort in the middle
if (FInt::LuauTarjanChildLimit > 0)
typeCheckLimits.instantiationChildLimit = std::max(1, int(FInt::LuauTarjanChildLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.instantiationChildLimit = std::nullopt;
if (item.options.applyInternalLimitScaling)
{
if (FInt::LuauTarjanChildLimit > 0)
typeCheckLimits.instantiationChildLimit = std::max(1, int(FInt::LuauTarjanChildLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.instantiationChildLimit = std::nullopt;
if (FInt::LuauTypeInferIterationLimit > 0)
typeCheckLimits.unifierIterationLimit = std::max(1, int(FInt::LuauTypeInferIterationLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.unifierIterationLimit = std::nullopt;
if (FInt::LuauTypeInferIterationLimit > 0)
typeCheckLimits.unifierIterationLimit = std::max(1, int(FInt::LuauTypeInferIterationLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.unifierIterationLimit = std::nullopt;
}
typeCheckLimits.cancellationToken = item.options.cancellationToken;
}
if (item.options.forAutocomplete)
{
double autocompleteTimeLimit = FInt::LuauAutocompleteCheckTimeoutMs / 1000.0;
if (!FFlag::LuauTypecheckLimitControls)
{
// The autocomplete typecheck is always in strict mode with DM awareness
// to provide better type information for IDE features
TypeCheckLimits typeCheckLimits;
if (autocompleteTimeLimit != 0.0)
typeCheckLimits.finishTime = TimeTrace::getClock() + autocompleteTimeLimit;
else
typeCheckLimits.finishTime = std::nullopt;
// TODO: This is a dirty ad hoc solution for autocomplete timeouts
// We are trying to dynamically adjust our existing limits to lower total typechecking time under the limit
// so that we'll have type information for the whole file at lower quality instead of a full abort in the middle
if (FInt::LuauTarjanChildLimit > 0)
typeCheckLimits.instantiationChildLimit = std::max(1, int(FInt::LuauTarjanChildLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.instantiationChildLimit = std::nullopt;
if (FInt::LuauTypeInferIterationLimit > 0)
typeCheckLimits.unifierIterationLimit = std::max(1, int(FInt::LuauTypeInferIterationLimit * sourceNode.autocompleteLimitsMult));
else
typeCheckLimits.unifierIterationLimit = std::nullopt;
typeCheckLimits.cancellationToken = item.options.cancellationToken;
}
// The autocomplete typecheck is always in strict mode with DM awareness to provide better type information for IDE features
ModulePtr moduleForAutocomplete = check(sourceModule, Mode::Strict, requireCycles, environmentScope, /*forAutocomplete*/ true,
/*recordJsonLog*/ false, typeCheckLimits);
double duration = getTimestamp() - timestamp;
if (moduleForAutocomplete->timeout)
sourceNode.autocompleteLimitsMult = sourceNode.autocompleteLimitsMult / 2.0;
else if (duration < autocompleteTimeLimit / 2.0)
sourceNode.autocompleteLimitsMult = std::min(sourceNode.autocompleteLimitsMult * 2.0, 1.0);
if (FFlag::LuauTypecheckLimitControls)
{
moduleForAutocomplete->checkDurationSec = duration;
if (item.options.moduleTimeLimitSec && item.options.applyInternalLimitScaling)
applyInternalLimitScaling(sourceNode, moduleForAutocomplete, *item.options.moduleTimeLimitSec);
}
else
{
if (moduleForAutocomplete->timeout)
sourceNode.autocompleteLimitsMult = sourceNode.autocompleteLimitsMult / 2.0;
else if (duration < autocompleteTimeLimit / 2.0)
sourceNode.autocompleteLimitsMult = std::min(sourceNode.autocompleteLimitsMult * 2.0, 1.0);
}
item.stats.timeCheck += duration;
item.stats.filesStrict += 1;
@ -928,13 +979,29 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
return;
}
TypeCheckLimits typeCheckLimits;
typeCheckLimits.cancellationToken = item.options.cancellationToken;
if (!FFlag::LuauTypecheckLimitControls)
{
typeCheckLimits.cancellationToken = item.options.cancellationToken;
}
ModulePtr module = check(sourceModule, mode, requireCycles, environmentScope, /*forAutocomplete*/ false, item.recordJsonLog, typeCheckLimits);
item.stats.timeCheck += getTimestamp() - timestamp;
if (FFlag::LuauTypecheckLimitControls)
{
double duration = getTimestamp() - timestamp;
module->checkDurationSec = duration;
if (item.options.moduleTimeLimitSec && item.options.applyInternalLimitScaling)
applyInternalLimitScaling(sourceNode, module, *item.options.moduleTimeLimitSec);
item.stats.timeCheck += duration;
}
else
{
item.stats.timeCheck += getTimestamp() - timestamp;
}
item.stats.filesStrict += mode == Mode::Strict;
item.stats.filesNonstrict += mode == Mode::Nonstrict;

View file

@ -2,10 +2,12 @@
#include "Luau/Simplify.h"
#include "Luau/Normalize.h" // TypeIds
#include "Luau/RecursionCounter.h"
#include "Luau/ToString.h"
#include "Luau/TypeArena.h"
#include "Luau/Normalize.h" // TypeIds
#include "Luau/TypeUtils.h"
#include <algorithm>
LUAU_FASTINT(LuauTypeReductionRecursionLimit)
@ -47,14 +49,6 @@ struct TypeSimplifier
TypeId simplify(TypeId ty, DenseHashSet<TypeId>& seen);
};
template<typename A, typename B, typename TID>
static std::pair<const A*, const B*> get2(TID one, TID two)
{
const A* a = get<A>(one);
const B* b = get<B>(two);
return a && b ? std::make_pair(a, b) : std::make_pair(nullptr, nullptr);
}
// Match the exact type false|nil
static bool isFalsyType(TypeId ty)
{

Analysis/src/Subtyping.cpp (new file, 344 lines)
View file

@ -0,0 +1,344 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Subtyping.h"
#include "Luau/Common.h"
#include "Luau/Normalize.h"
#include "Luau/Type.h"
#include "Luau/TypePack.h"
#include "Luau/TypeUtils.h"
#include <algorithm>
namespace Luau
{
SubtypingGraph SubtypingGraph::and_(const SubtypingGraph& other)
{
return SubtypingGraph{
isSubtype && other.isSubtype,
// `||` is intentional here, we want to preserve error-suppressing flag.
isErrorSuppressing || other.isErrorSuppressing,
normalizationTooComplex || other.normalizationTooComplex,
};
}
SubtypingGraph SubtypingGraph::or_(const SubtypingGraph& other)
{
return SubtypingGraph{
isSubtype || other.isSubtype,
isErrorSuppressing || other.isErrorSuppressing,
normalizationTooComplex || other.normalizationTooComplex,
};
}
SubtypingGraph SubtypingGraph::and_(const std::vector<SubtypingGraph>& results)
{
SubtypingGraph acc{true, false};
for (const SubtypingGraph& current : results)
acc = acc.and_(current);
return acc;
}
SubtypingGraph SubtypingGraph::or_(const std::vector<SubtypingGraph>& results)
{
SubtypingGraph acc{false, false};
for (const SubtypingGraph& current : results)
acc = acc.or_(current);
return acc;
}
SubtypingGraph Subtyping::isSubtype(TypeId subTy, TypeId superTy)
{
subTy = follow(subTy);
superTy = follow(superTy);
// TODO: Do we care about returning a proof that this is error-suppressing?
// e.g. given `a | error <: a | error` where both operands are pointer equal,
// then should it also carry the information that it's error-suppressing?
// If it should, then `error <: error` should also do the same.
if (subTy == superTy)
return {true};
if (auto superUnion = get<UnionType>(superTy))
return isSubtype(subTy, superUnion);
else if (auto subUnion = get<UnionType>(subTy))
return isSubtype(subUnion, superTy);
else if (auto superIntersection = get<IntersectionType>(superTy))
return isSubtype(subTy, superIntersection);
else if (auto subIntersection = get<IntersectionType>(subTy))
{
SubtypingGraph result = isSubtype(subIntersection, superTy);
if (result.isSubtype || result.isErrorSuppressing || result.normalizationTooComplex)
return result;
else
return isSubtype(normalizer->normalize(subTy), normalizer->normalize(superTy));
}
else if (get<AnyType>(superTy))
return {true}; // This is always true.
else if (get<AnyType>(subTy))
{
// any = unknown | error, so we rewrite this to match.
// As per TAPL: A | B <: T iff A <: T && B <: T
return isSubtype(builtinTypes->unknownType, superTy).and_(isSubtype(builtinTypes->errorType, superTy));
}
else if (auto superUnknown = get<UnknownType>(superTy))
{
LUAU_ASSERT(!get<AnyType>(subTy)); // TODO: replace with ice.
LUAU_ASSERT(!get<UnionType>(subTy)); // TODO: replace with ice.
LUAU_ASSERT(!get<IntersectionType>(subTy)); // TODO: replace with ice.
bool errorSuppressing = get<ErrorType>(subTy);
return {!errorSuppressing, errorSuppressing};
}
else if (get<NeverType>(subTy))
return {true};
else if (get<ErrorType>(superTy))
return {false, true};
else if (get<ErrorType>(subTy))
return {false, true};
else if (auto p = get2<PrimitiveType, PrimitiveType>(subTy, superTy))
return isSubtype(p);
else if (auto p = get2<SingletonType, PrimitiveType>(subTy, superTy))
return isSubtype(p);
else if (auto p = get2<SingletonType, SingletonType>(subTy, superTy))
return isSubtype(p);
else if (auto p = get2<FunctionType, FunctionType>(subTy, superTy))
return isSubtype(p);
return {false};
}
SubtypingGraph Subtyping::isSubtype(TypePackId subTp, TypePackId superTp)
{
subTp = follow(subTp);
superTp = follow(superTp);
auto [subHead, subTail] = flatten(subTp);
auto [superHead, superTail] = flatten(superTp);
const size_t headSize = std::min(subHead.size(), superHead.size());
std::vector<SubtypingGraph> results;
results.reserve(std::max(subHead.size(), superHead.size()) + 1);
// Match head types pairwise
for (size_t i = 0; i < headSize; ++i)
{
results.push_back(isSubtype(subHead[i], superHead[i]));
if (!results.back().isSubtype)
return {false};
}
// Handle mismatched head sizes
if (subHead.size() < superHead.size())
{
if (subTail)
{
if (auto vt = get<VariadicTypePack>(*subTail))
{
for (size_t i = headSize; i < superHead.size(); ++i)
{
results.push_back(isSubtype(vt->ty, superHead[i]));
}
}
else
LUAU_ASSERT(0); // TODO
}
else
return {false};
}
else if (subHead.size() > superHead.size())
{
if (superTail)
{
if (auto vt = get<VariadicTypePack>(*superTail))
{
for (size_t i = headSize; i < subHead.size(); ++i)
{
results.push_back(isSubtype(subHead[i], vt->ty));
}
}
else
LUAU_ASSERT(0); // TODO
}
else
return {false};
}
else
{
// subHead and superHead are the same size. Nothing more needs to be done.
}
// Handle tails
if (subTail && superTail)
{
if (auto p = get2<VariadicTypePack, VariadicTypePack>(*subTail, *superTail))
{
results.push_back(isSubtype(p.first->ty, p.second->ty));
}
else
LUAU_ASSERT(0); // TODO
}
else if (subTail)
{
if (get<VariadicTypePack>(*subTail))
{
return {false};
}
LUAU_ASSERT(0); // TODO
}
else if (superTail)
{
if (get<VariadicTypePack>(*superTail))
{
/*
* A variadic type pack ...T can be thought of as an infinite union of finite type packs.
* () | (T) | (T, T) | (T, T, T) | ...
*
* And, per TAPL:
* T <: A | B iff T <: A or T <: B
*
* All variadic type packs are therefore supertypes of the empty type pack.
*/
}
else
LUAU_ASSERT(0); // TODO
}
return SubtypingGraph::and_(results);
}
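
The head/tail matching above generalizes pairwise comparison to packs of unequal length: leftover head elements on either side have to be absorbed by the other side's variadic tail. A standalone sketch of the same shape over a toy language where types are plain ints and "subtyping" is equality:

#include <cstdio>
#include <optional>
#include <vector>

using Ty = int; // toy types; the real code compares TypeIds

bool packSubtype(const std::vector<Ty>& subHead, std::optional<Ty> subTail,
    const std::vector<Ty>& superHead, std::optional<Ty> superTail)
{
    const size_t headSize = std::min(subHead.size(), superHead.size());

    // Match head types pairwise
    for (size_t i = 0; i < headSize; ++i)
        if (subHead[i] != superHead[i])
            return false;

    // A shorter sub head can only keep up through its variadic tail
    for (size_t i = headSize; i < superHead.size(); ++i)
        if (!subTail || *subTail != superHead[i])
            return false;

    // A longer sub head must fit into the super pack's variadic tail
    for (size_t i = headSize; i < subHead.size(); ++i)
        if (!superTail || subHead[i] != *superTail)
            return false;

    if (subTail && superTail)
        return *subTail == *superTail;
    if (subTail)
        return false; // a variadic sub pack vs a finite super pack
    return true;      // a lone super tail is fine: ...T is a supertype of ()
}

int main()
{
    // (number, number) <: (number, ...number)
    std::printf("%d\n", packSubtype({1, 1}, std::nullopt, {1}, std::optional<Ty>(1)));
}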
template<typename SubTy, typename SuperTy>
SubtypingGraph Subtyping::isSubtype(const TryPair<const SubTy*, const SuperTy*>& pair)
{
return isSubtype(pair.first, pair.second);
}
/*
* This is much simpler than the Unifier implementation because we don't
* actually care about potential "cross-talk" between union parts that match the
* left side.
*
* In fact, we're very limited in what we can do: If multiple choices match, but
* all of them have non-overlapping constraints, then we're stuck with an "or"
* conjunction of constraints. Solving this in the general case is quite
* difficult.
*
* For example, we cannot dispatch anything from this constraint:
*
* {x: number, y: string} <: {x: number, y: 'a} | {x: 'b, y: string}
*
* From this constraint, we can know that either string <: 'a or number <: 'b,
* but we don't know which!
*
* However:
*
* {x: number, y: string} <: {x: number, y: 'a} | {x: number, y: string}
*
* We can dispatch this constraint because there is no 'or' conjunction. One of
* the arms requires 0 matches.
*
* {x: number, y: string, z: boolean} <: {x: number, y: 'a, z: 'b} | {x: number,
* y: string, z: 'b}
*
* Here, we have two matches. One asks for string ~ 'a and boolean ~ 'b. The
* other just asks for boolean ~ 'b. We can dispatch this and only commit
* boolean ~ 'b. This constraint does not teach us anything about 'a.
*/
SubtypingGraph Subtyping::isSubtype(TypeId subTy, const UnionType* superUnion)
{
// As per TAPL: T <: A | B iff T <: A || T <: B
std::vector<SubtypingGraph> subtypings;
for (TypeId ty : superUnion)
subtypings.push_back(isSubtype(subTy, ty));
return SubtypingGraph::or_(subtypings);
}
SubtypingGraph Subtyping::isSubtype(const UnionType* subUnion, TypeId superTy)
{
// As per TAPL: A | B <: T iff A <: T && B <: T
std::vector<SubtypingGraph> subtypings;
for (TypeId ty : subUnion)
subtypings.push_back(isSubtype(ty, superTy));
return SubtypingGraph::and_(subtypings);
}
SubtypingGraph Subtyping::isSubtype(TypeId subTy, const IntersectionType* superIntersection)
{
// As per TAPL: T <: A & B iff T <: A && T <: B
std::vector<SubtypingGraph> subtypings;
for (TypeId ty : superIntersection)
subtypings.push_back(isSubtype(subTy, ty));
return SubtypingGraph::and_(subtypings);
}
SubtypingGraph Subtyping::isSubtype(const IntersectionType* subIntersection, TypeId superTy)
{
// TODO: Semantic subtyping here.
// As per TAPL: A & B <: T iff A <: T || B <: T
std::vector<SubtypingGraph> subtypings;
for (TypeId ty : subIntersection)
subtypings.push_back(isSubtype(ty, superTy));
return SubtypingGraph::or_(subtypings);
}
SubtypingGraph Subtyping::isSubtype(const PrimitiveType* subPrim, const PrimitiveType* superPrim)
{
return {subPrim->type == superPrim->type};
}
SubtypingGraph Subtyping::isSubtype(const SingletonType* subSingleton, const PrimitiveType* superPrim)
{
if (get<StringSingleton>(subSingleton) && superPrim->type == PrimitiveType::String)
return {true};
else if (get<BooleanSingleton>(subSingleton) && superPrim->type == PrimitiveType::Boolean)
return {true};
else
return {false};
}
SubtypingGraph Subtyping::isSubtype(const SingletonType* subSingleton, const SingletonType* superSingleton)
{
return {*subSingleton == *superSingleton};
}
SubtypingGraph Subtyping::isSubtype(const FunctionType* subFunction, const FunctionType* superFunction)
{
SubtypingGraph argResult = isSubtype(superFunction->argTypes, subFunction->argTypes);
SubtypingGraph retResult = isSubtype(subFunction->retTypes, superFunction->retTypes);
return argResult.and_(retResult);
}
SubtypingGraph Subtyping::isSubtype(const NormalizedType* subNorm, const NormalizedType* superNorm)
{
if (!subNorm || !superNorm)
return {false, true, true};
SubtypingGraph result{true};
result = result.and_(isSubtype(subNorm->tops, superNorm->tops));
result = result.and_(isSubtype(subNorm->booleans, superNorm->booleans));
// isSubtype(subNorm->classes, superNorm->classes);
// isSubtype(subNorm->classes, superNorm->tables);
result = result.and_(isSubtype(subNorm->errors, superNorm->errors));
result = result.and_(isSubtype(subNorm->nils, superNorm->nils));
result = result.and_(isSubtype(subNorm->numbers, superNorm->numbers));
result.isSubtype &= Luau::isSubtype(subNorm->strings, superNorm->strings);
// isSubtype(subNorm->strings, superNorm->tables);
result = result.and_(isSubtype(subNorm->threads, superNorm->threads));
// isSubtype(subNorm->tables, superNorm->tables);
// isSubtype(subNorm->tables, superNorm->strings);
// isSubtype(subNorm->tables, superNorm->classes);
// isSubtype(subNorm->functions, superNorm->functions);
// isSubtype(subNorm->tyvars, superNorm->tyvars);
return result;
}
} // namespace Luau
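
The union and intersection cases above are the standard TAPL decompositions: T <: A | B iff T <: A or T <: B; A | B <: T iff A <: T and B <: T; T <: A & B iff T <: A and T <: B; and A & B <: T iff A <: T or B <: T. A standalone sketch of how per-arm results fold through and_/or_, with SubtypingGraph reduced to its three flags:

#include <cstdio>

struct Result
{
    bool isSubtype = false;
    bool isErrorSuppressing = false;
    bool normalizationTooComplex = false;
};

Result and_(Result a, Result b)
{
    // '&&' for the verdict; the two bookkeeping flags stay sticky via '||'
    return {a.isSubtype && b.isSubtype, a.isErrorSuppressing || b.isErrorSuppressing,
        a.normalizationTooComplex || b.normalizationTooComplex};
}

Result or_(Result a, Result b)
{
    return {a.isSubtype || b.isSubtype, a.isErrorSuppressing || b.isErrorSuppressing,
        a.normalizationTooComplex || b.normalizationTooComplex};
}

int main()
{
    Result numberVsNumber{true};
    Result numberVsString{false};

    // number <: number | string: one arm suffices, so union arms fold with or_
    std::printf("%d\n", or_(numberVsNumber, numberVsString).isSubtype); // 1

    // number | string <: number: every arm must hold, so the arms fold with and_
    std::printf("%d\n", and_(numberVsNumber, numberVsString).isSubtype); // 0
}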

View file

@ -69,14 +69,24 @@ static LUAU_NOINLINE TypeId unwrapLazy(LazyType* ltv)
TypeId follow(TypeId t)
{
return follow(t, nullptr, [](const void*, TypeId t) -> TypeId {
return follow(t, FollowOption::Normal);
}
TypeId follow(TypeId t, FollowOption followOption)
{
return follow(t, followOption, nullptr, [](const void*, TypeId t) -> TypeId {
return t;
});
}
TypeId follow(TypeId t, const void* context, TypeId (*mapper)(const void*, TypeId))
{
auto advance = [context, mapper](TypeId ty) -> std::optional<TypeId> {
return follow(t, FollowOption::Normal, context, mapper);
}
TypeId follow(TypeId t, FollowOption followOption, const void* context, TypeId (*mapper)(const void*, TypeId))
{
auto advance = [followOption, context, mapper](TypeId ty) -> std::optional<TypeId> {
TypeId mapped = mapper(context, ty);
if (auto btv = get<Unifiable::Bound<TypeId>>(mapped))
@ -85,7 +95,7 @@ TypeId follow(TypeId t, const void* context, TypeId (*mapper)(const void*, TypeI
if (auto ttv = get<TableType>(mapped))
return ttv->boundTo;
if (auto ltv = getMutable<LazyType>(mapped))
if (auto ltv = getMutable<LazyType>(mapped); ltv && followOption != FollowOption::DisableLazyTypeThunks)
return unwrapLazy(ltv);
return std::nullopt;
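
follow chases BoundType (and bound-table) links until it reaches something concrete; FollowOption::DisableLazyTypeThunks stops short of forcing an unexpanded LazyType, which is what TypeCloner2's shallowClone relies on. A standalone sketch over toy nodes, with the actual LazyType forcing elided:

#include <cstdio>

enum class FollowOption
{
    Normal,
    DisableLazyTypeThunks,
};

struct Ty
{
    const Ty* boundTo = nullptr; // non-null models a BoundType link
    bool lazy = false;           // models a not-yet-expanded LazyType
    int id = 0;
};

const Ty* follow(const Ty* t, FollowOption followOption)
{
    for (;;)
    {
        if (t->lazy && followOption == FollowOption::DisableLazyTypeThunks)
            return t; // stop before forcing the thunk
        // (under Normal, the real implementation would force the lazy type here)
        if (!t->boundTo)
            return t; // something real
        t = t->boundTo;
    }
}

int main()
{
    Ty real{nullptr, false, 42};
    Ty bound{&real};
    Ty boundTwice{&bound};
    std::printf("followed to id %d\n", follow(&boundTwice, FollowOption::Normal)->id);
}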
@ -945,6 +955,7 @@ BuiltinTypes::BuiltinTypes()
, truthyType(arena->addType(Type{NegationType{falsyType}, /*persistent*/ true}))
, optionalNumberType(arena->addType(Type{UnionType{{numberType, nilType}}, /*persistent*/ true}))
, optionalStringType(arena->addType(Type{UnionType{{stringType, nilType}}, /*persistent*/ true}))
, emptyTypePack(arena->addTypePack(TypePackVar{TypePack{{}}, /*persistent*/ true}))
, anyTypePack(arena->addTypePack(TypePackVar{VariadicTypePack{anyType}, /*persistent*/ true}))
, neverTypePack(arena->addTypePack(TypePackVar{VariadicTypePack{neverType}, /*persistent*/ true}))
, uninhabitableTypePack(arena->addTypePack(TypePackVar{TypePack{{neverType}, neverTypePack}, /*persistent*/ true}))

View file

@ -26,6 +26,7 @@ project(Luau LANGUAGES CXX C)
add_library(Luau.Common INTERFACE)
add_library(Luau.Ast STATIC)
add_library(Luau.Compiler STATIC)
add_library(Luau.Config STATIC)
add_library(Luau.Analysis STATIC)
add_library(Luau.CodeGen STATIC)
add_library(Luau.VM STATIC)
@ -71,9 +72,13 @@ target_compile_features(Luau.Compiler PUBLIC cxx_std_17)
target_include_directories(Luau.Compiler PUBLIC Compiler/include)
target_link_libraries(Luau.Compiler PUBLIC Luau.Ast)
target_compile_features(Luau.Config PUBLIC cxx_std_17)
target_include_directories(Luau.Config PUBLIC Config/include)
target_link_libraries(Luau.Config PUBLIC Luau.Ast)
target_compile_features(Luau.Analysis PUBLIC cxx_std_17)
target_include_directories(Luau.Analysis PUBLIC Analysis/include)
target_link_libraries(Luau.Analysis PUBLIC Luau.Ast)
target_link_libraries(Luau.Analysis PUBLIC Luau.Ast Luau.Config)
target_compile_features(Luau.CodeGen PRIVATE cxx_std_17)
target_include_directories(Luau.CodeGen PUBLIC CodeGen/include)

View file

@ -4,6 +4,7 @@
#include "Luau/Common.h"
#include <bitset>
#include <queue>
#include <utility>
#include <vector>
@ -96,6 +97,46 @@ struct CfgInfo
void computeCfgImmediateDominators(IrFunction& function);
void computeCfgDominanceTreeChildren(IrFunction& function);
struct IdfContext
{
struct BlockAndOrdering
{
uint32_t blockIdx;
BlockOrdering ordering;
bool operator<(const BlockAndOrdering& rhs) const
{
if (ordering.depth != rhs.ordering.depth)
return ordering.depth < rhs.ordering.depth;
return ordering.preOrder < rhs.ordering.preOrder;
}
};
// Using a priority queue to work on nodes in the order from the bottom of the dominator tree to the top
// If the depth of keys is equal, DFS order is used to provide strong ordering
std::priority_queue<BlockAndOrdering> queue;
std::vector<uint32_t> worklist;
struct IdfVisitMarks
{
bool seenInQueue = false;
bool seenInWorklist = false;
};
std::vector<IdfVisitMarks> visits;
std::vector<uint32_t> idf;
};
// Compute iterated dominance frontier (IDF or DF+) for a variable, given the set of blocks where that variable is defined
// Providing a set of blocks where the variable is a live-in at the entry helps produce a pruned SSA form (inserted phi nodes will not be dead)
//
// 'Iterated' comes from the definition where we recompute IDF(n+1) = DF(S ∪ IDF(n)) until a fixed point is reached
// Iterated dominance frontier has been shown to be equal to the set of nodes where phi instructions have to be inserted
void computeIteratedDominanceFrontierForDefs(
IdfContext& ctx, const IrFunction& function, const std::vector<uint32_t>& defBlocks, const std::vector<uint32_t>& liveInBlocks);
// Function used to update all CFG data
void computeCfgInfo(IrFunction& function);
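
For intuition, consider a diamond CFG: b0 branches to b1 and b2, and both fall through to b3. If the variable is defined in b1 and b2 (defBlocks = {b1, b2}) and is live into b3, then b3 lies in the dominance frontier of both definitions, so the computed IDF is {b3}: exactly the one block that needs a phi node.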

View file

@ -53,12 +53,9 @@ enum class IrCmd : uint8_t
// Load a TValue from memory
// A: Rn or Kn or pointer (TValue)
// B: int (optional 'A' pointer offset)
LOAD_TVALUE,
// Load a TValue from table node value
// A: pointer (LuaNode)
LOAD_NODE_VALUE_TV, // TODO: we should find a way to generalize LOAD_TVALUE
// Load current environment table
LOAD_ENV,
@ -113,12 +110,15 @@ enum class IrCmd : uint8_t
// Store a TValue into memory
// A: Rn or pointer (TValue)
// B: TValue
// C: int (optional 'A' pointer offset)
STORE_TVALUE,
// Store a TValue into table node value
// A: pointer (LuaNode)
// B: TValue
STORE_NODE_VALUE_TV, // TODO: we should find a way to generalize STORE_TVALUE
// Store a pair of tag and value into memory
// A: Rn or pointer (TValue)
// B: tag (must be a constant)
// C: int/double/pointer
// D: int (optional 'A' pointer offset)
STORE_SPLIT_TVALUE,
// Add/Sub two integers together
// A, B: int
@ -356,6 +356,7 @@ enum class IrCmd : uint8_t
// Store TValue from stack slot into a function upvalue
// A: UPn
// B: Rn
// C: tag/undef (tag of the value that was written)
SET_UPVALUE,
// Convert TValues into numbers for a numerical for loop
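
STORE_SPLIT_TVALUE lets a constant tag and a payload be written as two narrow stores instead of materializing a full 16-byte TValue first; a known-zero tag can then come straight from a zero register, as the A64 lowering below does. A rough C-level sketch of the difference, assuming a simplified TValue layout:

#include <cstdint>

struct ToyTValue
{
    double value; // payload (number case)
    int32_t extra;
    int32_t tt;   // tag
};

// STORE_TVALUE shape: a complete value is built first, then copied wholesale.
void storeWhole(ToyTValue* dst, const ToyTValue& src)
{
    *dst = src;
}

// STORE_SPLIT_TVALUE shape: tag and payload are written separately, so no
// temporary TValue ever exists and constant parts can be folded by the backend.
void storeSplitNumber(ToyTValue* dst, double v)
{
    dst->tt = 3; // assumed numeric tag (LUA_TNUMBER is 3 in Luau)
    dst->value = v;
}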

View file

@ -145,7 +145,6 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
case IrCmd::LOAD_NODE_VALUE_TV:
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
case IrCmd::GET_SLOT_NODE_ADDR:

View file

@ -225,6 +225,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
build.stp(x19, x20, mem(sp, 16));
build.stp(x21, x22, mem(sp, 32));
build.stp(x23, x24, mem(sp, 48));
build.str(x25, mem(sp, 64));
build.mov(x29, sp); // this is only necessary if we maintain frame pointers, which we do in the JIT for now
@ -235,6 +236,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
// Setup native execution environment
build.mov(rState, x0);
build.mov(rNativeContext, x3);
build.ldr(rGlobalState, mem(x0, offsetof(lua_State, global)));
build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
@ -252,6 +254,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
locations.epilogueStart = build.setLabel();
// Cleanup and exit
build.ldr(x25, mem(sp, 64));
build.ldp(x23, x24, mem(sp, 48));
build.ldp(x21, x22, mem(sp, 32));
build.ldp(x19, x20, mem(sp, 16));
@ -262,7 +265,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
// Our entry function is special, it spans the whole remaining code area
unwind.startFunction();
unwind.prologueA64(prologueSize, kStackSize, {x29, x30, x19, x20, x21, x22, x23, x24});
unwind.prologueA64(prologueSize, kStackSize, {x29, x30, x19, x20, x21, x22, x23, x24, x25});
unwind.finishFunction(build.getLabelOffset(locations.start), kFullBlockFuncton);
return locations;

View file

@ -194,7 +194,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
}
}
lowering.finishBlock();
lowering.finishBlock(block, nextBlock);
if (options.includeIr)
build.logAppend("#\n");

View file

@ -31,23 +31,24 @@ namespace A64
// 1. Constant registers (only loaded during codegen entry)
constexpr RegisterA64 rState = x19; // lua_State* L
constexpr RegisterA64 rNativeContext = x20; // NativeContext* context
constexpr RegisterA64 rGlobalState = x21; // global_State* L->global
// 2. Frame registers (reloaded when call frame changes; rBase is also reloaded after all calls that may reallocate stack)
constexpr RegisterA64 rConstants = x21; // TValue* k
constexpr RegisterA64 rClosure = x22; // Closure* cl
constexpr RegisterA64 rCode = x23; // Instruction* code
constexpr RegisterA64 rBase = x24; // StkId base
constexpr RegisterA64 rConstants = x22; // TValue* k
constexpr RegisterA64 rClosure = x23; // Closure* cl
constexpr RegisterA64 rCode = x24; // Instruction* code
constexpr RegisterA64 rBase = x25; // StkId base
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
// See CodeGenA64.cpp for layout
constexpr unsigned kStashSlots = 8; // stashed non-volatile registers
constexpr unsigned kStashSlots = 9; // stashed non-volatile registers
constexpr unsigned kTempSlots = 1; // 8 bytes of temporary space, such luxury!
constexpr unsigned kSpillSlots = 22; // slots for spilling temporary registers
constexpr unsigned kTempSlots = 2; // 16 bytes of temporary space, such luxury!
constexpr unsigned kStackSize = (kStashSlots + kSpillSlots + kTempSlots) * 8;
constexpr unsigned kStackSize = (kStashSlots + kTempSlots + kSpillSlots) * 8;
constexpr AddressA64 sSpillArea = mem(sp, kStashSlots * 8);
constexpr AddressA64 sTemporary = mem(sp, (kStashSlots + kSpillSlots) * 8);
constexpr AddressA64 sSpillArea = mem(sp, (kStashSlots + kTempSlots) * 8);
constexpr AddressA64 sTemporary = mem(sp, kStashSlots * 8);
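
With x25 added to the stash, the constants above give a frame of (9 + 2 + 22) * 8 = 264 bytes; the temporaries now sit directly after the stash, and the spill area moves to the end. A small sketch that just recomputes the layout:

#include <cstdio>

int main()
{
    constexpr unsigned kStashSlots = 9; // x29, x30, x19..x25
    constexpr unsigned kTempSlots = 2;
    constexpr unsigned kSpillSlots = 22;
    constexpr unsigned kStackSize = (kStashSlots + kTempSlots + kSpillSlots) * 8;

    std::printf("sTemporary = sp+%u, sSpillArea = sp+%u, frame = %u bytes\n",
        kStashSlots * 8, (kStashSlots + kTempSlots) * 8, kStackSize); // 72, 88, 264
}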
inline void emitUpdateBase(AssemblyBuilderA64& build)
{

View file

@ -114,11 +114,6 @@ inline OperandX64 luauNodeKeyTag(RegisterX64 node)
return dword[node + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext];
}
inline OperandX64 luauNodeValue(RegisterX64 node)
{
return xmmword[node + offsetof(LuaNode, val)];
}
inline void setLuauReg(AssemblyBuilderX64& build, RegisterX64 tmp, int ri, OperandX64 op)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);

View file

@ -209,6 +209,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrBlock&
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
case IrCmd::STORE_SPLIT_TVALUE:
visitor.maybeDef(inst.a); // Argument can also be a pointer value
break;
case IrCmd::CMP_ANY:
@ -897,6 +898,79 @@ void computeCfgDominanceTreeChildren(IrFunction& function)
computeBlockOrdering<domChildren>(function, info.domOrdering, /* preOrder */ nullptr, /* postOrder */ nullptr);
}
// This algorithm is based on 'A Linear Time Algorithm for Placing Phi-Nodes' [Vugranam C.Sreedhar]
// It uses the optimized form from LLVM that relies on an implicit DJ-graph (join edges are edges of the CFG that are not part of the dominance tree)
void computeIteratedDominanceFrontierForDefs(
IdfContext& ctx, const IrFunction& function, const std::vector<uint32_t>& defBlocks, const std::vector<uint32_t>& liveInBlocks)
{
LUAU_ASSERT(!function.cfg.domOrdering.empty());
LUAU_ASSERT(ctx.queue.empty());
LUAU_ASSERT(ctx.worklist.empty());
ctx.idf.clear();
ctx.visits.clear();
ctx.visits.resize(function.blocks.size());
for (uint32_t defBlock : defBlocks)
{
const BlockOrdering& ordering = function.cfg.domOrdering[defBlock];
ctx.queue.push({defBlock, ordering});
}
while (!ctx.queue.empty())
{
IdfContext::BlockAndOrdering root = ctx.queue.top();
ctx.queue.pop();
LUAU_ASSERT(ctx.worklist.empty());
ctx.worklist.push_back(root.blockIdx);
ctx.visits[root.blockIdx].seenInWorklist = true;
while (!ctx.worklist.empty())
{
uint32_t blockIdx = ctx.worklist.back();
ctx.worklist.pop_back();
// Check if successor node is the node where dominance of the current root ends, making it a part of dominance frontier set
for (uint32_t succIdx : successors(function.cfg, blockIdx))
{
const BlockOrdering& succOrdering = function.cfg.domOrdering[succIdx];
// Nodes in the DF of root always have a level that is less than or equal to the level of root
if (succOrdering.depth > root.ordering.depth)
continue;
if (ctx.visits[succIdx].seenInQueue)
continue;
ctx.visits[succIdx].seenInQueue = true;
// Skip the successor block if it doesn't have our variable as a live-in
if (std::find(liveInBlocks.begin(), liveInBlocks.end(), succIdx) == liveInBlocks.end())
continue;
ctx.idf.push_back(succIdx);
// If block doesn't have its own definition of the variable, add it to the queue
if (std::find(defBlocks.begin(), defBlocks.end(), succIdx) == defBlocks.end())
ctx.queue.push({succIdx, succOrdering});
}
// Add dominance tree children that haven't been processed yet to the worklist
for (uint32_t domChildIdx : domChildren(function.cfg, blockIdx))
{
if (ctx.visits[domChildIdx].seenInWorklist)
continue;
ctx.visits[domChildIdx].seenInWorklist = true;
ctx.worklist.push_back(domChildIdx);
}
}
}
}
void computeCfgInfo(IrFunction& function)
{
computeCfgBlockEdges(function);

View file

@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrBuilder.h"
#include "Luau/Bytecode.h"
#include "Luau/BytecodeUtils.h"
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"

View file

@ -89,8 +89,6 @@ const char* getCmdName(IrCmd cmd)
return "LOAD_INT";
case IrCmd::LOAD_TVALUE:
return "LOAD_TVALUE";
case IrCmd::LOAD_NODE_VALUE_TV:
return "LOAD_NODE_VALUE_TV";
case IrCmd::LOAD_ENV:
return "LOAD_ENV";
case IrCmd::GET_ARR_ADDR:
@ -113,8 +111,8 @@ const char* getCmdName(IrCmd cmd)
return "STORE_VECTOR";
case IrCmd::STORE_TVALUE:
return "STORE_TVALUE";
case IrCmd::STORE_NODE_VALUE_TV:
return "STORE_NODE_VALUE_TV";
case IrCmd::STORE_SPLIT_TVALUE:
return "STORE_SPLIT_TVALUE";
case IrCmd::ADD_INT:
return "ADD_INT";
case IrCmd::SUB_INT:

View file

@ -122,6 +122,7 @@ static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos)
static void emitInvokeLibm1P(AssemblyBuilderA64& build, size_t func, int arg)
{
LUAU_ASSERT(kTempSlots >= 1);
build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n)));
build.add(x0, sp, sTemporary.data); // sp-relative offset
build.ldr(x1, mem(rNativeContext, uint32_t(func)));
@ -224,16 +225,12 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
case IrCmd::LOAD_TVALUE:
{
inst.regA64 = regs.allocReg(KindA64::q, index);
AddressA64 addr = tempAddr(inst.a, 0);
int addrOffset = inst.b.kind != IrOpKind::None ? intOp(inst.b) : 0;
AddressA64 addr = tempAddr(inst.a, addrOffset);
build.ldr(inst.regA64, addr);
break;
}
case IrCmd::LOAD_NODE_VALUE_TV:
{
inst.regA64 = regs.allocReg(KindA64::q, index);
build.ldr(inst.regA64, mem(regOp(inst.a), offsetof(LuaNode, val)));
break;
}
case IrCmd::LOAD_ENV:
inst.regA64 = regs.allocReg(KindA64::x, index);
build.ldr(inst.regA64, mem(rClosure, offsetof(Closure, env)));
@ -322,10 +319,17 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
}
case IrCmd::STORE_TAG:
{
RegisterA64 temp = regs.allocTemp(KindA64::w);
AddressA64 addr = tempAddr(inst.a, offsetof(TValue, tt));
build.mov(temp, tagOp(inst.b));
build.str(temp, addr);
if (tagOp(inst.b) == 0)
{
build.str(wzr, addr);
}
else
{
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.mov(temp, tagOp(inst.b));
build.str(temp, addr);
}
break;
}
case IrCmd::STORE_POINTER:
@ -343,9 +347,16 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
}
case IrCmd::STORE_INT:
{
RegisterA64 temp = tempInt(inst.b);
AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value));
build.str(temp, addr);
if (inst.b.kind == IrOpKind::Constant && intOp(inst.b) == 0)
{
build.str(wzr, addr);
}
else
{
RegisterA64 temp = tempInt(inst.b);
build.str(temp, addr);
}
break;
}
case IrCmd::STORE_VECTOR:
@ -368,13 +379,48 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
}
case IrCmd::STORE_TVALUE:
{
AddressA64 addr = tempAddr(inst.a, 0);
int addrOffset = inst.c.kind != IrOpKind::None ? intOp(inst.c) : 0;
AddressA64 addr = tempAddr(inst.a, addrOffset);
build.str(regOp(inst.b), addr);
break;
}
case IrCmd::STORE_NODE_VALUE_TV:
build.str(regOp(inst.b), mem(regOp(inst.a), offsetof(LuaNode, val)));
case IrCmd::STORE_SPLIT_TVALUE:
{
int addrOffset = inst.d.kind != IrOpKind::None ? intOp(inst.d) : 0;
RegisterA64 tempt = regs.allocTemp(KindA64::w);
AddressA64 addrt = tempAddr(inst.a, offsetof(TValue, tt) + addrOffset);
build.mov(tempt, tagOp(inst.b));
build.str(tempt, addrt);
AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value) + addrOffset);
if (tagOp(inst.b) == LUA_TBOOLEAN)
{
if (inst.c.kind == IrOpKind::Constant)
{
// note: we reuse tag temp register as value for true booleans, and use built-in zero register for false values
LUAU_ASSERT(LUA_TBOOLEAN == 1);
build.str(intOp(inst.c) ? tempt : wzr, addr);
}
else
build.str(regOp(inst.c), addr);
}
else if (tagOp(inst.b) == LUA_TNUMBER)
{
RegisterA64 temp = tempDouble(inst.c);
build.str(temp, addr);
}
else if (isGCO(tagOp(inst.b)))
{
build.str(regOp(inst.c), addr);
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
}
case IrCmd::ADD_INT:
inst.regA64 = regs.allocReuse(KindA64::w, index, {inst.a, inst.b});
if (inst.b.kind == IrOpKind::Constant && unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate)
@ -705,15 +751,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, luaH_getn)));
build.blr(x1);
inst.regA64 = regs.allocReg(KindA64::d, index);
build.scvtf(inst.regA64, x0);
build.scvtf(inst.regA64, w0);
break;
}
case IrCmd::STRING_LEN:
{
RegisterA64 reg = regOp(inst.a);
inst.regA64 = regs.allocReg(KindA64::w, index);
build.ldr(inst.regA64, mem(reg, offsetof(TString, len)));
build.ldr(inst.regA64, mem(regOp(inst.a), offsetof(TString, len)));
break;
}
case IrCmd::NEW_TABLE:
@ -774,8 +819,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
regs.spill(build, index, {temp1});
build.mov(x0, temp1);
build.mov(w1, intOp(inst.b));
build.ldr(x2, mem(rState, offsetof(lua_State, global)));
build.ldr(x2, mem(x2, offsetof(global_State, tmname) + intOp(inst.b) * sizeof(TString*)));
build.ldr(x2, mem(rGlobalState, offsetof(global_State, tmname) + intOp(inst.b) * sizeof(TString*)));
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaT_gettm)));
build.blr(x3);
@ -972,8 +1016,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
case IrCmd::CONCAT:
regs.spill(build, index);
build.mov(x0, rState);
build.mov(x1, uintOp(inst.b));
build.mov(x2, vmRegOp(inst.a) + uintOp(inst.b) - 1);
build.mov(w1, uintOp(inst.b));
build.mov(w2, vmRegOp(inst.a) + uintOp(inst.b) - 1);
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaV_concat)));
build.blr(x3);
@ -1016,21 +1060,24 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.ldr(temp3, mem(rBase, vmRegOp(inst.b) * sizeof(TValue)));
build.str(temp3, temp2);
Label skip;
checkObjectBarrierConditions(build, temp1, temp2, vmRegOp(inst.b), /* ratag */ -1, skip);
if (inst.c.kind == IrOpKind::Undef || isGCO(tagOp(inst.c)))
{
Label skip;
checkObjectBarrierConditions(build, temp1, temp2, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
size_t spills = regs.spill(build, index, {temp1});
size_t spills = regs.spill(build, index, {temp1});
build.mov(x1, temp1);
build.mov(x0, rState);
build.ldr(x2, mem(rBase, vmRegOp(inst.b) * sizeof(TValue) + offsetof(TValue, value)));
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaC_barrierf)));
build.blr(x3);
build.mov(x1, temp1);
build.mov(x0, rState);
build.ldr(x2, mem(rBase, vmRegOp(inst.b) * sizeof(TValue) + offsetof(TValue, value)));
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaC_barrierf)));
build.blr(x3);
regs.restore(build, spills); // need to restore before skip so that registers are in a consistent state
regs.restore(build, spills); // need to restore before skip so that registers are in a consistent state
// note: no emitUpdateBase necessary because luaC_ barriers do not reallocate stack
build.setLabel(skip);
// note: no emitUpdateBase necessary because luaC_ barriers do not reallocate stack
build.setLabel(skip);
}
break;
}
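// Note: with the written value's tag threaded through operand C, the write
// barrier above is emitted only when the value may be a GC object (a GCO tag,
// or Undef when the tag is unknown); plain numbers and booleans skip it.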
case IrCmd::PREPARE_FORN:
@ -1213,8 +1260,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
Label self;
build.ldr(x0, mem(rState, offsetof(lua_State, global)));
build.ldr(x0, mem(x0, offsetof(global_State, cb.interrupt)));
build.ldr(x0, mem(rGlobalState, offsetof(global_State, cb.interrupt)));
build.cbnz(x0, self);
Label next = build.setLabel();
@ -1227,11 +1273,9 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
RegisterA64 temp1 = regs.allocTemp(KindA64::x);
RegisterA64 temp2 = regs.allocTemp(KindA64::x);
LUAU_ASSERT(offsetof(global_State, totalbytes) == offsetof(global_State, GCthreshold) + 8);
Label skip;
build.ldr(temp1, mem(rState, offsetof(lua_State, global)));
// TODO: totalbytes and GCthreshold loads can be fused with ldp
build.ldr(temp2, mem(temp1, offsetof(global_State, totalbytes)));
build.ldr(temp1, mem(temp1, offsetof(global_State, GCthreshold)));
build.ldp(temp1, temp2, mem(rGlobalState, offsetof(global_State, GCthreshold)));
build.cmp(temp1, temp2);
build.b(ConditionA64::UnsignedGreater, skip);
@ -1239,8 +1283,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.mov(x0, rState);
build.mov(w1, 1);
build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, luaC_step)));
build.blr(x1);
build.ldr(x2, mem(rNativeContext, offsetof(NativeContext, luaC_step)));
build.blr(x2);
emitUpdateBase(build);
@ -1450,9 +1494,9 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
// clear extra variables since we might have more than two
if (intOp(inst.b) > 2)
{
build.mov(w0, LUA_TNIL);
LUAU_ASSERT(LUA_TNIL == 0);
for (int i = 2; i < intOp(inst.b); ++i)
build.str(w0, mem(rBase, (vmRegOp(inst.a) + 3 + i) * sizeof(TValue) + offsetof(TValue, tt)));
build.str(wzr, mem(rBase, (vmRegOp(inst.a) + 3 + i) * sizeof(TValue) + offsetof(TValue, tt)));
}
// we use full iter fallback for now; in the future it could be worthwhile to accelerate array iteration here
build.mov(x0, rState);
@ -1561,7 +1605,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
emitAddOffset(build, x1, rCode, uintOp(inst.a) * sizeof(Instruction));
build.mov(x2, rBase);
build.mov(x3, vmRegOp(inst.b));
build.mov(w3, vmRegOp(inst.b));
build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, executeGETVARARGSMultRet)));
build.blr(x4);
@ -1570,10 +1614,12 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
else
{
build.mov(x1, rBase);
build.mov(x2, vmRegOp(inst.b));
build.mov(x3, intOp(inst.c));
build.mov(w2, vmRegOp(inst.b));
build.mov(w3, intOp(inst.c));
build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, executeGETVARARGSConst)));
build.blr(x4);
// note: no emitUpdateBase necessary because executeGETVARARGSConst does not reallocate stack
}
break;
case IrCmd::NEWCLOSURE:
@ -1790,13 +1836,12 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
inst.regA64 = regs.allocReg(KindA64::x, index);
build.ldr(inst.regA64, mem(rState, offsetof(lua_State, global)));
LUAU_ASSERT(sizeof(TString*) == 8);
if (inst.a.kind == IrOpKind::Inst)
build.add(inst.regA64, inst.regA64, zextReg(regOp(inst.a)), 3);
build.add(inst.regA64, rGlobalState, zextReg(regOp(inst.a)), 3);
else if (inst.a.kind == IrOpKind::Constant)
build.add(inst.regA64, inst.regA64, uint16_t(tagOp(inst.a)) * 8);
build.add(inst.regA64, rGlobalState, uint16_t(tagOp(inst.a)) * 8);
else
LUAU_ASSERT(!"Unsupported instruction form");
@ -1836,9 +1881,17 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
regs.freeTempRegs();
}
void IrLoweringA64::finishBlock()
void IrLoweringA64::finishBlock(const IrBlock& curr, const IrBlock& next)
{
regs.assertNoSpills();
if (!regs.spills.empty())
{
// If we have spills remaining, we have to immediately lower the successor block
for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next)))
LUAU_ASSERT(predIdx == function.getBlockIndex(curr));
// And the next block cannot be a join block in cfg
LUAU_ASSERT(next.useCount == 1);
}
}
void IrLoweringA64::finishFunction()

View file

@ -26,7 +26,7 @@ struct IrLoweringA64
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, const IrBlock& next);
void finishBlock();
void finishBlock(const IrBlock& curr, const IrBlock& next);
void finishFunction();
bool hasError() const;

View file

@ -111,22 +111,21 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.mov(inst.regX64, luauRegValueInt(vmRegOp(inst.a)));
break;
case IrCmd::LOAD_TVALUE:
{
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
int addrOffset = inst.b.kind != IrOpKind::None ? intOp(inst.b) : 0;
if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(inst.regX64, luauReg(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst)
build.vmovups(inst.regX64, luauConstant(vmConstOp(inst.a)));
else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(inst.regX64, xmmword[regOp(inst.a)]);
build.vmovups(inst.regX64, xmmword[regOp(inst.a) + addrOffset]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_NODE_VALUE_TV:
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
build.vmovups(inst.regX64, luauNodeValue(regOp(inst.a)));
break;
}
case IrCmd::LOAD_ENV:
inst.regX64 = regs.allocReg(SizeX64::qword, index);
@ -252,16 +251,59 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 2), inst.d);
break;
case IrCmd::STORE_TVALUE:
{
int addrOffset = inst.c.kind != IrOpKind::None ? intOp(inst.c) : 0;
if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(luauReg(vmRegOp(inst.a)), regOp(inst.b));
else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(xmmword[regOp(inst.a)], regOp(inst.b));
build.vmovups(xmmword[regOp(inst.a) + addrOffset], regOp(inst.b));
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::STORE_NODE_VALUE_TV:
build.vmovups(luauNodeValue(regOp(inst.a)), regOp(inst.b));
}
case IrCmd::STORE_SPLIT_TVALUE:
{
int addrOffset = inst.d.kind != IrOpKind::None ? intOp(inst.d) : 0;
OperandX64 tagLhs = inst.a.kind == IrOpKind::Inst ? dword[regOp(inst.a) + offsetof(TValue, tt) + addrOffset] : luauRegTag(vmRegOp(inst.a));
build.mov(tagLhs, tagOp(inst.b));
if (tagOp(inst.b) == LUA_TBOOLEAN)
{
OperandX64 valueLhs =
inst.a.kind == IrOpKind::Inst ? dword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValueInt(vmRegOp(inst.a));
build.mov(valueLhs, inst.c.kind == IrOpKind::Constant ? OperandX64(intOp(inst.c)) : regOp(inst.c));
}
else if (tagOp(inst.b) == LUA_TNUMBER)
{
OperandX64 valueLhs =
inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValue(vmRegOp(inst.a));
if (inst.c.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, build.f64(doubleOp(inst.c)));
build.vmovsd(valueLhs, tmp.reg);
}
else
{
build.vmovsd(valueLhs, regOp(inst.c));
}
}
else if (isGCO(tagOp(inst.b)))
{
OperandX64 valueLhs =
inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValue(vmRegOp(inst.a));
build.mov(valueLhs, regOp(inst.c));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
}
case IrCmd::ADD_INT:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
@ -951,7 +993,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
tmp1.free();
callBarrierObject(regs, build, tmp2.release(), {}, vmRegOp(inst.b), /* ratag */ -1);
if (inst.c.kind == IrOpKind::Undef || isGCO(tagOp(inst.c)))
callBarrierObject(regs, build, tmp2.release(), {}, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
break;
}
case IrCmd::PREPARE_FORN:
@ -1540,9 +1583,17 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
regs.freeLastUseRegs(inst, index);
}
void IrLoweringX64::finishBlock()
void IrLoweringX64::finishBlock(const IrBlock& curr, const IrBlock& next)
{
regs.assertNoSpills();
if (!regs.spills.empty())
{
// If we have spills remaining, we have to immediately lower the successor block
for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next)))
LUAU_ASSERT(predIdx == function.getBlockIndex(curr));
// And the next block cannot be a join block in cfg
LUAU_ASSERT(next.useCount == 1);
}
}
void IrLoweringX64::finishFunction()

View file

@ -28,7 +28,7 @@ struct IrLoweringX64
IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, const IrBlock& next);
void finishBlock();
void finishBlock(const IrBlock& curr, const IrBlock& next);
void finishFunction();
bool hasError() const;

View file

@ -411,11 +411,6 @@ void IrRegAllocA64::restoreReg(AssemblyBuilderA64& build, IrInst& inst)
LUAU_ASSERT(!"Expected to find a spill record");
}
void IrRegAllocA64::assertNoSpills() const
{
LUAU_ASSERT(spills.empty());
}
IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind)
{
switch (kind)

View file

@ -43,8 +43,6 @@ struct IrRegAllocA64
// Restores register for a single instruction; may not assign the previously used register!
void restoreReg(AssemblyBuilderA64& build, IrInst& inst);
void assertNoSpills() const;
struct Set
{
// which registers are in the set that the allocator manages (initialized at construction)

View file

@ -54,7 +54,12 @@ RegisterX64 IrRegAllocX64::allocReg(SizeX64 size, uint32_t instIdx)
// Out of registers, spill the value with the furthest next use
const std::array<uint32_t, 16>& regInstUsers = size == SizeX64::xmmword ? xmmInstUsers : gprInstUsers;
if (uint32_t furthestUseTarget = findInstructionWithFurthestNextUse(regInstUsers); furthestUseTarget != kInvalidInstIdx)
return takeReg(function.instructions[furthestUseTarget].regX64, instIdx);
{
RegisterX64 reg = function.instructions[furthestUseTarget].regX64;
reg.size = size; // Adjust the register size to the requested one
return takeReg(reg, instIdx);
}
LUAU_ASSERT(!"Out of registers to allocate");
return noreg;
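Spilling the value with the furthest next use is the classic Belady/MIN eviction policy adapted to register allocation; the fix above additionally resizes the reclaimed register to the requested width before handing it out. A standalone sketch of the heuristic (the data layout is illustrative, not the allocator's real bookkeeping):

#include <cstdint>
#include <vector>

constexpr uint32_t kInvalidInst = ~0u;

// Returns the index of the tracked value whose next use is furthest away,
// i.e. the cheapest one to spill right now, or kInvalidInst if none is tracked.
uint32_t findFurthestNextUse(const std::vector<uint32_t>& nextUseAt)
{
    uint32_t best = kInvalidInst;
    uint32_t bestDist = 0;
    for (uint32_t i = 0; i < nextUseAt.size(); ++i)
    {
        if (nextUseAt[i] != kInvalidInst && nextUseAt[i] >= bestDist)
        {
            bestDist = nextUseAt[i];
            best = i;
        }
    }
    return best;
}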

View file

@ -2,6 +2,7 @@
#include "IrTranslation.h"
#include "Luau/Bytecode.h"
#include "Luau/BytecodeUtils.h"
#include "Luau/IrBuilder.h"
#include "Luau/IrUtils.h"
@ -526,7 +527,7 @@ void translateInstSetUpval(IrBuilder& build, const Instruction* pc, int pcpos)
int ra = LUAU_INSN_A(*pc);
int up = LUAU_INSN_B(*pc);
build.inst(IrCmd::SET_UPVALUE, build.vmUpvalue(up), build.vmReg(ra));
build.inst(IrCmd::SET_UPVALUE, build.vmUpvalue(up), build.vmReg(ra), build.undef());
}
void translateInstCloseUpvals(IrBuilder& build, const Instruction* pc)
@ -988,7 +989,7 @@ void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
IrOp tvn = build.inst(IrCmd::LOAD_NODE_VALUE_TV, addrSlotEl);
IrOp tvn = build.inst(IrCmd::LOAD_TVALUE, addrSlotEl, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), tvn);
IrOp next = build.blockAtInst(pcpos + 2);
@ -1017,7 +1018,7 @@ void translateInstSetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::CHECK_READONLY, vb, fallback);
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva);
build.inst(IrCmd::STORE_TVALUE, addrSlotEl, tva, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());
@ -1040,7 +1041,7 @@ void translateInstGetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
IrOp tvn = build.inst(IrCmd::LOAD_NODE_VALUE_TV, addrSlotEl);
IrOp tvn = build.inst(IrCmd::LOAD_TVALUE, addrSlotEl, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), tvn);
IrOp next = build.blockAtInst(pcpos + 2);
@ -1064,7 +1065,7 @@ void translateInstSetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::CHECK_READONLY, env, fallback);
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva);
build.inst(IrCmd::STORE_TVALUE, addrSlotEl, tva, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::BARRIER_TABLE_FORWARD, env, build.vmReg(ra), build.undef());
@ -1136,7 +1137,7 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 1), table);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TTABLE));
IrOp nodeEl = build.inst(IrCmd::LOAD_NODE_VALUE_TV, addrNodeEl);
IrOp nodeEl = build.inst(IrCmd::LOAD_TVALUE, addrNodeEl, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), nodeEl);
build.inst(IrCmd::JUMP, next);
@ -1158,7 +1159,7 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 1), table2);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TTABLE));
IrOp indexNodeEl = build.inst(IrCmd::LOAD_NODE_VALUE_TV, addrIndexNodeEl);
IrOp indexNodeEl = build.inst(IrCmd::LOAD_TVALUE, addrIndexNodeEl, build.constInt(offsetof(LuaNode, val)));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), indexNodeEl);
build.inst(IrCmd::JUMP, next);

View file

@ -1,8 +1,6 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include <stdint.h>
#include "ltm.h"
@ -67,38 +65,5 @@ void translateInstAndX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp
void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos);
inline int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;
default:
return 1;
}
}
} // namespace CodeGen
} // namespace Luau

View file

@ -32,7 +32,6 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::LOAD_INT:
return IrValueKind::Int;
case IrCmd::LOAD_TVALUE:
case IrCmd::LOAD_NODE_VALUE_TV:
return IrValueKind::Tvalue;
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
@ -46,7 +45,7 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
case IrCmd::STORE_NODE_VALUE_TV:
case IrCmd::STORE_SPLIT_TVALUE:
return IrValueKind::None;
case IrCmd::ADD_INT:
case IrCmd::SUB_INT:
@ -216,6 +215,8 @@ void removeUse(IrFunction& function, IrOp op)
bool isGCO(uint8_t tag)
{
LUAU_ASSERT(tag < LUA_T_COUNT);
// mirrors iscollectable(o) from VM/lobject.h
return tag >= LUA_TSTRING;
}
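The single comparison works because of how the VM orders its tags: every plain value type sits below LUA_TSTRING, and every heap-allocated (collectable) type sits at or above it. A hedged illustration with a stand-in enum (the ordering mirrors the VM's; the exact constants below are illustrative, not copied from lua.h):

enum FakeTag
{
    T_NIL,           // value types: never collected
    T_BOOLEAN,
    T_LIGHTUSERDATA,
    T_NUMBER,
    T_VECTOR,
    T_STRING,        // first collectable tag (plays the role of LUA_TSTRING)
    T_TABLE,
    T_FUNCTION,
    T_USERDATA,
    T_THREAD,
};

inline bool fakeIsGCO(int tag)
{
    return tag >= T_STRING; // same shape as `tag >= LUA_TSTRING` above
}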
@ -388,6 +389,7 @@ void applySubstitutions(IrFunction& function, IrInst& inst)
bool compare(double a, double b, IrCondition cond)
{
// Note: redundant bool() casts work around invalid MSVC optimization that merges cases in this switch, violating IEEE754 comparison semantics
switch (cond)
{
case IrCondition::Equal:
@ -397,19 +399,19 @@ bool compare(double a, double b, IrCondition cond)
case IrCondition::Less:
return a < b;
case IrCondition::NotLess:
return !(a < b);
return !bool(a < b);
case IrCondition::LessEqual:
return a <= b;
case IrCondition::NotLessEqual:
return !(a <= b);
return !bool(a <= b);
case IrCondition::Greater:
return a > b;
case IrCondition::NotGreater:
return !(a > b);
return !bool(a > b);
case IrCondition::GreaterEqual:
return a >= b;
case IrCondition::NotGreaterEqual:
return !(a >= b);
return !bool(a >= b);
default:
LUAU_ASSERT(!"Unsupported condition");
}
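The bool() casts matter because the negated conditions are not equivalent to their flipped counterparts once NaN is involved: under IEEE754, every ordered comparison with NaN is false, so NotLess must be computed as !(a < b), never as a >= b. A small self-contained check of that distinction:

#include <cassert>
#include <cmath>

int main()
{
    double a = std::nan(""), b = 1.0;
    bool less = a < b;          // false: NaN makes ordered comparisons false
    bool notLess = !(a < b);    // true
    bool greaterEqual = a >= b; // false, so NotLess != GreaterEqual under NaN
    assert(!less && notLess && !greaterEqual);
    return 0;
}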

View file

@ -28,6 +28,7 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
case IrCmd::STORE_SPLIT_TVALUE:
invalidateRestoreOp(inst.a);
break;
case IrCmd::ADJUST_STACK_TO_REG:

View file

@ -9,6 +9,7 @@
#include "lua.h"
#include <array>
#include <utility>
#include <vector>
LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
@ -31,6 +32,7 @@ struct RegisterInfo
bool knownNotReadonly = false;
bool knownNoMetatable = false;
int knownTableArraySize = -1;
};
// Load instructions are linked to target register to carry knowledge about the target
@ -95,6 +97,7 @@ struct ConstPropState
info->value = value;
info->knownNotReadonly = false;
info->knownNoMetatable = false;
info->knownTableArraySize = -1;
info->version++;
}
}
@ -112,6 +115,7 @@ struct ConstPropState
reg.value = {};
reg.knownNotReadonly = false;
reg.knownNoMetatable = false;
reg.knownTableArraySize = -1;
}
reg.version++;
@ -176,6 +180,7 @@ struct ConstPropState
{
reg.knownNotReadonly = false;
reg.knownNoMetatable = false;
reg.knownTableArraySize = -1;
}
void invalidateUserCall()
@ -236,28 +241,66 @@ struct ConstPropState
return IrInst{loadCmd, op};
}
uint32_t* getPreviousInstIndex(const IrInst& inst)
{
LUAU_ASSERT(useValueNumbering);
if (uint32_t* prevIdx = valueMap.find(inst))
{
// Previous load might have been removed as unused
if (function.instructions[*prevIdx].useCount != 0)
return prevIdx;
}
return nullptr;
}
uint32_t* getPreviousVersionedLoadIndex(IrCmd cmd, IrOp vmReg)
{
LUAU_ASSERT(vmReg.kind == IrOpKind::VmReg);
return getPreviousInstIndex(versionedVmRegLoad(cmd, vmReg));
}
std::pair<IrCmd, uint32_t> getPreviousVersionedLoadForTag(uint8_t tag, IrOp vmReg)
{
if (useValueNumbering && !function.cfg.captured.regs.test(vmRegOp(vmReg)))
{
if (tag == LUA_TBOOLEAN)
{
if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_INT, vmReg))
return std::make_pair(IrCmd::LOAD_INT, *prevIdx);
}
else if (tag == LUA_TNUMBER)
{
if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_DOUBLE, vmReg))
return std::make_pair(IrCmd::LOAD_DOUBLE, *prevIdx);
}
else if (isGCO(tag))
{
if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_POINTER, vmReg))
return std::make_pair(IrCmd::LOAD_POINTER, *prevIdx);
}
}
return std::make_pair(IrCmd::NOP, kInvalidInstIdx);
}
// Find existing value of the instruction that is exactly the same, or record the current one for future lookups
void substituteOrRecord(IrInst& inst, uint32_t instIdx)
{
if (!useValueNumbering)
return;
if (uint32_t* prevIdx = valueMap.find(inst))
if (uint32_t* prevIdx = getPreviousInstIndex(inst))
{
const IrInst& prev = function.instructions[*prevIdx];
// Previous load might have been removed as unused
if (prev.useCount != 0)
{
substitute(function, inst, IrOp{IrOpKind::Inst, *prevIdx});
return;
}
substitute(function, inst, IrOp{IrOpKind::Inst, *prevIdx});
return;
}
valueMap[inst] = instIdx;
}
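substituteOrRecord is a small per-chain value-numbering table: structurally identical instructions resolve to the index of their first occurrence, and the new getPreviousInstIndex guard skips entries whose defining instruction has since been dropped as unused. A toy model of that lookup (the keying and bookkeeping are simplified; the real table keys on the instruction itself):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct ValueTable
{
    std::unordered_map<std::string, uint32_t> map;

    // Returns the index of an equivalent live instruction, or records this one.
    uint32_t substituteOrRecord(const std::string& key, uint32_t idx, const std::vector<uint32_t>& useCounts)
    {
        if (auto it = map.find(key); it != map.end() && useCounts[it->second] != 0)
            return it->second; // reuse the earlier result; dead entries are skipped

        map[key] = idx;
        return idx;
    }
};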
// Vm register load can be replaced by a previous load of the same version of the register
// VM register load can be replaced by a previous load of the same version of the register
// If there is no previous load, we record the current one for future lookups
void substituteOrRecordVmRegLoad(IrInst& loadInst)
{
@ -274,22 +317,16 @@ struct ConstPropState
IrInst versionedLoad = versionedVmRegLoad(loadInst.cmd, loadInst.a);
// Check if there is a value that already has this version of the register
if (uint32_t* prevIdx = valueMap.find(versionedLoad))
if (uint32_t* prevIdx = getPreviousInstIndex(versionedLoad))
{
const IrInst& prev = function.instructions[*prevIdx];
// Previous value might not be linked to a register yet
// For example, it could be a NEW_TABLE stored into a register and we might need to track guards made with this value
if (!instLink.contains(*prevIdx))
createRegLink(*prevIdx, loadInst.a);
// Previous load might have been removed as unused
if (prev.useCount != 0)
{
// Previous value might not be linked to a register yet
// For example, it could be a NEW_TABLE stored into a register and we might need to track guards made with this value
if (!instLink.contains(*prevIdx))
createRegLink(*prevIdx, loadInst.a);
// Substitute load instruction with the previous value
substitute(function, loadInst, IrOp{IrOpKind::Inst, *prevIdx});
return;
}
// Substitute load instruction with the previous value
substitute(function, loadInst, IrOp{IrOpKind::Inst, *prevIdx});
return;
}
uint32_t instIdx = function.getInstIndex(loadInst);
@ -403,10 +440,8 @@ static void handleBuiltinEffects(ConstPropState& state, LuauBuiltinFunction bfid
case LBF_MATH_CLAMP:
case LBF_MATH_SIGN:
case LBF_MATH_ROUND:
case LBF_RAWSET:
case LBF_RAWGET:
case LBF_RAWEQUAL:
case LBF_TABLE_INSERT:
case LBF_TABLE_UNPACK:
case LBF_VECTOR:
case LBF_BIT32_COUNTLZ:
@ -418,6 +453,12 @@ static void handleBuiltinEffects(ConstPropState& state, LuauBuiltinFunction bfid
case LBF_TONUMBER:
case LBF_TOSTRING:
break;
case LBF_TABLE_INSERT:
state.invalidateHeap();
return; // table.insert does not modify result registers.
case LBF_RAWSET:
state.invalidateHeap();
break;
case LBF_SETMETATABLE:
state.invalidateHeap(); // TODO: only knownNoMetatable is affected and we might know which one
break;
@ -470,21 +511,18 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
if (inst.a.kind == IrOpKind::VmReg)
{
const IrOp source = inst.a;
uint32_t activeLoadDoubleValue = kInvalidInstIdx;
IrCmd activeLoadCmd = IrCmd::NOP;
uint32_t activeLoadValue = kInvalidInstIdx;
if (inst.b.kind == IrOpKind::Constant)
{
uint8_t value = function.tagOp(inst.b);
// STORE_TAG usually follows a store of the value, but it also bumps the version of the whole register
// To be able to propagate STORE_DOUBLE into LOAD_DOUBLE, we find active LOAD_DOUBLE value and recreate it with updated version
// To be able to propagate STORE_*** into LOAD_***, we find active LOAD_*** value and recreate it with updated version
// Register in this optimization cannot be captured to avoid complications in lowering (IrValueLocationTracking doesn't model it)
// If stored tag is not a number, we can skip the lookup as there won't be future loads of this register as a number
if (value == LUA_TNUMBER && !function.cfg.captured.regs.test(vmRegOp(source)))
{
if (uint32_t* prevIdx = state.valueMap.find(state.versionedVmRegLoad(IrCmd::LOAD_DOUBLE, source)))
activeLoadDoubleValue = *prevIdx;
}
std::tie(activeLoadCmd, activeLoadValue) = state.getPreviousVersionedLoadForTag(value, source);
if (state.tryGetTag(source) == value)
{
@ -503,9 +541,9 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.invalidateTag(source);
}
// Future LOAD_DOUBLE instructions can re-use previous register version load
if (activeLoadDoubleValue != kInvalidInstIdx)
state.valueMap[state.versionedVmRegLoad(IrCmd::LOAD_DOUBLE, source)] = activeLoadDoubleValue;
// Future LOAD_*** instructions can re-use previous register version load
if (activeLoadValue != kInvalidInstIdx)
state.valueMap[state.versionedVmRegLoad(activeLoadCmd, source)] = activeLoadValue;
}
break;
case IrCmd::STORE_POINTER:
@ -520,6 +558,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
{
info->knownNotReadonly = true;
info->knownNoMetatable = true;
info->knownTableArraySize = function.uintOp(instOp->a);
}
}
}
@ -562,17 +601,62 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.invalidateValue(inst.a);
break;
case IrCmd::STORE_TVALUE:
if (inst.a.kind == IrOpKind::VmReg || inst.a.kind == IrOpKind::Inst)
{
if (inst.a.kind == IrOpKind::VmReg)
state.invalidate(inst.a);
uint8_t tag = state.tryGetTag(inst.b);
IrOp value = state.tryGetValue(inst.b);
if (inst.a.kind == IrOpKind::VmReg)
{
if (tag != 0xff)
state.saveTag(inst.a, tag);
if (value.kind != IrOpKind::None)
state.saveValue(inst.a, value);
}
IrCmd activeLoadCmd = IrCmd::NOP;
uint32_t activeLoadValue = kInvalidInstIdx;
if (tag != 0xff)
{
// If we know the tag, try to extract the value from a register used by LOAD_TVALUE
if (IrInst* arg = function.asInstOp(inst.b); arg && arg->cmd == IrCmd::LOAD_TVALUE && arg->a.kind == IrOpKind::VmReg)
{
std::tie(activeLoadCmd, activeLoadValue) = state.getPreviousVersionedLoadForTag(tag, arg->a);
if (activeLoadValue != kInvalidInstIdx)
value = IrOp{IrOpKind::Inst, activeLoadValue};
}
}
// If we have constant tag and value, replace TValue store with tag/value pair store
if (tag != 0xff && value.kind != IrOpKind::None && (tag == LUA_TBOOLEAN || tag == LUA_TNUMBER || isGCO(tag)))
{
replace(function, block, index, {IrCmd::STORE_SPLIT_TVALUE, inst.a, build.constTag(tag), value, inst.c});
// Value can be propagated to future loads of the same register
if (inst.a.kind == IrOpKind::VmReg && activeLoadValue != kInvalidInstIdx)
state.valueMap[state.versionedVmRegLoad(activeLoadCmd, inst.a)] = activeLoadValue;
}
else if (inst.a.kind == IrOpKind::VmReg)
{
state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_TVALUE);
}
}
break;
case IrCmd::STORE_SPLIT_TVALUE:
if (inst.a.kind == IrOpKind::VmReg)
{
state.invalidate(inst.a);
if (uint8_t tag = state.tryGetTag(inst.b); tag != 0xff)
state.saveTag(inst.a, tag);
state.saveTag(inst.a, function.tagOp(inst.b));
if (IrOp value = state.tryGetValue(inst.b); value.kind != IrOpKind::None)
state.saveValue(inst.a, value);
state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_TVALUE);
if (inst.c.kind == IrOpKind::Constant)
state.saveValue(inst.a, inst.c);
}
break;
case IrCmd::JUMP_IF_TRUTHY:
@ -666,6 +750,15 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::GET_UPVALUE:
state.invalidate(inst.a);
break;
case IrCmd::SET_UPVALUE:
if (inst.b.kind == IrOpKind::VmReg)
{
if (uint8_t tag = state.tryGetTag(inst.b); tag != 0xff)
{
replace(function, inst.c, build.constTag(tag));
}
}
break;
case IrCmd::CHECK_TAG:
{
uint8_t b = function.tagOp(inst.b);
@ -768,8 +861,6 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
// These instructions don't have an effect on register/memory state we are tracking
case IrCmd::NOP:
case IrCmd::LOAD_NODE_VALUE_TV:
case IrCmd::STORE_NODE_VALUE_TV:
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
case IrCmd::GET_SLOT_NODE_ADDR:
@ -824,12 +915,33 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.substituteOrRecord(inst, index);
break;
case IrCmd::CHECK_ARRAY_SIZE:
{
std::optional<int> arrayIndex = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));
if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a); info && arrayIndex)
{
if (info->knownTableArraySize >= 0)
{
if (unsigned(*arrayIndex) < unsigned(info->knownTableArraySize))
{
if (FFlag::DebugLuauAbortingChecks)
replace(function, inst.c, build.undef());
else
kill(function, inst);
}
else
{
replace(function, block, index, {IrCmd::JUMP, inst.c});
}
}
}
break;
}
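Once NEW_TABLE's array size is tracked through STORE_POINTER (see above), a CHECK_ARRAY_SIZE guard with a constant index folds one of three ways: keep the runtime check when anything is unknown, delete it when the index provably fits, or collapse it into an unconditional jump to the fallback when it provably does not. A condensed sketch of that decision (names are illustrative):

#include <optional>

enum class Fold
{
    Keep,           // not enough information, keep the runtime check
    RemoveCheck,    // index provably in bounds, guard can never fail
    AlwaysFallback, // index provably out of bounds, jump straight to fallback
};

Fold foldArraySizeCheck(int knownTableArraySize, std::optional<int> index)
{
    if (knownTableArraySize < 0 || !index)
        return Fold::Keep;
    // The unsigned compare also rejects negative indices, matching the pass.
    if (unsigned(*index) < unsigned(knownTableArraySize))
        return Fold::RemoveCheck;
    return Fold::AlwaysFallback;
}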
case IrCmd::CHECK_SLOT_MATCH:
case IrCmd::CHECK_NODE_NO_NEXT:
case IrCmd::BARRIER_TABLE_BACK:
case IrCmd::RETURN:
case IrCmd::COVERAGE:
case IrCmd::SET_UPVALUE:
case IrCmd::SET_SAVEDPC: // TODO: we may be able to remove some updates to PC
case IrCmd::CLOSE_UPVALS: // Doesn't change memory that we track
case IrCmd::CAPTURE:

View file

@ -0,0 +1,42 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
namespace Luau
{
inline int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;
default:
return 1;
}
}
} // namespace Luau

View file

@ -16,6 +16,7 @@ public:
virtual ~BytecodeEncoder() {}
virtual uint8_t encodeOp(uint8_t op) = 0;
virtual void encode(uint32_t* data, size_t count) = 0;
};
class BytecodeBuilder

View file

@ -1,12 +1,14 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/BytecodeBuilder.h"
#include "Luau/BytecodeUtils.h"
#include "Luau/StringUtils.h"
#include <algorithm>
#include <string.h>
LUAU_FASTFLAGVARIABLE(BytecodeVersion4, false)
LUAU_FASTFLAGVARIABLE(BytecodeEnc, false)
namespace Luau
{
@ -55,39 +57,6 @@ static void writeVarInt(std::string& ss, unsigned int value)
} while (value);
}
static int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;
default:
return 1;
}
}
inline bool isJumpD(LuauOpcode op)
{
switch (op)
@ -262,17 +231,20 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues, uin
validate();
#endif
// this call is indirect to make sure we only gain link time dependency on dumpCurrentFunction when needed
if (dumpFunctionPtr)
func.dump = (this->*dumpFunctionPtr)(func.dumpinstoffs);
// very approximate: 4 bytes per instruction for code, 1 byte for debug line info, and 1-2 bytes for aux data like constants, plus overhead
func.data.reserve(32 + insns.size() * 7);
if (FFlag::BytecodeEnc && encoder)
encoder->encode(insns.data(), insns.size());
writeFunction(func.data, currentFunction, flags);
currentFunction = ~0u;
// this call is indirect to make sure we only gain link time dependency on dumpCurrentFunction when needed
if (dumpFunctionPtr)
func.dump = (this->*dumpFunctionPtr)(func.dumpinstoffs);
insns.clear();
lines.clear();
constants.clear();
@ -653,20 +625,28 @@ void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t flags)
// instructions
writeVarInt(ss, uint32_t(insns.size()));
for (size_t i = 0; i < insns.size();)
if (encoder && !FFlag::BytecodeEnc)
{
uint8_t op = LUAU_INSN_OP(insns[i]);
LUAU_ASSERT(op < LOP__COUNT);
for (size_t i = 0; i < insns.size();)
{
uint8_t op = LUAU_INSN_OP(insns[i]);
LUAU_ASSERT(op < LOP__COUNT);
int oplen = getOpLength(LuauOpcode(op));
uint8_t openc = encoder ? encoder->encodeOp(op) : op;
int oplen = getOpLength(LuauOpcode(op));
uint8_t openc = encoder->encodeOp(op);
writeInt(ss, openc | (insns[i] & ~0xff));
writeInt(ss, openc | (insns[i] & ~0xff));
for (int j = 1; j < oplen; ++j)
writeInt(ss, insns[i + j]);
for (int j = 1; j < oplen; ++j)
writeInt(ss, insns[i + j]);
i += oplen;
i += oplen;
}
}
else
{
for (uint32_t insn : insns)
writeInt(ss, insn);
}
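Under the new interface (gated by FFlag::BytecodeEnc), an encoder sees the whole instruction stream once per function via encode() instead of being consulted opcode-by-opcode during serialization. A sketch of what a subclass might look like; the XOR remapping and the two-word opcode constant below are purely illustrative:

#include <cstddef>
#include <cstdint>

// Stand-in for Luau::getOpLength from Luau/BytecodeUtils.h: how many 32-bit
// words an instruction occupies (2 for opcodes that carry an aux word).
static int opLength(uint8_t op)
{
    return op == 0x0c ? 2 : 1; // hypothetical two-word opcode
}

struct SketchEncoder // would derive from Luau::BytecodeEncoder
{
    uint8_t encodeOp(uint8_t op)
    {
        return op ^ 0x55; // hypothetical opcode remapping
    }

    // Bulk hook: rewrite opcodes in place, skipping aux words so the second
    // word of a two-word instruction is never reinterpreted as an opcode.
    void encode(uint32_t* data, size_t count)
    {
        for (size_t i = 0; i < count;)
        {
            uint8_t op = uint8_t(data[i] & 0xff);
            data[i] = (data[i] & ~0xffu) | encodeOp(op);
            i += opLength(op);
        }
    }
};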
// constants

View file

@ -26,8 +26,6 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25)
LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300)
LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5)
LUAU_FASTFLAGVARIABLE(LuauCompileFixBuiltinArity, false)
LUAU_FASTFLAGVARIABLE(LuauCompileFoldMathK, false)
namespace Luau
@ -793,15 +791,10 @@ struct Compiler
return compileExprFastcallN(expr, target, targetCount, targetTop, multRet, regs, bfid);
else if (options.optimizationLevel >= 2)
{
if (FFlag::LuauCompileFixBuiltinArity)
{
// when a builtin is none-safe with matching arity, even if the last expression returns 0 or >1 arguments,
// we can rely on the behavior of the function being the same (none-safe means nil and none are interchangeable)
BuiltinInfo info = getBuiltinInfo(bfid);
if (int(expr->args.size) == info.params && (info.flags & BuiltinInfo::Flag_NoneSafe) != 0)
return compileExprFastcallN(expr, target, targetCount, targetTop, multRet, regs, bfid);
}
else if (int(expr->args.size) == getBuiltinInfo(bfid).params)
// when a builtin is none-safe with matching arity, even if the last expression returns 0 or >1 arguments,
// we can rely on the behavior of the function being the same (none-safe means nil and none are interchangeable)
BuiltinInfo info = getBuiltinInfo(bfid);
if (int(expr->args.size) == info.params && (info.flags & BuiltinInfo::Flag_NoneSafe) != 0)
return compileExprFastcallN(expr, target, targetCount, targetTop, multRet, regs, bfid);
}
}

View file

@ -6,8 +6,6 @@
#include <limits.h>
LUAU_FASTFLAGVARIABLE(LuauAssignmentHasCost, false)
namespace Luau
{
namespace Compile
@ -308,10 +306,7 @@ struct CostVisitor : AstVisitor
{
// unconditional 'else' may require a jump after the 'if' body
// note: this ignores cases when 'then' always terminates and also assumes comparison requires an extra instruction which may be false
if (!FFlag::LuauAssignmentHasCost)
result += 2;
else
result += 1 + (node->elsebody && !node->elsebody->is<AstStatIf>());
result += 1 + (node->elsebody && !node->elsebody->is<AstStatIf>());
return true;
}
@ -337,9 +332,6 @@ struct CostVisitor : AstVisitor
for (size_t i = 0; i < node->vars.size; ++i)
assign(node->vars.data[i]);
if (!FFlag::LuauAssignmentHasCost)
return true;
for (size_t i = 0; i < node->vars.size || i < node->values.size; ++i)
{
Cost ac;

View file

@ -6,6 +6,8 @@
#include <string>
#include <vector>
#include <stdint.h>
namespace Luau
{

View file

@ -16,6 +16,10 @@ COMPILER_SOURCES=$(wildcard Compiler/src/*.cpp)
COMPILER_OBJECTS=$(COMPILER_SOURCES:%=$(BUILD)/%.o)
COMPILER_TARGET=$(BUILD)/libluaucompiler.a
CONFIG_SOURCES=$(wildcard Config/src/*.cpp)
CONFIG_OBJECTS=$(CONFIG_SOURCES:%=$(BUILD)/%.o)
CONFIG_TARGET=$(BUILD)/libluauconfig.a
ANALYSIS_SOURCES=$(wildcard Analysis/src/*.cpp)
ANALYSIS_OBJECTS=$(ANALYSIS_SOURCES:%=$(BUILD)/%.o)
ANALYSIS_TARGET=$(BUILD)/libluauanalysis.a
@ -59,7 +63,7 @@ ifneq ($(opt),)
TESTS_ARGS+=-O$(opt)
endif
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(ANALYSIS_OBJECTS) $(CODEGEN_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(REPL_CLI_OBJECTS) $(ANALYZE_CLI_OBJECTS) $(COMPILE_CLI_OBJECTS) $(FUZZ_OBJECTS)
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(CONFIG_OBJECTS) $(ANALYSIS_OBJECTS) $(CODEGEN_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(REPL_CLI_OBJECTS) $(ANALYZE_CLI_OBJECTS) $(COMPILE_CLI_OBJECTS) $(FUZZ_OBJECTS)
EXECUTABLE_ALIASES = luau luau-analyze luau-compile luau-tests
# common flags
@ -129,13 +133,14 @@ endif
# target-specific flags
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include
$(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include
$(CONFIG_OBJECTS): CXXFLAGS+=-std=c++17 -IConfig/include -ICommon/include -IAst/include
$(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -IConfig/include
$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include -IVM/include -IVM/src # Code generation needs VM internals
$(VM_OBJECTS): CXXFLAGS+=-std=c++11 -ICommon/include -IVM/include
$(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IConfig/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -ICodeGen/include -Iextern -Iextern/isocline/include
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -Iextern
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -IConfig/include -Iextern
$(COMPILE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -ICodeGen/include
$(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -IVM/include -ICodeGen/include
@ -205,9 +210,9 @@ luau-tests: $(TESTS_TARGET)
ln -fs $^ $@
# executable targets
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(CONFIG_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(REPL_CLI_TARGET): $(REPL_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(ANALYZE_CLI_TARGET): $(ANALYZE_CLI_OBJECTS) $(ANALYSIS_TARGET) $(AST_TARGET)
$(ANALYZE_CLI_TARGET): $(ANALYZE_CLI_OBJECTS) $(ANALYSIS_TARGET) $(AST_TARGET) $(CONFIG_TARGET)
$(COMPILE_CLI_TARGET): $(COMPILE_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET)
$(TESTS_TARGET) $(REPL_CLI_TARGET) $(ANALYZE_CLI_TARGET) $(COMPILE_CLI_TARGET):
@ -223,12 +228,13 @@ fuzz-prototest: $(BUILD)/fuzz/prototest.cpp.o $(BUILD)/fuzz/protoprint.cpp.o $(B
# static library targets
$(AST_TARGET): $(AST_OBJECTS)
$(COMPILER_TARGET): $(COMPILER_OBJECTS)
$(CONFIG_TARGET): $(CONFIG_OBJECTS)
$(ANALYSIS_TARGET): $(ANALYSIS_OBJECTS)
$(CODEGEN_TARGET): $(CODEGEN_OBJECTS)
$(VM_TARGET): $(VM_OBJECTS)
$(ISOCLINE_TARGET): $(ISOCLINE_OBJECTS)
$(AST_TARGET) $(COMPILER_TARGET) $(ANALYSIS_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET):
$(AST_TARGET) $(COMPILER_TARGET) $(CONFIG_TARGET) $(ANALYSIS_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET):
ar rcs $@ $^
# object file targets

View file

@ -4,6 +4,7 @@ if(NOT ${CMAKE_VERSION} VERSION_LESS "3.19")
target_sources(Luau.Common PRIVATE
Common/include/Luau/Common.h
Common/include/Luau/Bytecode.h
Common/include/Luau/BytecodeUtils.h
Common/include/Luau/DenseHash.h
Common/include/Luau/ExperimentalFlags.h
)
@ -55,6 +56,15 @@ target_sources(Luau.Compiler PRIVATE
Compiler/src/ValueTracking.h
)
# Luau.Config Sources
target_sources(Luau.Config PRIVATE
Config/include/Luau/Config.h
Config/include/Luau/LinterConfig.h
Config/src/Config.cpp
Config/src/LinterConfig.cpp
)
# Luau.CodeGen Sources
target_sources(Luau.CodeGen PRIVATE
CodeGen/include/Luau/AddressA64.h
@ -145,7 +155,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/BuiltinDefinitions.h
Analysis/include/Luau/Cancellation.h
Analysis/include/Luau/Clone.h
Analysis/include/Luau/Config.h
Analysis/include/Luau/Constraint.h
Analysis/include/Luau/ConstraintGraphBuilder.h
Analysis/include/Luau/ConstraintSolver.h
@ -163,7 +172,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/IostreamHelpers.h
Analysis/include/Luau/JsonEmitter.h
Analysis/include/Luau/Linter.h
Analysis/include/Luau/LinterConfig.h
Analysis/include/Luau/LValue.h
Analysis/include/Luau/Metamethods.h
Analysis/include/Luau/Module.h
@ -177,6 +185,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Scope.h
Analysis/include/Luau/Simplify.h
Analysis/include/Luau/Substitution.h
Analysis/include/Luau/Subtyping.h
Analysis/include/Luau/Symbol.h
Analysis/include/Luau/ToDot.h
Analysis/include/Luau/TopoSortStatements.h
@ -207,7 +216,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/Autocomplete.cpp
Analysis/src/BuiltinDefinitions.cpp
Analysis/src/Clone.cpp
Analysis/src/Config.cpp
Analysis/src/Constraint.cpp
Analysis/src/ConstraintGraphBuilder.cpp
Analysis/src/ConstraintSolver.cpp
@ -222,7 +230,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/IostreamHelpers.cpp
Analysis/src/JsonEmitter.cpp
Analysis/src/Linter.cpp
Analysis/src/LinterConfig.cpp
Analysis/src/LValue.cpp
Analysis/src/Module.cpp
Analysis/src/Normalize.cpp
@ -232,6 +239,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/Scope.cpp
Analysis/src/Simplify.cpp
Analysis/src/Substitution.cpp
Analysis/src/Subtyping.cpp
Analysis/src/Symbol.cpp
Analysis/src/ToDot.cpp
Analysis/src/TopoSortStatements.cpp
@ -350,33 +358,32 @@ endif()
if(TARGET Luau.UnitTest)
# Luau.UnitTest Sources
target_sources(Luau.UnitTest PRIVATE
tests/AstQueryDsl.cpp
tests/AstQueryDsl.h
tests/ClassFixture.cpp
tests/ClassFixture.h
tests/ConstraintGraphBuilderFixture.cpp
tests/ConstraintGraphBuilderFixture.h
tests/Fixture.cpp
tests/Fixture.h
tests/IostreamOptional.h
tests/ScopedFlags.h
tests/AssemblyBuilderA64.test.cpp
tests/AssemblyBuilderX64.test.cpp
tests/AstJsonEncoder.test.cpp
tests/AstQuery.test.cpp
tests/AstQueryDsl.cpp
tests/AstQueryDsl.h
tests/AstVisitor.test.cpp
tests/Autocomplete.test.cpp
tests/BuiltinDefinitions.test.cpp
tests/ClassFixture.cpp
tests/ClassFixture.h
tests/CodeAllocator.test.cpp
tests/Compiler.test.cpp
tests/Config.test.cpp
tests/ConstraintGraphBuilderFixture.cpp
tests/ConstraintGraphBuilderFixture.h
tests/ConstraintSolver.test.cpp
tests/CostModel.test.cpp
tests/DataFlowGraph.test.cpp
tests/DenseHash.test.cpp
tests/Differ.test.cpp
tests/Error.test.cpp
tests/Fixture.cpp
tests/Fixture.h
tests/Frontend.test.cpp
tests/IostreamOptional.h
tests/IrBuilder.test.cpp
tests/IrCallWrapperX64.test.cpp
tests/IrRegAllocX64.test.cpp
@ -391,8 +398,10 @@ if(TARGET Luau.UnitTest)
tests/Parser.test.cpp
tests/RequireTracer.test.cpp
tests/RuntimeLimits.test.cpp
tests/ScopedFlags.h
tests/Simplify.test.cpp
tests/StringUtils.test.cpp
tests/Subtyping.test.cpp
tests/Symbol.test.cpp
tests/ToDot.test.cpp
tests/TopoSort.test.cpp

View file

@ -17,6 +17,8 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauPCallDebuggerFix, false)
/*
** {======================================================
** Error-recovery functions
@ -576,11 +578,13 @@ int luaD_pcall(lua_State* L, Pfunc func, void* u, ptrdiff_t old_top, ptrdiff_t e
if (!oldactive)
L->isactive = false;
bool yieldable = L->nCcalls <= L->baseCcalls; // Inlined logic from 'lua_isyieldable' to avoid potential for an out of line call.
// restore nCcalls before calling the debugprotectederror callback which may rely on the proper value to have been restored.
L->nCcalls = oldnCcalls;
// an error occurred, check if we have a protected error callback
if (L->global->cb.debugprotectederror)
if ((!FFlag::LuauPCallDebuggerFix || yieldable) && L->global->cb.debugprotectederror)
{
L->global->cb.debugprotectederror(L);

View file

@ -288,17 +288,17 @@ static const unsigned char kPerlinHash[257] = {151, 160, 137, 91, 90, 15, 131, 1
const float kPerlinGrad[16][3] = {{1, 1, 0}, {-1, 1, 0}, {1, -1, 0}, {-1, -1, 0}, {1, 0, 1}, {-1, 0, 1}, {1, 0, -1}, {-1, 0, -1}, {0, 1, 1},
{0, -1, 1}, {0, 1, -1}, {0, -1, -1}, {1, 1, 0}, {0, -1, 1}, {-1, 1, 0}, {0, -1, -1}};
static float perlin_fade(float t)
inline float perlin_fade(float t)
{
return t * t * t * (t * (t * 6 - 15) + 10);
}
static float perlin_lerp(float t, float a, float b)
inline float perlin_lerp(float t, float a, float b)
{
return a + t * (b - a);
}
static float perlin_grad(int hash, float x, float y, float z)
inline float perlin_grad(int hash, float x, float y, float z)
{
const float* g = kPerlinGrad[hash & 15];
return g[0] * x + g[1] * y + g[2] * z;

View file

@ -3556,8 +3556,6 @@ TEST_CASE_FIXTURE(ACFixture, "frontend_use_correct_global_scope")
TEST_CASE_FIXTURE(ACFixture, "string_completion_outside_quotes")
{
ScopedFastFlag flag{"LuauDisableCompletionOutsideQuotes", true};
loadDefinition(R"(
declare function require(path: string): any
)");

View file

@ -6978,8 +6978,6 @@ L3: RETURN R0 0
TEST_CASE("BuiltinArity")
{
ScopedFastFlag sff("LuauCompileFixBuiltinArity", true);
// by default we can't assume that we know parameter/result count for builtins as they can be overridden at runtime
CHECK_EQ("\n" + compileFunction(R"(
return math.abs(unknown())

View file

@ -8,7 +8,6 @@
#include "Luau/DenseHash.h"
#include "Luau/ModuleResolver.h"
#include "Luau/TypeInfer.h"
#include "Luau/StringUtils.h"
#include "Luau/BytecodeBuilder.h"
#include "Luau/Frontend.h"
@ -24,6 +23,8 @@ extern bool verbose;
extern bool codegen;
extern int optimizationLevel;
LUAU_FASTFLAG(LuauPCallDebuggerFix);
static lua_CompileOptions defaultOptions()
{
lua_CompileOptions copts = {};
@ -279,8 +280,6 @@ TEST_CASE("Assert")
TEST_CASE("Basic")
{
ScopedFastFlag sff("LuauCompileFixBuiltinArity", true);
runConformance("basic.lua");
}
@ -334,8 +333,6 @@ TEST_CASE("Clear")
TEST_CASE("Strings")
{
ScopedFastFlag sff("LuauCompileFixBuiltinArity", true);
runConformance("strings.lua");
}
@ -1217,15 +1214,90 @@ TEST_CASE("IfElseExpression")
runConformance("ifelseexpr.lua");
}
// Optionally returns debug info for the first Luau stack frame that is encountered on the callstack.
static std::optional<lua_Debug> getFirstLuauFrameDebugInfo(lua_State* L)
{
static std::string_view kLua = "Lua";
lua_Debug ar;
for (int i = 0; lua_getinfo(L, i, "sl", &ar); i++)
{
if (kLua == ar.what)
return ar;
}
return std::nullopt;
}
TEST_CASE("TagMethodError")
{
runConformance("tmerror.lua", [](lua_State* L) {
auto* cb = lua_callbacks(L);
static std::vector<int> expectedHits;
cb->debugprotectederror = [](lua_State* L) {
CHECK(lua_isyieldable(L));
};
});
// Loop over two modes:
// when doLuaBreak is false the test only verifies that callbacks occur on the expected lines in the Luau source
// when doLuaBreak is true the test additionally calls lua_break to ensure that breaking into the debugger doesn't cause the VM to crash
for (bool doLuaBreak : {false, true})
{
std::optional<ScopedFastFlag> sff;
if (doLuaBreak)
{
// If doLuaBreak is true then LuauPCallDebuggerFix must be enabled to avoid crashing the tests.
sff = {"LuauPCallDebuggerFix", true};
}
if (FFlag::LuauPCallDebuggerFix)
{
expectedHits = {22, 32};
}
else
{
expectedHits = {
9,
17,
17,
22,
27,
27,
32,
37,
};
}
static int index;
static bool luaBreak;
index = 0;
luaBreak = doLuaBreak;
// 'yieldCallback' doesn't do anything, but providing the callback to runConformance
// ensures that the call to lua_break doesn't cause an error to be generated because
// runConformance doesn't expect the VM to be in the state LUA_BREAK.
auto yieldCallback = [](lua_State* L) {};
runConformance(
"tmerror.lua",
[](lua_State* L) {
auto* cb = lua_callbacks(L);
cb->debugprotectederror = [](lua_State* L) {
std::optional<lua_Debug> ar = getFirstLuauFrameDebugInfo(L);
CHECK(lua_isyieldable(L));
REQUIRE(ar.has_value());
REQUIRE(index < int(std::size(expectedHits)));
CHECK(ar->currentline == expectedHits[index++]);
if (luaBreak)
{
// Cause luau execution to break when 'error' is called via 'pcall'
// This call to lua_break is a regression test for an issue where debugprotectederror
// was called on a thread that couldn't be yielded even though lua_isyieldable was true.
lua_break(L);
}
};
},
yieldCallback);
// Make sure the number of break points hit was the expected number
CHECK(index == std::size(expectedHits));
}
}
TEST_CASE("Coverage")

View file

@ -133,8 +133,6 @@ end
TEST_CASE("ControlFlow")
{
ScopedFastFlag sff("LuauAssignmentHasCost", true);
uint64_t model = modelFunction(R"(
function test(a)
while a < 0 do
@ -244,8 +242,6 @@ end
TEST_CASE("MultipleAssignments")
{
ScopedFastFlag sff("LuauAssignmentHasCost", true);
uint64_t model = modelFunction(R"(
function test(a)
local x = 0

View file

@ -1,6 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Error.h"
#include "Fixture.h"
#include "doctest.h"
using namespace Luau;
@ -13,4 +14,24 @@ TEST_CASE("TypeError_code_should_return_nonzero_code")
CHECK_GE(e.code(), 1000);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "metatable_names_show_instead_of_tables")
{
frontend.options.retainFullTypeGraphs = false;
ScopedFastFlag sff{"LuauStacklessTypeClone2", true};
CheckResult result = check(R"(
--!strict
local Account = {}
Account.__index = Account
function Account.deposit(self: Account, x: number)
self.balance += x
end
type Account = typeof(setmetatable({} :: { balance: number }, Account))
local x: Account = 5
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Type 'number' could not be converted into 'Account'", toString(result.errors[0]));
}
TEST_SUITE_END();

View file

@ -883,8 +883,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "PropagateThroughTvalue")
bb_0:
STORE_TAG R0, tnumber
STORE_DOUBLE R0, 0.5
%2 = LOAD_TVALUE R0
STORE_TVALUE R1, %2
STORE_SPLIT_TVALUE R1, tnumber, 0.5
STORE_TAG R3, tnumber
STORE_DOUBLE R3, 0.5
RETURN 0u
@ -991,6 +990,52 @@ bb_fallback_1:
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "RememberNewTableState")
{
IrOp block = build.block(IrBlockKind::Internal);
IrOp fallback = build.block(IrBlockKind::Fallback);
build.beginBlock(block);
IrOp newtable = build.inst(IrCmd::NEW_TABLE, build.constUint(16), build.constUint(32));
build.inst(IrCmd::STORE_POINTER, build.vmReg(0), newtable);
IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(0));
build.inst(IrCmd::CHECK_NO_METATABLE, table, fallback);
build.inst(IrCmd::CHECK_READONLY, table, fallback);
build.inst(IrCmd::CHECK_ARRAY_SIZE, table, build.constInt(14), fallback);
build.inst(IrCmd::SET_TABLE, build.vmReg(1), build.vmReg(0), build.constUint(13)); // Invalidate table knowledge
build.inst(IrCmd::CHECK_NO_METATABLE, table, fallback);
build.inst(IrCmd::CHECK_READONLY, table, fallback);
build.inst(IrCmd::CHECK_ARRAY_SIZE, table, build.constInt(14), fallback);
build.inst(IrCmd::RETURN, build.constUint(0));
build.beginBlock(fallback);
build.inst(IrCmd::RETURN, build.constUint(1));
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
%0 = NEW_TABLE 16u, 32u
STORE_POINTER R0, %0
SET_TABLE R1, R0, 13u
CHECK_NO_METATABLE %0, bb_fallback_1
CHECK_READONLY %0, bb_fallback_1
CHECK_ARRAY_SIZE %0, 14i, bb_fallback_1
RETURN 0u
bb_fallback_1:
RETURN 1u
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "SkipUselessBarriers")
{
IrOp block = build.block(IrBlockKind::Internal);
@ -1586,7 +1631,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "InvalidateReglinkVersion")
build.inst(IrCmd::STORE_TAG, build.vmReg(2), build.constTag(tstring));
IrOp tv2 = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(2));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), tv2);
IrOp ft = build.inst(IrCmd::NEW_TABLE);
IrOp ft = build.inst(IrCmd::NEW_TABLE, build.constUint(0), build.constUint(0));
build.inst(IrCmd::STORE_POINTER, build.vmReg(2), ft);
build.inst(IrCmd::STORE_TAG, build.vmReg(2), build.constTag(ttable));
IrOp tv1 = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1));
@ -1606,7 +1651,7 @@ bb_0:
STORE_TAG R2, tstring
%1 = LOAD_TVALUE R2
STORE_TVALUE R1, %1
%3 = NEW_TABLE
%3 = NEW_TABLE 0u, 0u
STORE_POINTER R2, %3
STORE_TAG R2, ttable
STORE_TVALUE R0, %1
@ -1811,8 +1856,8 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "PartialStoreInvalidation")
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), build.inst(IrCmd::LOAD_TVALUE, build.vmReg(0)));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(0), build.constDouble(0.5));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), build.inst(IrCmd::LOAD_TVALUE, build.vmReg(0))); // Should be reloaded
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tboolean));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), build.inst(IrCmd::LOAD_TVALUE, build.vmReg(0))); // Should be reloaded
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(1), build.inst(IrCmd::LOAD_TVALUE, build.vmReg(0)));
build.inst(IrCmd::RETURN, build.constUint(0));
@ -1826,9 +1871,8 @@ bb_0:
STORE_DOUBLE R0, 0.5
%3 = LOAD_TVALUE R0
STORE_TVALUE R1, %3
STORE_TAG R0, tboolean
%6 = LOAD_TVALUE R0
STORE_TVALUE R1, %6
STORE_TAG R0, tnumber
STORE_SPLIT_TVALUE R1, tnumber, 0.5
RETURN 0u
)");
@ -2239,6 +2283,32 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification3")
CHECK(build.function.cfg.idoms == std::vector<uint32_t>{~0u, 0, 0, 0, 2, 0, 4, 0});
}
// 'Static Single Assignment Book' Figure 4.1
TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification4")
{
defineCfgTree({{1}, {2, 10}, {3, 7}, {4}, {5}, {4, 6}, {1}, {8}, {5, 9}, {7}, {}});
IdfContext ctx;
computeIteratedDominanceFrontierForDefs(ctx, build.function, {0, 2, 3, 6}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
CHECK(ctx.idf == std::vector<uint32_t>{1, 4, 5});
}
// 'Static Single Assignment Book' Figure 4.5
TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification4")
{
defineCfgTree({{1}, {2}, {3, 7}, {4, 5}, {6}, {6}, {8}, {8}, {9}, {10, 11}, {11}, {9, 12}, {2}});
IdfContext ctx;
computeIteratedDominanceFrontierForDefs(ctx, build.function, {4, 5, 7, 12}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
CHECK(ctx.idf == std::vector<uint32_t>{2, 6, 8});
// Pruned form, when variable is only live-in in limited set of blocks
computeIteratedDominanceFrontierForDefs(ctx, build.function, {4, 5, 7, 12}, {6, 8, 9});
CHECK(ctx.idf == std::vector<uint32_t>{6, 8});
}
TEST_SUITE_END();
TEST_SUITE_BEGIN("ValueNumbering");
@ -2484,4 +2554,105 @@ bb_0:
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "TValueLoadToSplitStore")
{
IrOp entry = build.block(IrBlockKind::Internal);
IrOp fallback = build.block(IrBlockKind::Fallback);
build.beginBlock(entry);
IrOp op1 = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(0));
IrOp op1v2 = build.inst(IrCmd::ADD_NUM, op1, build.constDouble(4.0));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(1), op1v2);
build.inst(IrCmd::STORE_TAG, build.vmReg(1), build.constTag(tnumber));
// Check that this TValue store will be replaced by a split store
IrOp tv = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(2), tv);
// Check that tag and value can be extracted from R2 now (removing the fallback)
IrOp tag2 = build.inst(IrCmd::LOAD_TAG, build.vmReg(2));
build.inst(IrCmd::CHECK_TAG, tag2, build.constTag(tnumber), fallback);
IrOp op2 = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(2));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(3), op2);
build.inst(IrCmd::STORE_TAG, build.vmReg(3), build.constTag(tnumber));
build.inst(IrCmd::RETURN, build.vmReg(1), build.constInt(1));
build.beginBlock(fallback);
build.inst(IrCmd::RETURN, build.vmReg(2), build.constInt(1));
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
%1 = ADD_NUM %0, 4
STORE_DOUBLE R1, %1
STORE_TAG R1, tnumber
STORE_SPLIT_TVALUE R2, tnumber, %1
STORE_DOUBLE R3, %1
STORE_TAG R3, tnumber
RETURN R1, 1i
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "TagStoreUpdatesValueVersion")
{
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
IrOp op1 = build.inst(IrCmd::LOAD_POINTER, build.vmReg(0));
build.inst(IrCmd::STORE_POINTER, build.vmReg(1), op1);
build.inst(IrCmd::STORE_TAG, build.vmReg(1), build.constTag(tstring));
IrOp str = build.inst(IrCmd::LOAD_POINTER, build.vmReg(1));
build.inst(IrCmd::STORE_POINTER, build.vmReg(2), str);
build.inst(IrCmd::STORE_TAG, build.vmReg(2), build.constTag(tstring));
build.inst(IrCmd::RETURN, build.vmReg(1), build.constInt(1));
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
%0 = LOAD_POINTER R0
STORE_POINTER R1, %0
STORE_TAG R1, tstring
STORE_POINTER R2, %0
STORE_TAG R2, tstring
RETURN R1, 1i
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "TagStoreUpdatesSetUpval")
{
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(0), build.constDouble(0.5));
build.inst(IrCmd::SET_UPVALUE, build.vmUpvalue(0), build.vmReg(0), build.undef());
build.inst(IrCmd::RETURN, build.vmReg(0), build.constInt(0));
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
STORE_TAG R0, tnumber
STORE_DOUBLE R0, 0.5
SET_UPVALUE U0, R0, tnumber
RETURN R0, 0i
)");
}
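The constant tag that this test locks in pays off at lowering time: the SET_UPVALUE lowering shown earlier in this diff only emits the GC write barrier when the tag operand is undef or names a collectable type. A compact sketch of that decision (parameter names are hypothetical; firstGcTag plays the role of LUA_TSTRING):

#include <cstdint>

// knownTag is 0xff when the tag is unknown at compile time.
bool needsUpvalueBarrier(uint8_t knownTag, uint8_t firstGcTag)
{
    // Unknown tag: stay conservative. Known tag: barrier only matters for
    // heap-allocated values, mirroring `Undef || isGCO(tagOp(inst.c))`.
    return knownTag == 0xff || knownTag >= firstGcTag;
}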
TEST_SUITE_END();

View file

@ -14,7 +14,7 @@
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauStacklessTypeClone)
LUAU_FASTFLAG(LuauStacklessTypeClone2)
TEST_SUITE_BEGIN("ModuleTests");
@ -150,8 +150,8 @@ TEST_CASE_FIXTURE(Fixture, "deepClone_cyclic_table")
REQUIRE(methodReturnType);
CHECK_MESSAGE(methodReturnType == cloneTy, toString(methodType, {true}) << " should be pointer identical to " << toString(cloneTy, {true}));
CHECK_EQ(FFlag::LuauStacklessTypeClone ? 1 : 2, dest.typePacks.size()); // one for the function args, and another for its return type
CHECK_EQ(2, dest.types.size()); // One table and one function
CHECK_EQ(2, dest.typePacks.size()); // one for the function args, and another for its return type
CHECK_EQ(2, dest.types.size()); // One table and one function
}
TEST_CASE_FIXTURE(Fixture, "deepClone_cyclic_table_2")
@ -336,7 +336,7 @@ TEST_CASE_FIXTURE(Fixture, "clone_recursion_limit")
int limit = 400;
#endif
ScopedFastFlag sff{"LuauStacklessTypeClone", false};
ScopedFastFlag sff{"LuauStacklessTypeClone2", false};
ScopedFastInt luauTypeCloneRecursionLimit{"LuauTypeCloneRecursionLimit", limit};
TypeArena src;
@ -360,7 +360,7 @@ TEST_CASE_FIXTURE(Fixture, "clone_recursion_limit")
TEST_CASE_FIXTURE(Fixture, "clone_iteration_limit")
{
ScopedFastFlag sff{"LuauStacklessTypeClone", true};
ScopedFastFlag sff{"LuauStacklessTypeClone2", true};
ScopedFastInt sfi{"LuauTypeCloneIterationLimit", 500};
TypeArena src;
@ -501,4 +501,32 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_types_of_reexported_values")
CHECK(tableA->props["a"].type() == tableB->props["b"].type());
}
TEST_CASE_FIXTURE(BuiltinsFixture, "clone_table_bound_to_table_bound_to_table")
{
TypeArena arena;
TypeId a = arena.addType(TableType{TableState::Free, TypeLevel{}});
getMutable<TableType>(a)->name = "a";
TypeId b = arena.addType(TableType{TableState::Free, TypeLevel{}});
getMutable<TableType>(b)->name = "b";
TypeId c = arena.addType(TableType{TableState::Free, TypeLevel{}});
getMutable<TableType>(c)->name = "c";
getMutable<TableType>(a)->boundTo = b;
getMutable<TableType>(b)->boundTo = c;
TypeArena dest;
CloneState state{builtinTypes};
TypeId res = clone(a, dest, state);
REQUIRE(dest.types.size() == 1);
auto tableA = get<TableType>(res);
REQUIRE_MESSAGE(tableA, "Expected table, got " << res);
REQUIRE(tableA->name == "c");
REQUIRE(!tableA->boundTo);
}
TEST_SUITE_END();
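The clone_table_bound_to_table_bound_to_table test above pins down that cloning flattens chains of bound tables: following a -> b -> c lands on the terminal table, and only that one is materialized in the destination arena. The invariant in miniature (not the real types):

struct Node
{
    const Node* boundTo = nullptr;
    const char* name = "";
};

const Node* followBound(const Node* t)
{
    while (t->boundTo)
        t = t->boundTo; // a -> b -> c resolves to c
    return t;
}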

tests/Subtyping.test.cpp (new file, 352 lines)
View file

@ -0,0 +1,352 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "doctest.h"
#include "Fixture.h"
#include "Luau/Subtyping.h"
using namespace Luau;
struct SubtypeFixture : Fixture
{
TypeArena arena;
InternalErrorReporter ice;
UnifierSharedState sharedState{&ice};
Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
Subtyping subtyping{builtinTypes, NotNull{&normalizer}};
TypePackId pack(std::initializer_list<TypeId> tys)
{
return arena.addTypePack(tys);
}
TypePackId pack(std::initializer_list<TypeId> tys, TypePackVariant tail)
{
return arena.addTypePack(tys, arena.addTypePack(std::move(tail)));
}
TypeId fn(std::initializer_list<TypeId> args, std::initializer_list<TypeId> rets)
{
return arena.addType(FunctionType{pack(args), pack(rets)});
}
TypeId fn(std::initializer_list<TypeId> argHead, TypePackVariant argTail, std::initializer_list<TypeId> rets)
{
return arena.addType(FunctionType{pack(argHead, std::move(argTail)), pack(rets)});
}
TypeId fn(std::initializer_list<TypeId> args, std::initializer_list<TypeId> retHead, TypePackVariant retTail)
{
return arena.addType(FunctionType{pack(args), pack(retHead, std::move(retTail))});
}
TypeId fn(std::initializer_list<TypeId> argHead, TypePackVariant argTail, std::initializer_list<TypeId> retHead, TypePackVariant retTail)
{
return arena.addType(FunctionType{pack(argHead, std::move(argTail)), pack(retHead, std::move(retTail))});
}
SubtypingGraph isSubtype(TypeId subTy, TypeId superTy)
{
return subtyping.isSubtype(subTy, superTy);
}
TypeId helloType = arena.addType(SingletonType{StringSingleton{"hello"}});
TypeId helloType2 = arena.addType(SingletonType{StringSingleton{"hello"}});
TypeId worldType = arena.addType(SingletonType{StringSingleton{"world"}});
TypeId helloOrWorldType = arena.addType(UnionType{{helloType, worldType}});
TypeId trueOrFalseType = arena.addType(UnionType{{builtinTypes->trueType, builtinTypes->falseType}});
TypeId helloAndWorldType = arena.addType(IntersectionType{{helloType, worldType}});
TypeId booleanAndTrueType = arena.addType(IntersectionType{{builtinTypes->booleanType, builtinTypes->trueType}});
// (number) -> string
const TypeId numberToStringType = fn(
{builtinTypes->numberType},
{builtinTypes->stringType}
);
// (unknown) -> string
const TypeId unknownToStringType = fn(
{builtinTypes->unknownType},
{builtinTypes->stringType}
);
// (number) -> unknown
const TypeId numberToUnknownType = fn(
{builtinTypes->numberType},
{builtinTypes->unknownType}
);
// (number) -> (string, string)
const TypeId numberToTwoStringsType = fn(
{builtinTypes->numberType},
{builtinTypes->stringType, builtinTypes->stringType}
);
// (number) -> (string, unknown)
const TypeId numberToStringAndUnknownType = fn(
{builtinTypes->numberType},
{builtinTypes->stringType, builtinTypes->unknownType}
);
// (number, number) -> string
const TypeId numberNumberToStringType = fn(
{builtinTypes->numberType, builtinTypes->numberType},
{builtinTypes->stringType}
);
// (unknown, number) -> string
const TypeId unknownNumberToStringType = fn(
{builtinTypes->unknownType, builtinTypes->numberType},
{builtinTypes->stringType}
);
// (number, string) -> string
const TypeId numberAndStringToStringType = fn(
{builtinTypes->numberType, builtinTypes->stringType},
{builtinTypes->stringType}
);
// (number, ...string) -> string
const TypeId numberAndStringsToStringType = fn(
{builtinTypes->numberType}, VariadicTypePack{builtinTypes->stringType},
{builtinTypes->stringType}
);
// (number, ...string?) -> string
const TypeId numberAndOptionalStringsToStringType = fn(
{builtinTypes->numberType}, VariadicTypePack{builtinTypes->optionalStringType},
{builtinTypes->stringType}
);
};
#define CHECK_IS_SUBTYPE(left, right) \
do \
{ \
const auto& leftTy = (left); \
const auto& rightTy = (right); \
SubtypingGraph result = isSubtype(leftTy, rightTy); \
CHECK_MESSAGE(result.isSubtype, "Expected " << leftTy << " <: " << rightTy); \
} while (0)
#define CHECK_IS_NOT_SUBTYPE(left, right) \
do \
{ \
const auto& leftTy = (left); \
const auto& rightTy = (right); \
SubtypingGraph result = isSubtype(leftTy, rightTy); \
CHECK_MESSAGE(!result.isSubtype, "Expected " << leftTy << " </: " << rightTy); \
} while (0)
#define CHECK_IS_ERROR_SUPPRESSING(left, right) \
do \
{ \
const auto& leftTy = (left); \
const auto& rightTy = (right); \
SubtypingGraph result = isSubtype(leftTy, rightTy); \
CHECK_MESSAGE(result.isErrorSuppressing, "Expected " << leftTy << " to error-suppress " << rightTy); \
} while (0)
#define CHECK_IS_NOT_ERROR_SUPPRESSING(left, right) \
do \
{ \
const auto& leftTy = (left); \
const auto& rightTy = (right); \
SubtypingGraph result = isSubtype(leftTy, rightTy); \
CHECK_MESSAGE(!result.isErrorSuppressing, "Expected " << leftTy << " to not error-suppress " << rightTy); \
} while (0)
TEST_SUITE_BEGIN("Subtyping");
// We would like to write </: to mean "is not a subtype," but rotest does not like that at all, so we instead use <!:
TEST_CASE_FIXTURE(SubtypeFixture, "number <: any")
{
CHECK_IS_SUBTYPE(builtinTypes->numberType, builtinTypes->anyType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "any <!: unknown")
{
SubtypingGraph result = isSubtype(builtinTypes->anyType, builtinTypes->unknownType);
CHECK(!result.isSubtype);
CHECK(result.isErrorSuppressing);
}
TEST_CASE_FIXTURE(SubtypeFixture, "number? <: unknown")
{
CHECK_IS_SUBTYPE(builtinTypes->optionalNumberType, builtinTypes->unknownType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "number <: unknown")
{
CHECK_IS_SUBTYPE(builtinTypes->numberType, builtinTypes->unknownType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "number <: number")
{
CHECK_IS_SUBTYPE(builtinTypes->numberType, builtinTypes->numberType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "number <!: string")
{
CHECK_IS_NOT_SUBTYPE(builtinTypes->numberType, builtinTypes->stringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "number <: number?")
{
CHECK_IS_SUBTYPE(builtinTypes->numberType, builtinTypes->optionalNumberType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" <: string")
{
CHECK_IS_SUBTYPE(helloType, builtinTypes->stringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "string <!: \"hello\"")
{
CHECK_IS_NOT_SUBTYPE(builtinTypes->stringType, helloType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" <: \"hello\"")
{
CHECK_IS_SUBTYPE(helloType, helloType2);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true <: boolean")
{
CHECK_IS_SUBTYPE(builtinTypes->trueType, builtinTypes->booleanType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true <: true | false")
{
CHECK_IS_SUBTYPE(builtinTypes->trueType, trueOrFalseType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true | false <!: true")
{
CHECK_IS_NOT_SUBTYPE(trueOrFalseType, builtinTypes->trueType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true | false <: boolean")
{
CHECK_IS_SUBTYPE(trueOrFalseType, builtinTypes->booleanType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true | false <: true | false")
{
CHECK_IS_SUBTYPE(trueOrFalseType, trueOrFalseType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" | \"world\" <: number")
{
CHECK_IS_NOT_SUBTYPE(helloOrWorldType, builtinTypes->numberType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "true <: boolean & true")
{
CHECK_IS_SUBTYPE(builtinTypes->trueType, booleanAndTrueType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "boolean & true <: true")
{
CHECK_IS_SUBTYPE(booleanAndTrueType, builtinTypes->trueType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "boolean & true <: boolean & true")
{
CHECK_IS_SUBTYPE(booleanAndTrueType, booleanAndTrueType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" & \"world\" <: number")
{
CHECK_IS_SUBTYPE(helloAndWorldType, builtinTypes->numberType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "false <!: boolean & true")
{
CHECK_IS_NOT_SUBTYPE(builtinTypes->falseType, booleanAndTrueType);
}
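// "hello" & "world" is uninhabited (no value is both singletons at once), and
// an uninhabited type is vacuously a subtype of everything, including number.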
TEST_CASE_FIXTURE(SubtypeFixture, "(unknown) -> string <: (number) -> string")
{
CHECK_IS_SUBTYPE(unknownToStringType, numberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> string <!: (unknown) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberToStringType, unknownToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, number) -> string <!: (number) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberNumberToStringType, numberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> string <!: (number, number) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberToStringType, numberNumberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, number) -> string <!: (unknown, number) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberNumberToStringType, unknownNumberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(unknown, number) -> string <: (number, number) -> string")
{
CHECK_IS_SUBTYPE(unknownNumberToStringType, numberNumberToStringType);
}
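// Function parameters are contravariant: widening a parameter (number to
// unknown) yields a subtype, since a function accepting unknown can be called
// anywhere a function accepting number is expected; narrowing a parameter does not.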
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> (string, unknown) <!: (number) -> (string, string)")
{
CHECK_IS_NOT_SUBTYPE(numberToStringAndUnknownType, numberToTwoStringsType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> (string, string) <: (number) -> (string, unknown)")
{
CHECK_IS_SUBTYPE(numberToTwoStringsType, numberToStringAndUnknownType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> (string, string) <!: (number) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberToTwoStringsType, numberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> string <!: (number) -> (string, string)")
{
CHECK_IS_NOT_SUBTYPE(numberToStringType, numberToTwoStringsType);
}
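// Return packs are covariant and must agree in arity: promising the narrower
// return (string where unknown is allowed) yields a subtype, while dropping or
// adding a return value breaks the relation in both directions.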
TEST_CASE_FIXTURE(SubtypeFixture, "(number, ...string) -> string <: (number) -> string")
{
CHECK_IS_SUBTYPE(numberAndStringsToStringType, numberToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number) -> string <!: (number, ...string) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberToStringType, numberAndStringsToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, ...string?) -> string <: (number, ...string) -> string")
{
CHECK_IS_SUBTYPE(numberAndOptionalStringsToStringType, numberAndStringsToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, ...string) -> string <!: (number, ...string?) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberAndStringsToStringType, numberAndOptionalStringsToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, ...string) -> string <: (number, string) -> string")
{
CHECK_IS_SUBTYPE(numberAndStringsToStringType, numberAndStringToStringType);
}
TEST_CASE_FIXTURE(SubtypeFixture, "(number, string) -> string <!: (number, ...string) -> string")
{
CHECK_IS_NOT_SUBTYPE(numberAndStringToStringType, numberAndStringsToStringType);
}
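// A variadic tail absorbs fixed parameters: (number, ...string) -> string
// accepts every call that (number, string) -> string accepts, so it is the
// subtype; the reverse fails because the variadic form also accepts calls the
// fixed-arity form cannot.

// Illustrative extra case, not part of the original suite: a singleton should
// also flow through a union on the right-hand side. Assumes BuiltinTypes
// exposes optionalStringType, as it does optionalNumberType above.
TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" <: string?")
{
CHECK_IS_SUBTYPE(helloType, builtinTypes->optionalStringType);
}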
TEST_SUITE_END();

View file

@@ -1212,6 +1212,71 @@ f(function(x) return x * 2 end)
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "infer_generic_function_function_argument")
{
CheckResult result = check(R"(
local function sum<a>(x: a, y: a, f: (a, a) -> a) return f(x, y) end
return sum(2, 3, function(a, b) return a + b end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
result = check(R"(
local function map<a, b>(arr: {a}, f: (a) -> b) local r = {} for i,v in ipairs(arr) do table.insert(r, f(v)) end return r end
local a = {1, 2, 3}
local r = map(a, function(a) return a + a > 100 end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
REQUIRE_EQ("{boolean}", toString(requireType("r")));
check(R"(
local function foldl<a, b>(arr: {a}, init: b, f: (b, a) -> b) local r = init for i,v in ipairs(arr) do r = f(r, v) end return r end
local a = {1, 2, 3}
local r = foldl(a, {s=0,c=0}, function(a, b) return {s = a.s + b, c = a.c + 1} end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
REQUIRE_EQ("{ c: number, s: number }", toString(requireType("r")));
}
TEST_CASE_FIXTURE(Fixture, "infer_generic_function_function_argument_overloaded")
{
CheckResult result = check(R"(
local function g1<T>(a: T, f: (T) -> T) return f(a) end
local function g2<T>(a: T, b: T, f: (T, T) -> T) return f(a, b) end
local g12: typeof(g1) & typeof(g2)
g12(1, function(x) return x + x end)
g12(1, 2, function(x, y) return x + y end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
result = check(R"(
local function g1<T>(a: T, f: (T) -> T) return f(a) end
local function g2<T>(a: T, b: T, f: (T, T) -> T) return f(a, b) end
local g12: typeof(g1) & typeof(g2)
g12({x=1}, function(x) return {x=-x.x} end)
g12({x=1}, {x=2}, function(x, y) return {x=x.x + y.x} end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "infer_generic_lib_function_function_argument")
{
CheckResult result = check(R"(
local a = {{x=4}, {x=7}, {x=1}}
table.sort(a, function(x, y) return x.x < y.x end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "variadic_any_is_compatible_with_a_generic_TypePack")
{
CheckResult result = check(R"(

View file

@@ -407,7 +407,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "cycle_between_object_constructor_and_alias")
TEST_CASE_FIXTURE(BuiltinsFixture, "promise_type_error_too_complex")
{
-ScopedFastFlag sff{"LuauStacklessTypeClone", true};
+ScopedFastFlag sff{"LuauStacklessTypeClone2", true};
frontend.options.retainFullTypeGraphs = false;

View file

@@ -112,4 +112,22 @@ end
assert(pcall(fuzzfail10) == false)
local function fuzzfail11(x, ...)
return bit32.arshift(bit32.bnot(x),(...))
end
assert(fuzzfail11(0xffff0000, 8) == 0xff)
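-- The next two cases assign 8 and then 16 `not` results in one multiple
-- assignment; they appear to probe register handling for wide assignments
-- (explanatory comment, not part of the original fuzz case).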
local function fuzzfail12()
_,_,_,_,_,_,_,_ = not _, not _, not _, not _, not _, not _, not _, not _
end
assert(pcall(fuzzfail12) == true)
local function fuzzfail13()
_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ = not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _, not _
end
assert(pcall(fuzzfail13) == true)
return('OK')

View file

@@ -12,4 +12,30 @@ pcall(function()
testtable.missingmethod()
end)
--
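-- The cases below run pcall in three ways (wrapping a function that errors,
-- calling error through pcall, and pcall(error) with no message) inside an
-- __index handler, exercising error handling during a missing-member lookup
-- (explanatory comment added for clarity).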
local testtable2 = {}
setmetatable(testtable2, { __index = function() pcall(function() error("Error") end) end })
local m2 = testtable2.missingmethod
pcall(function()
testtable2.missingmethod()
end)
--
local testtable3 = {}
setmetatable(testtable3, { __index = function() pcall(error, "Error") end })
local m3 = testtable3.missingmethod
pcall(function()
testtable3.missingmethod()
end)
--
local testtable4 = {}
setmetatable(testtable4, { __index = function() pcall(error) end })
local m4 = testtable4.missingmember
return('OK')

View file

@@ -254,6 +254,9 @@ TypeInferFunctions.higher_order_function_2
TypeInferFunctions.higher_order_function_4
TypeInferFunctions.improved_function_arg_mismatch_errors
TypeInferFunctions.infer_anonymous_function_arguments
TypeInferFunctions.infer_generic_function_function_argument
TypeInferFunctions.infer_generic_function_function_argument_overloaded
TypeInferFunctions.infer_generic_lib_function_function_argument
TypeInferFunctions.infer_anonymous_function_arguments_outside_call
TypeInferFunctions.infer_that_function_does_not_return_a_table
TypeInferFunctions.luau_subtyping_is_np_hard