Mirror of https://github.com/luau-lang/luau.git, synced 2024-12-12 13:00:38 +00:00

Sync to upstream/release/593

parent a811050505
commit 397dbb1188

91 changed files with 1619 additions and 382 deletions
@@ -19,6 +19,7 @@ static const std::unordered_map<AstExprBinary::Op, const char*> kBinaryOpMetamethods
     {AstExprBinary::Op::Sub, "__sub"},
     {AstExprBinary::Op::Mul, "__mul"},
     {AstExprBinary::Op::Div, "__div"},
+    {AstExprBinary::Op::FloorDiv, "__idiv"},
     {AstExprBinary::Op::Pow, "__pow"},
     {AstExprBinary::Op::Mod, "__mod"},
     {AstExprBinary::Op::Concat, "__concat"},
@@ -2,6 +2,7 @@
 #pragma once

 #include "Luau/Type.h"
 #include "Luau/TypePack.h"
 #include "Luau/UnifierSharedState.h"

 #include <vector>

@@ -14,8 +15,10 @@ template<typename A, typename B>
 struct TryPair;
 struct InternalErrorReporter;

+class TypeIds;
 class Normalizer;
 struct NormalizedType;
+struct NormalizedClassType;

 struct SubtypingResult
 {

@@ -30,6 +33,8 @@ struct SubtypingResult
     void andAlso(const SubtypingResult& other);
     void orElse(const SubtypingResult& other);

+    // Only negates the `isSubtype`.
+    static SubtypingResult negate(const SubtypingResult& result);
     static SubtypingResult all(const std::vector<SubtypingResult>& results);
     static SubtypingResult any(const std::vector<SubtypingResult>& results);
 };

@@ -63,7 +68,7 @@ struct Subtyping
     DenseHashMap<TypePackId, TypePackId> mappedGenericPacks{nullptr};

     using SeenSet = std::unordered_set<std::pair<TypeId, TypeId>, TypeIdPairHash>;

     SeenSet seenTypes;

     // TODO cache

@@ -88,8 +93,19 @@ private:
     SubtypingResult isSubtype_(const SingletonType* subSingleton, const PrimitiveType* superPrim);
     SubtypingResult isSubtype_(const SingletonType* subSingleton, const SingletonType* superSingleton);
     SubtypingResult isSubtype_(const TableType* subTable, const TableType* superTable);
+    SubtypingResult isSubtype_(const MetatableType* subMt, const MetatableType* superMt);
+    SubtypingResult isSubtype_(const MetatableType* subMt, const TableType* superTable);
+    SubtypingResult isSubtype_(const ClassType* subClass, const ClassType* superClass);
+    SubtypingResult isSubtype_(const ClassType* subClass, const TableType* superTable); // Actually a class <: shape.
     SubtypingResult isSubtype_(const FunctionType* subFunction, const FunctionType* superFunction);
+    SubtypingResult isSubtype_(const PrimitiveType* subPrim, const TableType* superTable);
+    SubtypingResult isSubtype_(const SingletonType* subSingleton, const TableType* superTable);
+
     SubtypingResult isSubtype_(const NormalizedType* subNorm, const NormalizedType* superNorm);
+    SubtypingResult isSubtype_(const NormalizedClassType& subClass, const NormalizedClassType& superClass, const TypeIds& superTables);
+    SubtypingResult isSubtype_(const TypeIds& subTypes, const TypeIds& superTypes);
+
+    SubtypingResult isSubtype_(const VariadicTypePack* subVariadic, const VariadicTypePack* superVariadic);

     bool bindGeneric(TypeId subTp, TypeId superTp);
     bool bindGeneric(TypePackId subTp, TypePackId superTp);
@@ -8,6 +8,8 @@

 #include <math.h>

+LUAU_FASTFLAG(LuauFloorDivision)
+
 namespace Luau
 {

@@ -514,6 +516,9 @@ struct AstJsonEncoder : public AstVisitor
             return writeString("Mul");
         case AstExprBinary::Div:
             return writeString("Div");
+        case AstExprBinary::FloorDiv:
+            LUAU_ASSERT(FFlag::LuauFloorDivision);
+            return writeString("FloorDiv");
         case AstExprBinary::Mod:
             return writeString("Mod");
         case AstExprBinary::Pow:

@@ -536,6 +541,8 @@ struct AstJsonEncoder : public AstVisitor
             return writeString("And");
         case AstExprBinary::Or:
             return writeString("Or");
+        default:
+            LUAU_ASSERT(!"Unknown Op");
         }
     }
@@ -15,6 +15,7 @@
 LUAU_FASTFLAG(DebugLuauReadWriteProperties)
 LUAU_FASTFLAGVARIABLE(LuauAnonymousAutofilled1, false);
 LUAU_FASTFLAGVARIABLE(LuauAutocompleteLastTypecheck, false)
+LUAU_FASTFLAGVARIABLE(LuauAutocompleteDoEnd, false)
 LUAU_FASTFLAGVARIABLE(LuauAutocompleteStringLiteralBounds, false);

 static const std::unordered_set<std::string> kStatementStartingKeywords = {

@@ -1089,14 +1090,19 @@ static AutocompleteEntryMap autocompleteStatement(
     {
         if (AstStatForIn* statForIn = (*it)->as<AstStatForIn>(); statForIn && !statForIn->hasEnd)
             result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
-        if (AstStatFor* statFor = (*it)->as<AstStatFor>(); statFor && !statFor->hasEnd)
+        else if (AstStatFor* statFor = (*it)->as<AstStatFor>(); statFor && !statFor->hasEnd)
             result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
-        if (AstStatIf* statIf = (*it)->as<AstStatIf>(); statIf && !statIf->hasEnd)
+        else if (AstStatIf* statIf = (*it)->as<AstStatIf>(); statIf && !statIf->hasEnd)
             result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
-        if (AstStatWhile* statWhile = (*it)->as<AstStatWhile>(); statWhile && !statWhile->hasEnd)
+        else if (AstStatWhile* statWhile = (*it)->as<AstStatWhile>(); statWhile && !statWhile->hasEnd)
             result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
-        if (AstExprFunction* exprFunction = (*it)->as<AstExprFunction>(); exprFunction && !exprFunction->hasEnd)
+        else if (AstExprFunction* exprFunction = (*it)->as<AstExprFunction>(); exprFunction && !exprFunction->hasEnd)
             result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
+        if (FFlag::LuauAutocompleteDoEnd)
+        {
+            if (AstStatBlock* exprBlock = (*it)->as<AstStatBlock>(); exprBlock && !exprBlock->hasEnd)
+                result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
+        }
     }

     if (ancestry.size() >= 2)
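The new do-block branch above is the consumer of the AstStatBlock::hasEnd field introduced by this commit; the Ast.h, Ast.cpp, and Parser.cpp hunks later in this diff add that field and populate it from expectMatchEndAndConsume, which is how autocomplete can tell that a `do` block is still missing its `end`.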
@@ -24,6 +24,7 @@ LUAU_FASTINT(LuauCheckRecursionLimit);
 LUAU_FASTFLAG(DebugLuauLogSolverToJson);
 LUAU_FASTFLAG(DebugLuauMagicTypes);
 LUAU_FASTFLAG(LuauParseDeclareClassIndexer);
+LUAU_FASTFLAG(LuauFloorDivision);

 namespace Luau
 {

@@ -1170,7 +1171,8 @@ static bool isMetamethod(const Name& name)
 {
     return name == "__index" || name == "__newindex" || name == "__call" || name == "__concat" || name == "__unm" || name == "__add" ||
            name == "__sub" || name == "__mul" || name == "__div" || name == "__mod" || name == "__pow" || name == "__tostring" ||
-           name == "__metatable" || name == "__eq" || name == "__lt" || name == "__le" || name == "__mode" || name == "__iter" || name == "__len";
+           name == "__metatable" || name == "__eq" || name == "__lt" || name == "__le" || name == "__mode" || name == "__iter" || name == "__len" ||
+           (FFlag::LuauFloorDivision && name == "__idiv");
 }

 ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatDeclareClass* declaredClass)
@@ -22,6 +22,7 @@
 #include "Luau/VisitType.h"

 LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
+LUAU_FASTFLAG(LuauFloorDivision);

 namespace Luau
 {

@@ -719,6 +720,8 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
     // Metatables go first, even if there is primitive behavior.
     if (auto it = kBinaryOpMetamethods.find(c.op); it != kBinaryOpMetamethods.end())
     {
+        LUAU_ASSERT(FFlag::LuauFloorDivision || c.op != AstExprBinary::Op::FloorDiv);
+
         // Metatables are not the same. The metamethod will not be invoked.
         if ((c.op == AstExprBinary::Op::CompareEq || c.op == AstExprBinary::Op::CompareNe) &&
             getMetatable(leftType, builtinTypes) != getMetatable(rightType, builtinTypes))

@@ -806,9 +809,12 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
     case AstExprBinary::Op::Sub:
     case AstExprBinary::Op::Mul:
     case AstExprBinary::Op::Div:
+    case AstExprBinary::Op::FloorDiv:
     case AstExprBinary::Op::Pow:
     case AstExprBinary::Op::Mod:
     {
+        LUAU_ASSERT(FFlag::LuauFloorDivision || c.op != AstExprBinary::Op::FloorDiv);
+
         const NormalizedType* normLeftTy = normalizer->normalize(leftType);
         if (hasTypeInIntersection<FreeType>(leftType) && force)
             asMutable(leftType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : builtinTypes->numberType);
@@ -12,7 +12,6 @@
 #include <stdexcept>
 #include <type_traits>

-LUAU_FASTFLAGVARIABLE(LuauIndentTypeMismatch, false)
 LUAU_FASTINTVARIABLE(LuauIndentTypeMismatchMaxTypeLength, 10)

 static std::string wrongNumberOfArgsString(

@@ -94,31 +93,18 @@ struct ErrorConverter
                 {
                     std::string givenModuleName = fileResolver->getHumanReadableModuleName(*givenDefinitionModule);
                     std::string wantedModuleName = fileResolver->getHumanReadableModuleName(*wantedDefinitionModule);
-                    if (FFlag::LuauIndentTypeMismatch)
-                        result = constructErrorMessage(givenTypeName, wantedTypeName, givenModuleName, wantedModuleName);
-                    else
-                        result = "Type '" + givenTypeName + "' from '" + givenModuleName + "' could not be converted into '" + wantedTypeName +
-                                 "' from '" + wantedModuleName + "'";
+                    result = constructErrorMessage(givenTypeName, wantedTypeName, givenModuleName, wantedModuleName);
                 }
                 else
                 {
-                    if (FFlag::LuauIndentTypeMismatch)
-                        result = constructErrorMessage(givenTypeName, wantedTypeName, *givenDefinitionModule, *wantedDefinitionModule);
-                    else
-                        result = "Type '" + givenTypeName + "' from '" + *givenDefinitionModule + "' could not be converted into '" +
-                                 wantedTypeName + "' from '" + *wantedDefinitionModule + "'";
+                    result = constructErrorMessage(givenTypeName, wantedTypeName, *givenDefinitionModule, *wantedDefinitionModule);
                 }
             }
         }

         if (result.empty())
-        {
-            if (FFlag::LuauIndentTypeMismatch)
-                result = constructErrorMessage(givenTypeName, wantedTypeName, std::nullopt, std::nullopt);
-            else
-                result = "Type '" + givenTypeName + "' could not be converted into '" + wantedTypeName + "'";
-        }
+            result = constructErrorMessage(givenTypeName, wantedTypeName, std::nullopt, std::nullopt);

         if (tm.error)

@@ -126,7 +112,7 @@ struct ErrorConverter
         result += "\ncaused by:\n  ";

         if (!tm.reason.empty())
-            result += tm.reason + (FFlag::LuauIndentTypeMismatch ? " \n" : " ");
+            result += tm.reason + " \n";

         result += Luau::toString(*tm.error, TypeErrorToStringOptions{fileResolver});
     }
@@ -58,6 +58,15 @@ void SubtypingResult::orElse(const SubtypingResult& other)
     normalizationTooComplex |= other.normalizationTooComplex;
 }

+SubtypingResult SubtypingResult::negate(const SubtypingResult& result)
+{
+    return SubtypingResult{
+        !result.isSubtype,
+        result.isErrorSuppressing,
+        result.normalizationTooComplex,
+    };
+}
+
 SubtypingResult SubtypingResult::all(const std::vector<SubtypingResult>& results)
 {
     SubtypingResult acc{true, false};
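For intuition, SubtypingResult composes like a small boolean algebra over isSubtype: andAlso is conjunction, orElse is disjunction, and negate flips only the isSubtype bit while carrying the other flags through. A minimal usage sketch (illustrative only, not part of the commit; assumes the aggregate layout {isSubtype, isErrorSuppressing} used by the code above):

    SubtypingResult a{true, false};
    SubtypingResult b{false, false};
    a.andAlso(b);                                   // conjunction: a.isSubtype becomes false
    a.orElse(SubtypingResult{true, false});         // disjunction: a.isSubtype is true again
    SubtypingResult n = SubtypingResult::negate(b); // n.isSubtype == true; the other flags are preserved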
@@ -137,10 +146,10 @@ SubtypingResult Subtyping::isSubtype_(TypeId subTy, TypeId superTy)

     SeenSetPopper ssp{&seenTypes, typePair};

-    if (auto superUnion = get<UnionType>(superTy))
-        return isSubtype_(subTy, superUnion);
-    else if (auto subUnion = get<UnionType>(subTy))
+    if (auto subUnion = get<UnionType>(subTy))
         return isSubtype_(subUnion, superTy);
+    else if (auto superUnion = get<UnionType>(superTy))
+        return isSubtype_(subTy, superUnion);
     else if (auto superIntersection = get<IntersectionType>(superTy))
         return isSubtype_(subTy, superIntersection);
     else if (auto subIntersection = get<IntersectionType>(subTy))
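The swap matters when both sides are unions: decomposing the subtype side first is the sound direction. For example, number | string <: number | string should hold, and splitting the subtype first reduces it to number <: number | string and string <: number | string, both true; splitting the supertype first would instead ask whether the entire left-hand union fits a single arm of the right-hand union.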
@@ -196,6 +205,18 @@ SubtypingResult Subtyping::isSubtype_(TypeId subTy, TypeId superTy)
         return isSubtype_(p);
     else if (auto p = get2<TableType, TableType>(subTy, superTy))
         return isSubtype_(p);
+    else if (auto p = get2<MetatableType, MetatableType>(subTy, superTy))
+        return isSubtype_(p);
+    else if (auto p = get2<MetatableType, TableType>(subTy, superTy))
+        return isSubtype_(p);
+    else if (auto p = get2<ClassType, ClassType>(subTy, superTy))
+        return isSubtype_(p);
+    else if (auto p = get2<ClassType, TableType>(subTy, superTy))
+        return isSubtype_(p);
+    else if (auto p = get2<PrimitiveType, TableType>(subTy, superTy))
+        return isSubtype_(p);
+    else if (auto p = get2<SingletonType, TableType>(subTy, superTy))
+        return isSubtype_(p);

     return {false};
 }

@@ -323,7 +344,7 @@ SubtypingResult Subtyping::isSubtype_(TypePackId subTp, TypePackId superTp)
             {
                 if (auto p = get2<VariadicTypePack, VariadicTypePack>(*subTail, *superTail))
                 {
-                    results.push_back(isSubtype_(p.first->ty, p.second->ty));
+                    results.push_back(isSubtype_(p));
                 }
                 else if (auto p = get2<GenericTypePack, GenericTypePack>(*subTail, *superTail))
                 {

@@ -472,7 +493,6 @@ SubtypingResult Subtyping::isSubtype_(TypeId subTy, const IntersectionType* supe

 SubtypingResult Subtyping::isSubtype_(const IntersectionType* subIntersection, TypeId superTy)
 {
-    // TODO: Semantic subtyping here.
     // As per TAPL: A & B <: T iff A <: T || B <: T
     std::vector<SubtypingResult> subtypings;
     for (TypeId ty : subIntersection)

@@ -520,6 +540,59 @@ SubtypingResult Subtyping::isSubtype_(const TableType* subTable, const TableType
     return result;
 }

+SubtypingResult Subtyping::isSubtype_(const MetatableType* subMt, const MetatableType* superMt)
+{
+    return SubtypingResult::all({
+        isSubtype_(subMt->table, superMt->table),
+        isSubtype_(subMt->metatable, superMt->metatable),
+    });
+}
+
+SubtypingResult Subtyping::isSubtype_(const MetatableType* subMt, const TableType* superTable)
+{
+    if (auto subTable = get<TableType>(subMt->table)) {
+        // Metatables cannot erase properties from the table they're attached to, so
+        // the subtyping rule for this is just if the table component is a subtype
+        // of the supertype table.
+        //
+        // There's a flaw here in that if the __index metamethod contributes a new
+        // field that would satisfy the subtyping relationship, we'll erronously say
+        // that the metatable isn't a subtype of the table, even though they have
+        // compatible properties/shapes. We'll revisit this later when we have a
+        // better understanding of how important this is.
+        return isSubtype_(subTable, superTable);
+    }
+    else
+    {
+        // TODO: This may be a case we actually hit?
+        return {false};
+    }
+}
+
+SubtypingResult Subtyping::isSubtype_(const ClassType* subClass, const ClassType* superClass)
+{
+    return {isSubclass(subClass, superClass)};
+}
+
+SubtypingResult Subtyping::isSubtype_(const ClassType* subClass, const TableType* superTable)
+{
+    SubtypingResult result{true};
+
+    for (const auto& [name, prop]: superTable->props)
+    {
+        if (auto classProp = lookupClassProp(subClass, name))
+        {
+            // Table properties are invariant
+            result.andAlso(isSubtype_(classProp->type(), prop.type()));
+            result.andAlso(isSubtype_(prop.type(), classProp->type()));
+        }
+        else
+            return SubtypingResult{false};
+    }
+
+    return result;
+}
+
 SubtypingResult Subtyping::isSubtype_(const FunctionType* subFunction, const FunctionType* superFunction)
 {
     SubtypingResult result;
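Note the pair of andAlso calls in the class-vs-table case above: because table properties are invariant, a class matches a table shape only when each named property's type is equivalent to the shape's property type (subtype in both directions), not merely a subtype of it.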
@@ -533,6 +606,47 @@ SubtypingResult Subtyping::isSubtype_(const FunctionType* subFunction, const Fun
     return result;
 }

+SubtypingResult Subtyping::isSubtype_(const PrimitiveType* subPrim, const TableType* superTable)
+{
+    SubtypingResult result{false};
+    if (subPrim->type == PrimitiveType::String)
+    {
+        if (auto metatable = getMetatable(builtinTypes->stringType, builtinTypes))
+        {
+            if (auto mttv = get<TableType>(follow(metatable)))
+            {
+                if (auto it = mttv->props.find("__index"); it != mttv->props.end())
+                {
+                    if (auto stringTable = get<TableType>(it->second.type()))
+                        result.orElse(isSubtype_(stringTable, superTable));
+                }
+            }
+        }
+    }
+
+    return result;
+}
+
+SubtypingResult Subtyping::isSubtype_(const SingletonType* subSingleton, const TableType* superTable)
+{
+    SubtypingResult result{false};
+    if (auto stringleton = get<StringSingleton>(subSingleton))
+    {
+        if (auto metatable = getMetatable(builtinTypes->stringType, builtinTypes))
+        {
+            if (auto mttv = get<TableType>(follow(metatable)))
+            {
+                if (auto it = mttv->props.find("__index"); it != mttv->props.end())
+                {
+                    if (auto stringTable = get<TableType>(it->second.type()))
+                        result.orElse(isSubtype_(stringTable, superTable));
+                }
+            }
+        }
+    }
+    return result;
+}
+
 SubtypingResult Subtyping::isSubtype_(const NormalizedType* subNorm, const NormalizedType* superNorm)
 {
     if (!subNorm || !superNorm)

@@ -540,15 +654,14 @@ SubtypingResult Subtyping::isSubtype_(const NormalizedType* subNorm, const Norma

     SubtypingResult result = isSubtype_(subNorm->tops, superNorm->tops);
     result.andAlso(isSubtype_(subNorm->booleans, superNorm->booleans));
-    // isSubtype_(subNorm->classes, superNorm->classes);
-    // isSubtype_(subNorm->classes, superNorm->tables);
+    result.andAlso(isSubtype_(subNorm->classes, superNorm->classes, superNorm->tables));
     result.andAlso(isSubtype_(subNorm->errors, superNorm->errors));
     result.andAlso(isSubtype_(subNorm->nils, superNorm->nils));
     result.andAlso(isSubtype_(subNorm->numbers, superNorm->numbers));
     result.isSubtype &= Luau::isSubtype(subNorm->strings, superNorm->strings);
     // isSubtype_(subNorm->strings, superNorm->tables);
     result.andAlso(isSubtype_(subNorm->threads, superNorm->threads));
-    // isSubtype_(subNorm->tables, superNorm->tables);
+    result.andAlso(isSubtype_(subNorm->tables, superNorm->tables));
     // isSubtype_(subNorm->tables, superNorm->strings);
     // isSubtype_(subNorm->tables, superNorm->classes);
     // isSubtype_(subNorm->functions, superNorm->functions);

@@ -557,6 +670,58 @@ SubtypingResult Subtyping::isSubtype_(const NormalizedType* subNorm, const Norma
     return result;
 }

+SubtypingResult Subtyping::isSubtype_(const NormalizedClassType& subClass, const NormalizedClassType& superClass, const TypeIds& superTables)
+{
+    for (const auto& [subClassTy, _] : subClass.classes)
+    {
+        SubtypingResult result;
+
+        for (const auto& [superClassTy, superNegations] : superClass.classes)
+        {
+            result.orElse(isSubtype_(subClassTy, superClassTy));
+            if (!result.isSubtype)
+                continue;
+
+            for (TypeId negation : superNegations)
+            {
+                result.andAlso(SubtypingResult::negate(isSubtype_(subClassTy, negation)));
+                if (result.isSubtype)
+                    break;
+            }
+        }
+
+        if (result.isSubtype)
+            continue;
+
+        for (TypeId superTableTy : superTables)
+            result.orElse(isSubtype_(subClassTy, superTableTy));
+
+        if (!result.isSubtype)
+            return result;
+    }
+
+    return {true};
+}
+
+SubtypingResult Subtyping::isSubtype_(const TypeIds& subTypes, const TypeIds& superTypes)
+{
+    std::vector<SubtypingResult> results;
+
+    for (TypeId subTy : subTypes)
+    {
+        results.emplace_back();
+        for (TypeId superTy : superTypes)
+            results.back().orElse(isSubtype_(subTy, superTy));
+    }
+
+    return SubtypingResult::all(results);
+}
+
+SubtypingResult Subtyping::isSubtype_(const VariadicTypePack* subVariadic, const VariadicTypePack* superVariadic)
+{
+    return isSubtype_(subVariadic->ty, superVariadic->ty);
+}
+
 bool Subtyping::bindGeneric(TypeId subTy, TypeId superTy)
 {
     if (variance == Variance::Covariant)
@@ -10,6 +10,8 @@
 #include <limits>
 #include <math.h>

+LUAU_FASTFLAG(LuauFloorDivision)
+
 namespace
 {
 bool isIdentifierStartChar(char c)

@@ -467,10 +469,13 @@ struct Printer
     case AstExprBinary::Sub:
     case AstExprBinary::Mul:
     case AstExprBinary::Div:
+    case AstExprBinary::FloorDiv:
     case AstExprBinary::Mod:
     case AstExprBinary::Pow:
     case AstExprBinary::CompareLt:
     case AstExprBinary::CompareGt:
+        LUAU_ASSERT(FFlag::LuauFloorDivision || a->op != AstExprBinary::FloorDiv);
+
         writer.maybeSpace(a->right->location.begin, 2);
         writer.symbol(toString(a->op));
         break;

@@ -487,6 +492,8 @@ struct Printer
         writer.maybeSpace(a->right->location.begin, 4);
         writer.keyword(toString(a->op));
         break;
+    default:
+        LUAU_ASSERT(!"Unknown Op");
     }

     visualize(*a->right);

@@ -753,6 +760,12 @@ struct Printer
             writer.maybeSpace(a->value->location.begin, 2);
             writer.symbol("/=");
             break;
+        case AstExprBinary::FloorDiv:
+            LUAU_ASSERT(FFlag::LuauFloorDivision);
+
+            writer.maybeSpace(a->value->location.begin, 2);
+            writer.symbol("//=");
+            break;
         case AstExprBinary::Mod:
             writer.maybeSpace(a->value->location.begin, 2);
             writer.symbol("%=");
@@ -23,6 +23,7 @@
 #include <algorithm>

 LUAU_FASTFLAG(DebugLuauMagicTypes)
+LUAU_FASTFLAG(LuauFloorDivision);

 namespace Luau
 {

@@ -1817,6 +1818,8 @@ struct TypeChecker2
     bool typesHaveIntersection = normalizer.isIntersectionInhabited(leftType, rightType);
     if (auto it = kBinaryOpMetamethods.find(expr->op); it != kBinaryOpMetamethods.end())
     {
+        LUAU_ASSERT(FFlag::LuauFloorDivision || expr->op != AstExprBinary::Op::FloorDiv);
+
         std::optional<TypeId> leftMt = getMetatable(leftType, builtinTypes);
         std::optional<TypeId> rightMt = getMetatable(rightType, builtinTypes);
         bool matches = leftMt == rightMt;

@@ -2002,8 +2005,11 @@ struct TypeChecker2
     case AstExprBinary::Op::Sub:
     case AstExprBinary::Op::Mul:
     case AstExprBinary::Op::Div:
+    case AstExprBinary::Op::FloorDiv:
     case AstExprBinary::Op::Pow:
     case AstExprBinary::Op::Mod:
+        LUAU_ASSERT(FFlag::LuauFloorDivision || expr->op != AstExprBinary::Op::FloorDiv);
+
         reportErrors(tryUnify(scope, expr->left->location, leftType, builtinTypes->numberType));
         reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->numberType));
@@ -40,6 +40,7 @@ LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure)
 LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false)
 LUAU_FASTFLAGVARIABLE(LuauAlwaysCommitInferencesOfFunctionCalls, false)
 LUAU_FASTFLAG(LuauParseDeclareClassIndexer)
+LUAU_FASTFLAG(LuauFloorDivision);

 namespace Luau
 {

@@ -200,7 +201,8 @@ static bool isMetamethod(const Name& name)
 {
     return name == "__index" || name == "__newindex" || name == "__call" || name == "__concat" || name == "__unm" || name == "__add" ||
            name == "__sub" || name == "__mul" || name == "__div" || name == "__mod" || name == "__pow" || name == "__tostring" ||
-           name == "__metatable" || name == "__eq" || name == "__lt" || name == "__le" || name == "__mode" || name == "__iter" || name == "__len";
+           name == "__metatable" || name == "__eq" || name == "__lt" || name == "__le" || name == "__mode" || name == "__iter" || name == "__len" ||
+           (FFlag::LuauFloorDivision && name == "__idiv");
 }

 size_t HashBoolNamePair::operator()(const std::pair<bool, Name>& pair) const

@@ -2571,6 +2573,9 @@ std::string opToMetaTableEntry(const AstExprBinary::Op& op)
         return "__mul";
     case AstExprBinary::Div:
         return "__div";
+    case AstExprBinary::FloorDiv:
+        LUAU_ASSERT(FFlag::LuauFloorDivision);
+        return "__idiv";
     case AstExprBinary::Mod:
         return "__mod";
     case AstExprBinary::Pow:

@@ -3064,8 +3069,11 @@ TypeId TypeChecker::checkBinaryOperation(
     case AstExprBinary::Sub:
     case AstExprBinary::Mul:
     case AstExprBinary::Div:
+    case AstExprBinary::FloorDiv:
     case AstExprBinary::Mod:
     case AstExprBinary::Pow:
+        LUAU_ASSERT(FFlag::LuauFloorDivision || expr.op != AstExprBinary::FloorDiv);
+
         reportErrors(tryUnify(lhsType, numberType, scope, expr.left->location));
         reportErrors(tryUnify(rhsType, numberType, scope, expr.right->location));
         return numberType;
@@ -605,6 +605,10 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
     {
         // TODO: there are probably cheaper ways to check if any <: T.
         const NormalizedType* superNorm = normalizer->normalize(superTy);
+
+        if (!superNorm)
+            return reportError(location, UnificationTooComplex{});
+
         if (!log.get<AnyType>(superNorm->tops))
             failure = true;
     }
@@ -457,6 +457,7 @@ public:
         Sub,
         Mul,
         Div,
+        FloorDiv,
         Mod,
         Pow,
         Concat,

@@ -467,7 +468,9 @@ public:
         CompareGt,
         CompareGe,
         And,
-        Or
+        Or,
+
+        Op__Count
     };

     AstExprBinary(const Location& location, Op op, AstExpr* left, AstExpr* right);
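Op__Count is a sentinel rather than a real operator: the Parser.cpp hunk later in this commit uses it in a static_assert so that the binaryPriority table must grow in lockstep whenever a new binary operator such as FloorDiv is added.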
@@ -531,11 +534,12 @@ class AstStatBlock : public AstStat
 public:
     LUAU_RTTI(AstStatBlock)

-    AstStatBlock(const Location& location, const AstArray<AstStat*>& body);
+    AstStatBlock(const Location& location, const AstArray<AstStat*>& body, bool hasEnd=true);

     void visit(AstVisitor* visitor) override;

     AstArray<AstStat*> body;
+    bool hasEnd = false;
 };

 class AstStatIf : public AstStat
@@ -62,6 +62,7 @@ struct Lexeme
         Dot3,
         SkinnyArrow,
         DoubleColon,
+        FloorDiv,

         InterpStringBegin,
         InterpStringMid,

@@ -73,6 +74,7 @@ struct Lexeme
         SubAssign,
         MulAssign,
         DivAssign,
+        FloorDivAssign,
         ModAssign,
         PowAssign,
         ConcatAssign,
@@ -3,6 +3,8 @@

 #include "Luau/Common.h"

+LUAU_FASTFLAG(LuauFloorDivision)
+
 namespace Luau
 {

@@ -279,6 +281,9 @@ std::string toString(AstExprBinary::Op op)
         return "*";
     case AstExprBinary::Div:
         return "/";
+    case AstExprBinary::FloorDiv:
+        LUAU_ASSERT(FFlag::LuauFloorDivision);
+        return "//";
     case AstExprBinary::Mod:
         return "%";
     case AstExprBinary::Pow:

@@ -375,9 +380,10 @@ void AstExprError::visit(AstVisitor* visitor)
     }
 }

-AstStatBlock::AstStatBlock(const Location& location, const AstArray<AstStat*>& body)
+AstStatBlock::AstStatBlock(const Location& location, const AstArray<AstStat*>& body, bool hasEnd)
     : AstStat(ClassIndex(), location)
     , body(body)
+    , hasEnd(hasEnd)
 {
 }
@@ -6,7 +6,9 @@

 #include <limits.h>

+LUAU_FASTFLAGVARIABLE(LuauFloorDivision, false)
 LUAU_FASTFLAGVARIABLE(LuauLexerConsumeFast, false)
+LUAU_FASTFLAGVARIABLE(LuauLexerLookaheadRemembersBraceType, false)

 namespace Luau
 {

@@ -138,6 +140,9 @@ std::string Lexeme::toString() const
     case DoubleColon:
         return "'::'";

+    case FloorDiv:
+        return FFlag::LuauFloorDivision ? "'//'" : "<unknown>";
+
     case AddAssign:
         return "'+='";

@@ -150,6 +155,9 @@ std::string Lexeme::toString() const
     case DivAssign:
         return "'/='";

+    case FloorDivAssign:
+        return FFlag::LuauFloorDivision ? "'//='" : "<unknown>";
+
     case ModAssign:
         return "'%='";

@@ -402,6 +410,8 @@ Lexeme Lexer::lookahead()
     unsigned int currentLineOffset = lineOffset;
     Lexeme currentLexeme = lexeme;
     Location currentPrevLocation = prevLocation;
+    size_t currentBraceStackSize = braceStack.size();
+    BraceType currentBraceType = braceStack.empty() ? BraceType::Normal : braceStack.back();

     Lexeme result = next();

@@ -410,6 +420,13 @@ Lexeme Lexer::lookahead()
     lineOffset = currentLineOffset;
     lexeme = currentLexeme;
     prevLocation = currentPrevLocation;
+    if (FFlag::LuauLexerLookaheadRemembersBraceType)
+    {
+        if (braceStack.size() < currentBraceStackSize)
+            braceStack.push_back(currentBraceType);
+        else if (braceStack.size() > currentBraceStackSize)
+            braceStack.pop_back();
+    }

     return result;
 }
@@ -901,15 +918,46 @@ Lexeme Lexer::readNext()
             return Lexeme(Location(start, 1), '+');

     case '/':
-        consume();
-
-        if (peekch() == '=')
-        {
-            consume();
-            return Lexeme(Location(start, 2), Lexeme::DivAssign);
-        }
-        else
-            return Lexeme(Location(start, 1), '/');
+    {
+        if (FFlag::LuauFloorDivision)
+        {
+            consume();
+
+            char ch = peekch();
+
+            if (ch == '=')
+            {
+                consume();
+                return Lexeme(Location(start, 2), Lexeme::DivAssign);
+            }
+            else if (ch == '/')
+            {
+                consume();
+
+                if (peekch() == '=')
+                {
+                    consume();
+                    return Lexeme(Location(start, 3), Lexeme::FloorDivAssign);
+                }
+                else
+                    return Lexeme(Location(start, 2), Lexeme::FloorDiv);
+            }
+            else
+                return Lexeme(Location(start, 1), '/');
+        }
+        else
+        {
+            consume();
+
+            if (peekch() == '=')
+            {
+                consume();
+                return Lexeme(Location(start, 2), Lexeme::DivAssign);
+            }
+            else
+                return Lexeme(Location(start, 1), '/');
+        }
+    }

     case '*':
         consume();
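A quick summary of what the new lexing accepts, derived from the code above with FFlag::LuauFloorDivision enabled (a sketch, not an exhaustive test):

    // "/"   -> '/' (plain division)      "/="  -> Lexeme::DivAssign
    // "//"  -> Lexeme::FloorDiv          "//=" -> Lexeme::FloorDivAssign
    // With the flag disabled, "//" still lexes as two consecutive '/' tokens.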
@@ -14,6 +14,7 @@
 LUAU_FASTINTVARIABLE(LuauRecursionLimit, 1000)
 LUAU_FASTINTVARIABLE(LuauParseErrorLimit, 100)
 LUAU_FASTFLAGVARIABLE(LuauParseDeclareClassIndexer, false)
+LUAU_FASTFLAG(LuauFloorDivision)

 namespace Luau
 {

@@ -460,11 +461,11 @@ AstStat* Parser::parseDo()
     Lexeme matchDo = lexer.current();
     nextLexeme(); // do

-    AstStat* body = parseBlock();
+    AstStatBlock* body = parseBlock();

     body->location.begin = start.begin;

-    expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);
+    body->hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);

     return body;
 }
@@ -1766,6 +1767,12 @@ std::optional<AstExprBinary::Op> Parser::parseBinaryOp(const Lexeme& l)
         return AstExprBinary::Mul;
     else if (l.type == '/')
         return AstExprBinary::Div;
+    else if (l.type == Lexeme::FloorDiv)
+    {
+        LUAU_ASSERT(FFlag::LuauFloorDivision);
+
+        return AstExprBinary::FloorDiv;
+    }
     else if (l.type == '%')
         return AstExprBinary::Mod;
     else if (l.type == '^')

@@ -1802,6 +1809,12 @@ std::optional<AstExprBinary::Op> Parser::parseCompoundOp(const Lexeme& l)
         return AstExprBinary::Mul;
     else if (l.type == Lexeme::DivAssign)
         return AstExprBinary::Div;
+    else if (l.type == Lexeme::FloorDivAssign)
+    {
+        LUAU_ASSERT(FFlag::LuauFloorDivision);
+
+        return AstExprBinary::FloorDiv;
+    }
     else if (l.type == Lexeme::ModAssign)
         return AstExprBinary::Mod;
     else if (l.type == Lexeme::PowAssign)
@@ -1872,12 +1885,13 @@ std::optional<AstExprBinary::Op> Parser::checkBinaryConfusables(const BinaryOpPr
 AstExpr* Parser::parseExpr(unsigned int limit)
 {
     static const BinaryOpPriority binaryPriority[] = {
-        {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, // `+' `-' `*' `/' `%'
-        {10, 9}, {5, 4},                        // power and concat (right associative)
-        {3, 3}, {3, 3},                         // equality and inequality
-        {3, 3}, {3, 3}, {3, 3}, {3, 3},         // order
-        {2, 2}, {1, 1}                          // logical (and/or)
+        {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, // `+' `-' `*' `/' `//' `%'
+        {10, 9}, {5, 4},                                // power and concat (right associative)
+        {3, 3}, {3, 3},                                 // equality and inequality
+        {3, 3}, {3, 3}, {3, 3}, {3, 3},                 // order
+        {2, 2}, {1, 1}                                  // logical (and/or)
     };
+    static_assert(sizeof(binaryPriority) / sizeof(binaryPriority[0]) == size_t(AstExprBinary::Op__Count), "binaryPriority needs an entry per op");

     unsigned int recursionCounterOld = recursionCounter;
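A worked example of the priorities above (illustration only): `//` sits at level {7, 7}, the same as `*` and `/` and tighter than `+` at {6, 6}, and all of them are left-associative, so 1 + 8 // 3 * 2 parses as 1 + ((8 // 3) * 2) and evaluates to 5.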
@@ -221,6 +221,7 @@ private:
     void placeFMOV(const char* name, RegisterA64 dst, double src, uint32_t op);
     void placeBM(const char* name, RegisterA64 dst, RegisterA64 src1, uint32_t src2, uint8_t op);
     void placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op, int immr, int imms);
+    void placeER(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift);

     void place(uint32_t word);
@@ -67,6 +67,7 @@ enum class IrCmd : uint8_t
     // Get pointer (LuaNode) to table node element at the active cached slot index
    // A: pointer (Table)
     // B: unsigned int (pcpos)
+    // C: Kn
     GET_SLOT_NODE_ADDR,

     // Get pointer (LuaNode) to table node element at the main position of the specified key hash

@@ -132,6 +133,7 @@ enum class IrCmd : uint8_t
     SUB_NUM,
     MUL_NUM,
     DIV_NUM,
+    IDIV_NUM,
     MOD_NUM,

     // Get the minimum/maximum of two numbers

@@ -253,6 +255,11 @@ enum class IrCmd : uint8_t
     // A: pointer (Table)
     DUP_TABLE,

+    // Insert an integer key into a table
+    // A: pointer (Table)
+    // B: int (key)
+    TABLE_SETNUM,
+
     // Try to convert a double number into a table index (int) or jump if it's not an integer
     // A: double
     // B: block

@@ -411,6 +418,12 @@ enum class IrCmd : uint8_t
     // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_NODE_NO_NEXT,

+    // Guard against table node with 'nil' value
+    // A: pointer (LuaNode)
+    // B: block/vmexit/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
+    CHECK_NODE_VALUE,
+
     // Special operations

     // Check interrupt handler

@@ -832,6 +845,8 @@ struct IrBlock
     uint32_t finish = ~0u;

     uint32_t sortkey = ~0u;
+    uint32_t chainkey = 0;
+    uint32_t expectedNextBlock = ~0u;

     Label label;
 };
@@ -993,23 +1008,26 @@ struct IrFunction
         valueRestoreOps[instIdx] = location;
     }

-    IrOp findRestoreOp(uint32_t instIdx) const
+    IrOp findRestoreOp(uint32_t instIdx, bool limitToCurrentBlock) const
     {
         if (instIdx >= valueRestoreOps.size())
             return {};

         const IrBlock& block = blocks[validRestoreOpBlockIdx];

-        // Values can only reference restore operands in the current block
-        if (instIdx < block.start || instIdx > block.finish)
-            return {};
+        // When spilled, values can only reference restore operands in the current block
+        if (limitToCurrentBlock)
+        {
+            if (instIdx < block.start || instIdx > block.finish)
+                return {};
+        }

         return valueRestoreOps[instIdx];
     }

-    IrOp findRestoreOp(const IrInst& inst) const
+    IrOp findRestoreOp(const IrInst& inst, bool limitToCurrentBlock) const
     {
-        return findRestoreOp(getInstIndex(inst));
+        return findRestoreOp(getInstIndex(inst), limitToCurrentBlock);
     }
 };
@@ -128,6 +128,7 @@ inline bool isNonTerminatingJump(IrCmd cmd)
     case IrCmd::CHECK_ARRAY_SIZE:
     case IrCmd::CHECK_SLOT_MATCH:
     case IrCmd::CHECK_NODE_NO_NEXT:
+    case IrCmd::CHECK_NODE_VALUE:
         return true;
     default:
         break;

@@ -156,6 +157,7 @@ inline bool hasResult(IrCmd cmd)
     case IrCmd::SUB_NUM:
     case IrCmd::MUL_NUM:
     case IrCmd::DIV_NUM:
+    case IrCmd::IDIV_NUM:
     case IrCmd::MOD_NUM:
     case IrCmd::MIN_NUM:
     case IrCmd::MAX_NUM:

@@ -168,6 +170,7 @@ inline bool hasResult(IrCmd cmd)
     case IrCmd::NOT_ANY:
     case IrCmd::CMP_ANY:
     case IrCmd::TABLE_LEN:
+    case IrCmd::TABLE_SETNUM:
     case IrCmd::STRING_LEN:
     case IrCmd::NEW_TABLE:
     case IrCmd::DUP_TABLE:
@@ -47,18 +47,6 @@ constexpr RegisterA64 castReg(KindA64 kind, RegisterA64 reg)
     return RegisterA64{kind, reg.index};
 }

-// This is equivalent to castReg(KindA64::x), but is separate because it implies different semantics
-// Specifically, there are cases when it's useful to treat a wN register as an xN register *after* it has been assigned a value
-// Since all A64 instructions that write to wN implicitly zero the top half, this works when we need zero extension semantics
-// Crucially, this is *not* safe on an ABI boundary - an int parameter in wN register may have anything in its top half in certain cases
-// However, as long as our codegen doesn't use 32-bit truncation by using castReg x=>w, we can safely rely on this.
-constexpr RegisterA64 zextReg(RegisterA64 reg)
-{
-    LUAU_ASSERT(reg.kind == KindA64::w);
-
-    return RegisterA64{KindA64::x, reg.index};
-}
-
 constexpr RegisterA64 noreg{KindA64::none, 0};

 constexpr RegisterA64 w0{KindA64::w, 0};
@@ -105,7 +105,10 @@ void AssemblyBuilderA64::movk(RegisterA64 dst, uint16_t src, int shift)

 void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
 {
-    placeSR3("add", dst, src1, src2, 0b00'01011, shift);
+    if (src1.kind == KindA64::x && src2.kind == KindA64::w)
+        placeER("add", dst, src1, src2, 0b00'01011, shift);
+    else
+        placeSR3("add", dst, src1, src2, 0b00'01011, shift);
 }

 void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, uint16_t src2)

@@ -115,7 +118,10 @@ void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, uint16_t src2)

 void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
 {
-    placeSR3("sub", dst, src1, src2, 0b10'01011, shift);
+    if (src1.kind == KindA64::x && src2.kind == KindA64::w)
+        placeER("sub", dst, src1, src2, 0b10'01011, shift);
+    else
+        placeSR3("sub", dst, src1, src2, 0b10'01011, shift);
 }

 void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, uint16_t src2)

@@ -1075,6 +1081,22 @@ void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64
     commit();
 }

+void AssemblyBuilderA64::placeER(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift)
+{
+    if (logText)
+        log(name, dst, src1, src2, shift);
+
+    LUAU_ASSERT(dst.kind == KindA64::x && src1.kind == KindA64::x);
+    LUAU_ASSERT(src2.kind == KindA64::w);
+    LUAU_ASSERT(shift >= 0 && shift <= 4);
+
+    uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; // could be useful in the future for byte->word extends
+    int option = 0b010; // UXTW
+
+    place(dst.index | (src1.index << 5) | (shift << 10) | (option << 13) | (src2.index << 16) | (1 << 21) | (op << 24) | sf);
+    commit();
+}
+
 void AssemblyBuilderA64::place(uint32_t word)
 {
     LUAU_ASSERT(codePos < codeEnd);
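placeER emits the A64 "extended register" encoding, which folds a zero-extension (UXTW) and a left shift into the add/sub instruction itself; this is what replaces the removed zextReg() cast. A usage sketch under the add() overload changed above (register choices are arbitrary, and the shift value 4 assumes 16-byte TValues):

    // Emits "add x0, x1, w2, UXTW #4": zero-extend w2 to 64 bits, shift left by 4, add to x1.
    build.add(x0, x1, w2, 4); // dispatches to placeER because src1 is x-kind and src2 is w-kind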
@@ -1167,7 +1189,9 @@ void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 sr
     log(src1);
     text.append(",");
     log(src2);
-    if (shift > 0)
+    if (src1.kind == KindA64::x && src2.kind == KindA64::w)
+        logAppend(" UXTW #%d", shift);
+    else if (shift > 0)
         logAppend(" LSL #%d", shift);
     else if (shift < 0)
         logAppend(" LSR #%d", -shift);
@@ -71,10 +71,12 @@ static uint8_t* allocatePagesImpl(size_t size)
     LUAU_ASSERT(size == alignToPageSize(size));

 #ifdef __APPLE__
-    return (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
+    void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
 #else
-    return (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+    void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
 #endif

     return (result == MAP_FAILED) ? nullptr : static_cast<uint8_t*>(result);
 }

 static void freePagesImpl(uint8_t* mem, size_t size)
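Background for the fix above (standard POSIX behavior, not specific to this codebase): mmap signals failure by returning MAP_FAILED, which is (void*)-1 rather than a null pointer, so the old code could hand callers a non-null garbage pointer that their null checks would miss; the rewritten version normalizes failure to nullptr.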
@@ -74,7 +74,11 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
             return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);

         // Try to order by instruction order
-        return a.sortkey < b.sortkey;
+        if (a.sortkey != b.sortkey)
+            return a.sortkey < b.sortkey;
+
+        // Chains of blocks are merged together by having the same sort key and consecutive chain key
+        return a.chainkey < b.chainkey;
     });

     // For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?

@@ -100,6 +104,9 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
     IrBlock dummy;
     dummy.start = ~0u;

+    // Make sure entry block is first
+    LUAU_ASSERT(sortedBlocks[0] == 0);
+
     for (size_t i = 0; i < sortedBlocks.size(); ++i)
     {
         uint32_t blockIndex = sortedBlocks[i];

@@ -137,6 +144,11 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&

         IrBlock& nextBlock = getNextBlock(function, sortedBlocks, dummy, i);

+        // Optimizations often propagate information between blocks
+        // To make sure the register and spill state is correct when blocks are lowered, we check that sorted block order matches the expected one
+        if (block.expectedNextBlock != ~0u)
+            LUAU_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock);
+
         for (uint32_t index = block.start; index <= block.finish; index++)
         {
             LUAU_ASSERT(index < function.instructions.size());
@@ -147,13 +147,14 @@ void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, Operan
     emitUpdateBase(build);
 }

-void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, int ratag, Label& skip)
+void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, IrOp ra, int ratag, Label& skip)
 {
     // Barrier should've been optimized away if we know that it's not collectable, checking for correctness
     if (ratag == -1 || !isGCO(ratag))
     {
         // iscollectable(ra)
-        build.cmp(luauRegTag(ra), LUA_TSTRING);
+        OperandX64 tag = (ra.kind == IrOpKind::VmReg) ? luauRegTag(vmRegOp(ra)) : luauConstantTag(vmConstOp(ra));
+        build.cmp(tag, LUA_TSTRING);
         build.jcc(ConditionX64::Less, skip);
     }

@@ -162,12 +163,14 @@ void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, Re
     build.jcc(ConditionX64::Zero, skip);

     // iswhite(gcvalue(ra))
-    build.mov(tmp, luauRegValue(ra));
+    OperandX64 value = (ra.kind == IrOpKind::VmReg) ? luauRegValue(vmRegOp(ra)) : luauConstantValue(vmConstOp(ra));
+    build.mov(tmp, value);
     build.test(byte[tmp + offsetof(GCheader, marked)], bit2mask(WHITE0BIT, WHITE1BIT));
     build.jcc(ConditionX64::Zero, skip);
 }

-void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra, int ratag)
+void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, IrOp ra, int ratag)
 {
     Label skip;
@@ -59,7 +59,8 @@ inline uint8_t getXmmRegisterCount(ABIX64 abi)
 // Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
 // Stack is separated into sections for different data. See CodeGenX64.cpp for layout overview
 constexpr unsigned kStackAlign = 8; // Bytes we need to align the stack for non-vol xmm register storage
-constexpr unsigned kStackLocalStorage = 8 * kExtraLocals + 8 * kSpillSlots;
+constexpr unsigned kStackLocalStorage = 8 * kExtraLocals;
+constexpr unsigned kStackSpillStorage = 8 * kSpillSlots;
 constexpr unsigned kStackExtraArgumentStorage = 2 * 8; // Bytes for 5th and 6th function call arguments used under Windows ABI
 constexpr unsigned kStackRegHomeStorage = 4 * 8; // Register 'home' locations that can be used by callees under Windows ABI

@@ -82,7 +83,7 @@ constexpr unsigned kStackOffsetToSpillSlots = kStackOffsetToLocals + kStackLocal

 inline unsigned getFullStackSize(ABIX64 abi, uint8_t xmmRegCount)
 {
-    return kStackOffsetToSpillSlots + getNonVolXmmStorageSize(abi, xmmRegCount) + kStackAlign;
+    return kStackOffsetToSpillSlots + kStackSpillStorage + getNonVolXmmStorageSize(abi, xmmRegCount) + kStackAlign;
 }

 constexpr OperandX64 sClosure = qword[rsp + kStackOffsetToLocals + 0]; // Closure* cl

@@ -201,8 +202,8 @@ void callArithHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int
 void callLengthHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb);
 void callGetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
 void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
-void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, int ratag, Label& skip);
-void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra, int ratag);
+void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, IrOp ra, int ratag, Label& skip);
+void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, IrOp ra, int ratag);
 void callBarrierTableFast(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 table, IrOp tableOp);
 void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build);
@@ -257,7 +257,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrBlock&
         break;
     case IrCmd::BARRIER_OBJ:
     case IrCmd::BARRIER_TABLE_FORWARD:
-        visitor.use(inst.b);
+        visitor.maybeUse(inst.b);
         break;
     case IrCmd::CLOSE_UPVALS:
         // Closing an upvalue should be counted as a register use (it copies the fresh register value)
@@ -333,6 +333,9 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
     case LOP_DIV:
         translateInstBinary(*this, pc, i, TM_DIV);
         break;
+    case LOP_IDIV:
+        translateInstBinary(*this, pc, i, TM_IDIV);
+        break;
     case LOP_MOD:
         translateInstBinary(*this, pc, i, TM_MOD);
         break;

@@ -351,6 +354,9 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
     case LOP_DIVK:
         translateInstBinaryK(*this, pc, i, TM_DIV);
         break;
+    case LOP_IDIVK:
+        translateInstBinaryK(*this, pc, i, TM_IDIV);
+        break;
     case LOP_MODK:
         translateInstBinaryK(*this, pc, i, TM_MOD);
         break;
@@ -125,6 +125,8 @@ const char* getCmdName(IrCmd cmd)
         return "MUL_NUM";
     case IrCmd::DIV_NUM:
         return "DIV_NUM";
+    case IrCmd::IDIV_NUM:
+        return "IDIV_NUM";
     case IrCmd::MOD_NUM:
         return "MOD_NUM";
     case IrCmd::MIN_NUM:

@@ -169,6 +171,8 @@ const char* getCmdName(IrCmd cmd)
         return "JUMP_SLOT_MATCH";
     case IrCmd::TABLE_LEN:
         return "TABLE_LEN";
+    case IrCmd::TABLE_SETNUM:
+        return "TABLE_SETNUM";
     case IrCmd::STRING_LEN:
         return "STRING_LEN";
     case IrCmd::NEW_TABLE:

@@ -229,6 +233,8 @@ const char* getCmdName(IrCmd cmd)
         return "CHECK_SLOT_MATCH";
     case IrCmd::CHECK_NODE_NO_NEXT:
         return "CHECK_NODE_NO_NEXT";
+    case IrCmd::CHECK_NODE_VALUE:
+        return "CHECK_NODE_VALUE";
     case IrCmd::INTERRUPT:
         return "INTERRUPT";
     case IrCmd::CHECK_GC:
@@ -58,30 +58,6 @@ inline ConditionA64 getConditionFP(IrCondition cond)
     }
 }

-static void checkObjectBarrierConditions(AssemblyBuilderA64& build, RegisterA64 object, RegisterA64 temp, int ra, int ratag, Label& skip)
-{
-    RegisterA64 tempw = castReg(KindA64::w, temp);
-
-    // Barrier should've been optimized away if we know that it's not collectable, checking for correctness
-    if (ratag == -1 || !isGCO(ratag))
-    {
-        // iscollectable(ra)
-        build.ldr(tempw, mem(rBase, ra * sizeof(TValue) + offsetof(TValue, tt)));
-        build.cmp(tempw, LUA_TSTRING);
-        build.b(ConditionA64::Less, skip);
-    }
-
-    // isblack(obj2gco(o))
-    build.ldrb(tempw, mem(object, offsetof(GCheader, marked)));
-    build.tbz(tempw, BLACKBIT, skip);
-
-    // iswhite(gcvalue(ra))
-    build.ldr(temp, mem(rBase, ra * sizeof(TValue) + offsetof(TValue, value)));
-    build.ldrb(tempw, mem(temp, offsetof(GCheader, marked)));
-    build.tst(tempw, bit2mask(WHITE0BIT, WHITE1BIT));
-    build.b(ConditionA64::Equal, skip); // Equal = Zero after tst
-}
-
 static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA64 src, size_t offset)
 {
     LUAU_ASSERT(dst != src);

@@ -98,6 +74,47 @@ static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA6
     }
 }

+static void checkObjectBarrierConditions(AssemblyBuilderA64& build, RegisterA64 object, RegisterA64 temp, IrOp ra, int ratag, Label& skip)
+{
+    RegisterA64 tempw = castReg(KindA64::w, temp);
+    AddressA64 addr = temp;
+
+    // iscollectable(ra)
+    if (ratag == -1 || !isGCO(ratag))
+    {
+        if (ra.kind == IrOpKind::VmReg)
+        {
+            addr = mem(rBase, vmRegOp(ra) * sizeof(TValue) + offsetof(TValue, tt));
+        }
+        else if (ra.kind == IrOpKind::VmConst)
+        {
+            emitAddOffset(build, temp, rConstants, vmConstOp(ra) * sizeof(TValue) + offsetof(TValue, tt));
+        }
+
+        build.ldr(tempw, addr);
+        build.cmp(tempw, LUA_TSTRING);
+        build.b(ConditionA64::Less, skip);
+    }
+
+    // isblack(obj2gco(o))
+    build.ldrb(tempw, mem(object, offsetof(GCheader, marked)));
+    build.tbz(tempw, BLACKBIT, skip);
+
+    // iswhite(gcvalue(ra))
+    if (ra.kind == IrOpKind::VmReg)
+    {
+        addr = mem(rBase, vmRegOp(ra) * sizeof(TValue) + offsetof(TValue, value));
+    }
+    else if (ra.kind == IrOpKind::VmConst)
+    {
+        emitAddOffset(build, temp, rConstants, vmConstOp(ra) * sizeof(TValue) + offsetof(TValue, value));
+    }
+    build.ldr(temp, addr);
+    build.ldrb(tempw, mem(temp, offsetof(GCheader, marked)));
+    build.tst(tempw, bit2mask(WHITE0BIT, WHITE1BIT));
+    build.b(ConditionA64::Equal, skip); // Equal = Zero after tst
+}
+
 static void emitAbort(AssemblyBuilderA64& build, Label& abort)
 {
     Label skip;
@@ -242,7 +259,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)

     if (inst.b.kind == IrOpKind::Inst)
     {
-        build.add(inst.regA64, inst.regA64, zextReg(regOp(inst.b)), kTValueSizeLog2);
+        build.add(inst.regA64, inst.regA64, regOp(inst.b), kTValueSizeLog2);
     }
     else if (inst.b.kind == IrOpKind::Constant)
     {

@@ -271,6 +288,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
     RegisterA64 temp1 = regs.allocTemp(KindA64::x);
     RegisterA64 temp1w = castReg(KindA64::w, temp1);
     RegisterA64 temp2 = regs.allocTemp(KindA64::w);
+    RegisterA64 temp2x = castReg(KindA64::x, temp2);

     // note: since the stride of the load is the same as the destination register size, we can range check the array index, not the byte offset
     if (uintOp(inst.b) <= AddressA64::kMaxOffset)

@@ -288,7 +306,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)

     // note: this may clobber inst.a, so it's important that we don't use it after this
     build.ldr(inst.regA64, mem(regOp(inst.a), offsetof(Table, node)));
-    build.add(inst.regA64, inst.regA64, zextReg(temp2), kLuaNodeSizeLog2);
+    build.add(inst.regA64, inst.regA64, temp2x, kLuaNodeSizeLog2); // "zero extend" temp2 to get a larger shift (top 32 bits are zero)
     break;
 }
 case IrCmd::GET_HASH_NODE_ADDR:

@@ -296,6 +314,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
     inst.regA64 = regs.allocReuse(KindA64::x, index, {inst.a});
     RegisterA64 temp1 = regs.allocTemp(KindA64::w);
     RegisterA64 temp2 = regs.allocTemp(KindA64::w);
+    RegisterA64 temp2x = castReg(KindA64::x, temp2);

     // hash & ((1 << lsizenode) - 1) == hash & ~(-1 << lsizenode)
     build.mov(temp1, -1);

@@ -306,7 +325,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)

     // note: this may clobber inst.a, so it's important that we don't use it after this
     build.ldr(inst.regA64, mem(regOp(inst.a), offsetof(Table, node)));
-    build.add(inst.regA64, inst.regA64, zextReg(temp2), kLuaNodeSizeLog2);
+    build.add(inst.regA64, inst.regA64, temp2x, kLuaNodeSizeLog2); // "zero extend" temp2 to get a larger shift (top 32 bits are zero)
     break;
 }
 case IrCmd::GET_CLOSURE_UPVAL_ADDR:
@@ -477,6 +496,15 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
         build.fdiv(inst.regA64, temp1, temp2);
         break;
     }
+    case IrCmd::IDIV_NUM:
+    {
+        inst.regA64 = regs.allocReuse(KindA64::d, index, {inst.a, inst.b});
+        RegisterA64 temp1 = tempDouble(inst.a);
+        RegisterA64 temp2 = tempDouble(inst.b);
+        build.fdiv(inst.regA64, temp1, temp2);
+        build.frintm(inst.regA64, inst.regA64);
+        break;
+    }
     case IrCmd::MOD_NUM:
     {
         inst.regA64 = regs.allocReg(KindA64::d, index); // can't allocReuse because both A and B are used twice
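For reference, fdiv followed by frintm (round toward minus infinity) is floor division over doubles. A scalar sketch of the same semantics (assumption: plain ISO C++ with <cmath> only):

    #include <cmath>

    // IDIV_NUM semantics: quotient rounded toward negative infinity.
    double luauFloorDiv(double a, double b)
    {
        return std::floor(a / b); // e.g. luauFloorDiv(-7, 2) == -4, not -3
    }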
@@ -604,9 +632,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)

     emitUpdateBase(build);

-    // since w0 came from a call, we need to move it so that we don't violate zextReg safety contract
-    inst.regA64 = regs.allocReg(KindA64::w, index);
-    build.mov(inst.regA64, w0);
+    inst.regA64 = regs.takeReg(w0, index);
     break;
 }
 case IrCmd::JUMP:

@@ -750,8 +776,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
     build.mov(x0, reg);
     build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, luaH_getn)));
     build.blr(x1);
-    inst.regA64 = regs.allocReg(KindA64::d, index);
-    build.scvtf(inst.regA64, w0);
+
+    inst.regA64 = regs.takeReg(w0, index);
     break;
 }
 case IrCmd::STRING_LEN:
@ -761,6 +787,33 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
build.ldr(inst.regA64, mem(regOp(inst.a), offsetof(TString, len)));
|
||||
break;
|
||||
}
|
||||
case IrCmd::TABLE_SETNUM:
|
||||
{
|
||||
// note: we need to call regOp before spill so that we don't do redundant reloads
|
||||
RegisterA64 table = regOp(inst.a);
|
||||
RegisterA64 key = regOp(inst.b);
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::w);
|
||||
|
||||
regs.spill(build, index, {table, key});
|
||||
|
||||
if (w1 != key)
|
||||
{
|
||||
build.mov(x1, table);
|
||||
build.mov(w2, key);
|
||||
}
|
||||
else
|
||||
{
|
||||
build.mov(temp, w1);
|
||||
build.mov(x1, table);
|
||||
build.mov(w2, temp);
|
||||
}
|
||||
|
||||
build.mov(x0, rState);
|
||||
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaH_setnum)));
|
||||
build.blr(x3);
|
||||
inst.regA64 = regs.takeReg(x0, index);
|
||||
break;
|
||||
}
|
||||
case IrCmd::NEW_TABLE:
|
||||
{
|
||||
regs.spill(build, index);
|
||||
|
@ -854,8 +907,6 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
inst.regA64 = regs.allocReg(KindA64::w, index);
|
||||
RegisterA64 temp = tempDouble(inst.a);
|
||||
build.fcvtzs(castReg(KindA64::x, inst.regA64), temp);
|
||||
// truncation needs to clear high bits to preserve zextReg safety contract
|
||||
build.mov(inst.regA64, inst.regA64);
|
||||
break;
|
||||
}
|
||||
case IrCmd::ADJUST_STACK_TO_REG:
|
||||
|
@ -870,7 +921,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
else if (inst.b.kind == IrOpKind::Inst)
|
||||
{
|
||||
build.add(temp, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
|
||||
build.add(temp, temp, zextReg(regOp(inst.b)), kTValueSizeLog2);
|
||||
build.add(temp, temp, regOp(inst.b), kTValueSizeLog2);
|
||||
build.str(temp, mem(rState, offsetof(lua_State, top)));
|
||||
}
|
||||
else
|
||||
|
@ -919,9 +970,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
build.ldr(x6, mem(rNativeContext, offsetof(NativeContext, luauF_table) + uintOp(inst.a) * sizeof(luau_FastFunction)));
|
||||
build.blr(x6);
|
||||
|
||||
// since w0 came from a call, we need to move it so that we don't violate zextReg safety contract
|
||||
inst.regA64 = regs.allocReg(KindA64::w, index);
|
||||
build.mov(inst.regA64, w0);
|
||||
inst.regA64 = regs.takeReg(w0, index);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_FASTCALL_RES:
|
||||
|
@ -1063,7 +1112,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
if (inst.c.kind == IrOpKind::Undef || isGCO(tagOp(inst.c)))
|
||||
{
|
||||
Label skip;
|
||||
checkObjectBarrierConditions(build, temp1, temp2, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
checkObjectBarrierConditions(build, temp1, temp2, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
|
||||
size_t spills = regs.spill(build, index, {temp1});
|
||||
|
||||
|
@ -1244,6 +1293,17 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
finalizeTargetLabel(inst.b, fresh);
|
||||
break;
|
||||
}
|
||||
case IrCmd::CHECK_NODE_VALUE:
|
||||
{
|
||||
Label fresh; // used when guard aborts execution or jumps to a VM exit
|
||||
RegisterA64 temp = regs.allocTemp(KindA64::w);
|
||||
|
||||
build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, val.tt)));
|
||||
LUAU_ASSERT(LUA_TNIL == 0);
|
||||
build.cbz(temp, getTargetLabel(inst.b, fresh));
|
||||
finalizeTargetLabel(inst.b, fresh);
|
||||
break;
|
||||
}
|
||||
case IrCmd::INTERRUPT:
|
||||
{
|
||||
regs.spill(build, index);
|
||||
|
@ -1288,7 +1348,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
RegisterA64 temp = regs.allocTemp(KindA64::x);
|
||||
|
||||
Label skip;
|
||||
checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
checkObjectBarrierConditions(build, regOp(inst.a), temp, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
|
||||
RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads
|
||||
size_t spills = regs.spill(build, index, {reg});
|
||||
|
@ -1332,13 +1392,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
RegisterA64 temp = regs.allocTemp(KindA64::x);
|
||||
|
||||
Label skip;
|
||||
checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
checkObjectBarrierConditions(build, regOp(inst.a), temp, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
|
||||
|
||||
RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads
|
||||
AddressA64 addr = tempAddr(inst.b, offsetof(TValue, value));
|
||||
size_t spills = regs.spill(build, index, {reg});
|
||||
build.mov(x1, reg);
|
||||
build.mov(x0, rState);
|
||||
build.ldr(x2, mem(rBase, vmRegOp(inst.b) * sizeof(TValue) + offsetof(TValue, value)));
|
||||
build.ldr(x2, addr);
|
||||
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaC_barriertable)));
|
||||
build.blr(x3);
|
||||
|
||||
|
@ -1829,7 +1890,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
|
|||
LUAU_ASSERT(sizeof(TString*) == 8);
|
||||
|
||||
if (inst.a.kind == IrOpKind::Inst)
|
||||
build.add(inst.regA64, rGlobalState, zextReg(regOp(inst.a)), 3);
|
||||
build.add(inst.regA64, rGlobalState, regOp(inst.a), 3);
|
||||
else if (inst.a.kind == IrOpKind::Constant)
|
||||
build.add(inst.regA64, rGlobalState, uint16_t(tagOp(inst.a)) * 8);
|
||||
else
|
||||
|
|
|
@@ -407,6 +407,22 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.vdivsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::IDIV_NUM:
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};

build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vdivsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vdivsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
build.vroundsd(inst.regX64, inst.regX64, inst.regX64, RoundingModeX64::RoundToNegativeInfinity);
break;
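On both backends, floor division lowers to an ordinary double division followed by a round toward negative infinity (frintm on A64 above, vroundsd on x64 here). As a rough illustration of that same two-step sequence outside the code generator, here is a hedged C++ sketch using standard SSE4.1 intrinsics; this is illustrative only and not code from the repository:

#include <smmintrin.h> // SSE4.1: _mm_round_sd

// Sketch: the scalar equivalent of vdivsd followed by vroundsd with
// RoundToNegativeInfinity, i.e. floor(a / b) computed in two instructions.
double floor_div(double a, double b)
{
    __m128d va = _mm_set_sd(a);
    __m128d vb = _mm_set_sd(b);
    __m128d q = _mm_div_sd(va, vb);                                      // divide
    q = _mm_round_sd(q, q, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);   // floor
    return _mm_cvtsd_f64(q);
}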
case IrCmd::MOD_NUM:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

@@ -697,9 +713,17 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
IrCallWrapperX64 callWrap(regs, build, index);
callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_getn)]);

inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
build.vcvtsi2sd(inst.regX64, inst.regX64, eax);
inst.regX64 = regs.takeReg(eax, index);
break;
}
case IrCmd::TABLE_SETNUM:
{
IrCallWrapperX64 callWrap(regs, build, index);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
callWrap.addArgument(SizeX64::dword, regOp(inst.b), inst.b);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_setnum)]);
inst.regX64 = regs.takeReg(rax, index);
break;
}
case IrCmd::STRING_LEN:

@@ -997,7 +1021,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
tmp1.free();

if (inst.c.kind == IrOpKind::Undef || isGCO(tagOp(inst.c)))
callBarrierObject(regs, build, tmp2.release(), {}, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
callBarrierObject(regs, build, tmp2.release(), {}, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
break;
}
case IrCmd::CHECK_TAG:

@@ -1106,6 +1130,12 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
jumpOrAbortOnUndef(ConditionX64::NotZero, inst.b, next);
break;
}
case IrCmd::CHECK_NODE_VALUE:
{
build.cmp(dword[regOp(inst.a) + offsetof(LuaNode, val) + offsetof(TValue, tt)], LUA_TNIL);
jumpOrAbortOnUndef(ConditionX64::Equal, inst.b, next);
break;
}
case IrCmd::INTERRUPT:
{
unsigned pcpos = uintOp(inst.a);

@@ -1132,7 +1162,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
callStepGc(regs, build);
break;
case IrCmd::BARRIER_OBJ:
callBarrierObject(regs, build, regOp(inst.a), inst.a, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
callBarrierObject(regs, build, regOp(inst.a), inst.a, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
break;
case IrCmd::BARRIER_TABLE_BACK:
callBarrierTableFast(regs, build, regOp(inst.a), inst.a);

@@ -1142,7 +1172,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
Label skip;

ScopedRegX64 tmp{regs, SizeX64::qword};
checkObjectBarrierConditions(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
checkObjectBarrierConditions(build, tmp.reg, regOp(inst.a), inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);

{
ScopedSpills spillGuard(regs);
@@ -70,9 +70,9 @@ static int getReloadOffset(IrCmd cmd)
LUAU_UNREACHABLE();
}

static AddressA64 getReloadAddress(const IrFunction& function, const IrInst& inst)
static AddressA64 getReloadAddress(const IrFunction& function, const IrInst& inst, bool limitToCurrentBlock)
{
IrOp location = function.findRestoreOp(inst);
IrOp location = function.findRestoreOp(inst, limitToCurrentBlock);

if (location.kind == IrOpKind::VmReg)
return mem(rBase, vmRegOp(location) * sizeof(TValue) + getReloadOffset(inst.cmd));

@@ -99,7 +99,7 @@ static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrF
else
{
LUAU_ASSERT(!inst.spilled && inst.needsReload);
AddressA64 addr = getReloadAddress(function, function.instructions[s.inst]);
AddressA64 addr = getReloadAddress(function, function.instructions[s.inst], /*limitToCurrentBlock*/ false);
LUAU_ASSERT(addr.base != xzr);
build.ldr(reg, addr);
}

@@ -321,7 +321,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init
{
// instead of spilling the register to never reload it, we assume the register is not needed anymore
}
else if (getReloadAddress(function, def).base != xzr)
else if (getReloadAddress(function, def, /*limitToCurrentBlock*/ true).base != xzr)
{
// instead of spilling the register to stack, we can reload it from VM stack/constants
// we still need to record the spill for restore(start) to work

@@ -338,7 +338,9 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)

IrOp IrRegAllocX64::getRestoreOp(const IrInst& inst) const
{
if (IrOp location = function.findRestoreOp(inst); location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst)
// When restoring the value, we allow cross-block restore because we have committed to the target location at spill time
if (IrOp location = function.findRestoreOp(inst, /*limitToCurrentBlock*/ false);
location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst)
return location;

return IrOp();

@@ -346,11 +348,16 @@ IrOp IrRegAllocX64::getRestoreOp(const IrInst& inst) const

bool IrRegAllocX64::hasRestoreOp(const IrInst& inst) const
{
return getRestoreOp(inst).kind != IrOpKind::None;
// When checking if value has a restore operation to spill it, we only allow it in the same block
IrOp location = function.findRestoreOp(inst, /*limitToCurrentBlock*/ true);

return location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst;
}

OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp)
{
LUAU_ASSERT(restoreOp.kind != IrOpKind::None);

switch (getCmdValueKind(inst.cmd))
{
case IrValueKind::Unknown:
@@ -748,6 +748,28 @@ static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, i
return {BuiltinImplType::Full, 1};
}

static BuiltinImplResult translateBuiltinTableInsert(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams != 2 || nresults > 0)
return {BuiltinImplType::None, -1};

build.loadAndCheckTag(build.vmReg(arg), LUA_TTABLE, build.vmExit(pcpos));

IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(arg));
build.inst(IrCmd::CHECK_READONLY, table, build.vmExit(pcpos));

IrOp pos = build.inst(IrCmd::ADD_INT, build.inst(IrCmd::TABLE_LEN, table), build.constInt(1));

IrOp setnum = build.inst(IrCmd::TABLE_SETNUM, table, pos);

IrOp va = build.inst(IrCmd::LOAD_TVALUE, args);
build.inst(IrCmd::STORE_TVALUE, setnum, va);

build.inst(IrCmd::BARRIER_TABLE_FORWARD, table, args, build.undef());

return {BuiltinImplType::Full, 0};
}

static BuiltinImplResult translateBuiltinStringLen(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)

@@ -849,6 +871,8 @@ BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg,
return translateBuiltinTypeof(build, nparams, ra, arg, args, nresults);
case LBF_VECTOR:
return translateBuiltinVector(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_TABLE_INSERT:
return translateBuiltinTableInsert(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_STRING_LEN:
return translateBuiltinStringLen(build, nparams, ra, arg, args, nresults, pcpos);
default:
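The new builtin translation compiles the two-argument table.insert(t, v) form into a straight-line IR sequence: the table length plus one gives the append position, TABLE_SETNUM produces the destination TValue slot (growing the array part if needed), and a forward table barrier keeps the GC invariant. A rough C-level sketch of the same fast path, assuming luaH_getn/luaH_setnum behave as their signatures in this diff suggest (illustrative only; a real implementation would use the setobj macros and compile in-tree against the VM headers):

// Sketch of the table.insert(t, v) fast path (illustrative, not repository code)
// #include "lobject.h", "ltable.h" when building in-tree
void table_insert_fastpath(lua_State* L, Table* t, const TValue* v)
{
    int pos = luaH_getn(t) + 1;            // TABLE_LEN + ADD_INT
    TValue* slot = luaH_setnum(L, t, pos); // TABLE_SETNUM: may grow the array part
    *slot = *v;                            // STORE_TVALUE (real code uses setobj)
    // BARRIER_TABLE_FORWARD: a black table gaining a white value must be barriered
}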
@@ -382,6 +382,9 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
case TM_DIV:
result = build.inst(IrCmd::DIV_NUM, vb, vc);
break;
case TM_IDIV:
result = build.inst(IrCmd::IDIV_NUM, vb, vc);
break;
case TM_MOD:
result = build.inst(IrCmd::MOD_NUM, vb, vc);
break;

@@ -472,8 +475,9 @@ void translateInstLength(IrBuilder& build, const Instruction* pc, int pcpos)
build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);

IrOp va = build.inst(IrCmd::TABLE_LEN, vb);
IrOp vai = build.inst(IrCmd::INT_TO_NUM, va);

build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), va);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), vai);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

IrOp next = build.blockAtInst(pcpos + 1);

@@ -554,7 +558,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool

IrOp builtinArgs = args;

if (customArgs.kind == IrOpKind::VmConst)
if (customArgs.kind == IrOpKind::VmConst && bfid != LBF_TABLE_INSERT)
{
TValue protok = build.function.proto->k[customArgs.index];

@@ -976,7 +980,7 @@ void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)

IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos));
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos), build.vmConst(aux));

build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);

@@ -1003,7 +1007,7 @@ void translateInstSetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)

IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos));
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos), build.vmConst(aux));

build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
build.inst(IrCmd::CHECK_READONLY, vb, fallback);

@@ -1028,7 +1032,7 @@ void translateInstGetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp fallback = build.block(IrBlockKind::Fallback);

IrOp env = build.inst(IrCmd::LOAD_ENV);
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos));
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos), build.vmConst(aux));

build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);

@@ -1050,7 +1054,7 @@ void translateInstSetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp fallback = build.block(IrBlockKind::Fallback);

IrOp env = build.inst(IrCmd::LOAD_ENV);
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos));
IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos), build.vmConst(aux));

build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
build.inst(IrCmd::CHECK_READONLY, env, fallback);

@@ -1141,7 +1145,7 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos)
build.loadAndCheckTag(indexPtr, LUA_TTABLE, fallback);
IrOp index = build.inst(IrCmd::LOAD_POINTER, indexPtr);

IrOp addrIndexNodeEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, index, build.constUint(pcpos));
IrOp addrIndexNodeEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, index, build.constUint(pcpos), build.vmConst(aux));
build.inst(IrCmd::CHECK_SLOT_MATCH, addrIndexNodeEl, build.vmConst(aux), fallback);

// TODO: original 'table' was clobbered by a call inside 'FASTGETTM'
@@ -54,6 +54,7 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::IDIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:

@@ -79,7 +80,9 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::JUMP_SLOT_MATCH:
return IrValueKind::None;
case IrCmd::TABLE_LEN:
return IrValueKind::Double;
return IrValueKind::Int;
case IrCmd::TABLE_SETNUM:
return IrValueKind::Pointer;
case IrCmd::STRING_LEN:
return IrValueKind::Int;
case IrCmd::NEW_TABLE:

@@ -119,6 +122,7 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::CHECK_ARRAY_SIZE:
case IrCmd::CHECK_SLOT_MATCH:
case IrCmd::CHECK_NODE_NO_NEXT:
case IrCmd::CHECK_NODE_VALUE:
case IrCmd::INTERRUPT:
case IrCmd::CHECK_GC:
case IrCmd::BARRIER_OBJ:

@@ -464,6 +468,10 @@ void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint3
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(function.doubleOp(inst.a) / function.doubleOp(inst.b)));
break;
case IrCmd::IDIV_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(luai_numidiv(function.doubleOp(inst.a), function.doubleOp(inst.b))));
break;
case IrCmd::MOD_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(luai_nummod(function.doubleOp(inst.a), function.doubleOp(inst.b))));

@@ -108,13 +108,14 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
case IrCmd::FINDUPVAL:
break;

// These instrucitons read VmReg only after optimizeMemoryOperandsX64
// These instructions read VmReg only after optimizeMemoryOperandsX64
case IrCmd::CHECK_TAG:
case IrCmd::CHECK_TRUTHY:
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::IDIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:

@@ -53,6 +53,7 @@ void initFunctions(NativeState& data)
data.context.luaH_new = luaH_new;
data.context.luaH_clone = luaH_clone;
data.context.luaH_resizearray = luaH_resizearray;
data.context.luaH_setnum = luaH_setnum;

data.context.luaC_barriertable = luaC_barriertable;
data.context.luaC_barrierf = luaC_barrierf;

@@ -44,6 +44,7 @@ struct NativeContext
Table* (*luaH_new)(lua_State* L, int narray, int lnhash) = nullptr;
Table* (*luaH_clone)(lua_State* L, Table* tt) = nullptr;
void (*luaH_resizearray)(lua_State* L, Table* t, int nasize) = nullptr;
TValue* (*luaH_setnum)(lua_State* L, Table* t, int key);

void (*luaC_barriertable)(lua_State* L, Table* t, GCObject* v) = nullptr;
void (*luaC_barrierf)(lua_State* L, GCObject* o, GCObject* v) = nullptr;
@@ -13,7 +13,10 @@
#include <vector>

LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
LUAU_FASTFLAGVARIABLE(LuauReuseHashSlots2, false)
LUAU_FASTFLAGVARIABLE(LuauKeepVmapLinear, false)

namespace Luau
{

@@ -174,6 +177,10 @@ struct ConstPropState
{
for (int i = 0; i <= maxReg; ++i)
invalidateHeap(regs[i]);

// If table memory has changed, we can't reuse previously computed and validated table slot lookups
getSlotNodeCache.clear();
checkSlotMatchCache.clear();
}

void invalidateHeap(RegisterInfo& reg)

@@ -190,6 +197,21 @@ struct ConstPropState
inSafeEnv = false;
}

void invalidateTableArraySize()
{
for (int i = 0; i <= maxReg; ++i)
invalidateTableArraySize(regs[i]);

// If table memory has changed, we can't reuse previously computed and validated table slot lookups
getSlotNodeCache.clear();
checkSlotMatchCache.clear();
}

void invalidateTableArraySize(RegisterInfo& reg)
{
reg.knownTableArraySize = -1;
}

void createRegLink(uint32_t instIdx, IrOp regOp)
{
LUAU_ASSERT(!instLink.contains(instIdx));

@@ -367,6 +389,8 @@ struct ConstPropState

instLink.clear();
valueMap.clear();
getSlotNodeCache.clear();
checkSlotMatchCache.clear();
}

IrFunction& function;

@@ -384,6 +408,9 @@ struct ConstPropState
DenseHashMap<uint32_t, RegisterLink> instLink{~0u};

DenseHashMap<IrInst, uint32_t, IrInstHash, IrInstEq> valueMap;

std::vector<uint32_t> getSlotNodeCache;
std::vector<uint32_t> checkSlotMatchCache;
};

static void handleBuiltinEffects(ConstPropState& state, LuauBuiltinFunction bfid, uint32_t firstReturnReg, int nresults)

@@ -863,7 +890,25 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::NOP:
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
break;
case IrCmd::GET_SLOT_NODE_ADDR:
if (!FFlag::LuauReuseHashSlots2)
break;

for (uint32_t prevIdx : state.getSlotNodeCache)
{
const IrInst& prev = function.instructions[prevIdx];

if (prev.a == inst.a && prev.c == inst.c)
{
substitute(function, inst, IrOp{IrOpKind::Inst, prevIdx});
return; // Break out from both the loop and the switch
}
}

if (int(state.getSlotNodeCache.size()) < FInt::LuauCodeGenReuseSlotLimit)
state.getSlotNodeCache.push_back(index);
break;
case IrCmd::GET_HASH_NODE_ADDR:
case IrCmd::GET_CLOSURE_UPVAL_ADDR:
break;

@@ -873,6 +918,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::IDIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:

@@ -892,6 +938,10 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::JUMP_EQ_POINTER:
case IrCmd::JUMP_SLOT_MATCH:
case IrCmd::TABLE_LEN:
break;
case IrCmd::TABLE_SETNUM:
state.invalidateTableArraySize();
break;
case IrCmd::STRING_LEN:
case IrCmd::NEW_TABLE:
case IrCmd::DUP_TABLE:

@@ -938,7 +988,26 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
break;
}
case IrCmd::CHECK_SLOT_MATCH:
if (!FFlag::LuauReuseHashSlots2)
break;

for (uint32_t prevIdx : state.checkSlotMatchCache)
{
const IrInst& prev = function.instructions[prevIdx];

if (prev.a == inst.a && prev.b == inst.b)
{
// Only a check for 'nil' value is left
replace(function, block, index, {IrCmd::CHECK_NODE_VALUE, inst.a, inst.c});
return; // Break out from both the loop and the switch
}
}

if (int(state.checkSlotMatchCache.size()) < FInt::LuauCodeGenReuseSlotLimit)
state.checkSlotMatchCache.push_back(index);
break;
case IrCmd::CHECK_NODE_NO_NEXT:
case IrCmd::CHECK_NODE_VALUE:
case IrCmd::BARRIER_TABLE_BACK:
case IrCmd::RETURN:
case IrCmd::COVERAGE:
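Both dedup sites above use the same bounded-cache pattern: remember the indices of previously seen lookups, up to LuauCodeGenReuseSlotLimit of them, and substitute a repeated lookup with the earlier result. A hedged, self-contained C++ sketch of that pattern follows; the names here are illustrative stand-ins, not the repository's types:

#include <cstdint>
#include <vector>

// Sketch: bounded common-subexpression cache, as used for slot lookups above.
// 'Inst' and 'sameKey' stand in for IrInst and the operand comparison.
struct Inst { int a, c; };

bool sameKey(const Inst& x, const Inst& y) { return x.a == y.a && x.c == y.c; }

// Returns the index of an equivalent earlier instruction, or -1 after
// recording this one as a candidate for later reuse.
int findOrRemember(std::vector<uint32_t>& cache, const std::vector<Inst>& insts,
                   uint32_t index, int limit)
{
    for (uint32_t prevIdx : cache)
        if (sameKey(insts[prevIdx], insts[index]))
            return int(prevIdx); // reuse the earlier result

    if (int(cache.size()) < limit) // bound keeps the scan cheap
        cache.push_back(index);
    return -1;
}

The design choice is the cap: an unbounded cache would make each lookup linear in everything seen so far, so the optimizer stops remembering new candidates past the limit rather than paying quadratic compile time.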
@@ -999,7 +1068,10 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
if (RegisterInfo* info = state.tryGetRegisterInfo(inst.b); info && info->knownTableArraySize >= 0)
replace(function, inst.f, build.constUint(info->knownTableArraySize));

state.valueMap.clear(); // TODO: this can be relaxed when x64 emitInstSetList becomes aware of register allocator
// TODO: this can be relaxed when x64 emitInstSetList becomes aware of register allocator
state.valueMap.clear();
state.getSlotNodeCache.clear();
state.checkSlotMatchCache.clear();
break;
case IrCmd::CALL:
state.invalidateRegistersFrom(vmRegOp(inst.a));

@@ -1012,7 +1084,11 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
break;
case IrCmd::FORGLOOP:
state.invalidateRegistersFrom(vmRegOp(inst.a) + 2); // Rn and Rn+1 are not modified
state.valueMap.clear(); // TODO: this can be relaxed when x64 emitInstForGLoop becomes aware of register allocator

// TODO: this can be relaxed when x64 emitInstForGLoop becomes aware of register allocator
state.valueMap.clear();
state.getSlotNodeCache.clear();
state.checkSlotMatchCache.clear();
break;
case IrCmd::FORGLOOP_FALLBACK:
state.invalidateRegistersFrom(vmRegOp(inst.a) + 2); // Rn and Rn+1 are not modified

@@ -1076,8 +1152,15 @@ static void constPropInBlock(IrBuilder& build, IrBlock& block, ConstPropState& s
constPropInInst(state, build, function, block, inst, index);
}

// Value numbering and load/store propagation is not performed between blocks
state.valueMap.clear();
if (!FFlag::LuauKeepVmapLinear)
{
// Value numbering and load/store propagation is not performed between blocks
state.valueMap.clear();

// Same for table slot data propagation
state.getSlotNodeCache.clear();
state.checkSlotMatchCache.clear();
}
}

static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visited, IrBlock* block, ConstPropState& state)

@@ -1086,6 +1169,9 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite

state.clear();

const uint32_t startSortkey = block->sortkey;
uint32_t chainPos = 0;

while (block)
{
uint32_t blockIdx = function.getBlockIndex(*block);

@@ -1094,19 +1180,40 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite

constPropInBlock(build, *block, state);

if (FFlag::LuauKeepVmapLinear)
{
// Value numbering and load/store propagation is not performed between blocks right now
// This is because cross-block value uses limit creation of linear block (restriction in collectDirectBlockJumpPath)
state.valueMap.clear();

// Same for table slot data propagation
state.getSlotNodeCache.clear();
state.checkSlotMatchCache.clear();
}

// Blocks in a chain are guaranteed to follow each other
// We force that by giving all blocks the same sorting key, but consecutive chain keys
block->sortkey = startSortkey;
block->chainkey = chainPos++;

IrInst& termInst = function.instructions[block->finish];

IrBlock* nextBlock = nullptr;

// Unconditional jump into a block with a single user (current block) allows us to continue optimization
// with the information we have gathered so far (unless we have already visited that block earlier)
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind == IrOpKind::Block)
{
IrBlock& target = function.blockOp(termInst.a);
uint32_t targetIdx = function.getBlockIndex(target);

if (target.useCount == 1 && !visited[targetIdx] && target.kind != IrBlockKind::Fallback)
{
// Make sure block ordering guarantee is checked at lowering time
block->expectedNextBlock = function.getBlockIndex(target);

nextBlock = &target;
}
}

block = nextBlock;

@@ -1134,7 +1241,7 @@ static std::vector<uint32_t> collectDirectBlockJumpPath(IrFunction& function, st
IrBlock* nextBlock = nullptr;

// A chain is made from internal blocks that were not a part of bytecode CFG
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind == IrOpKind::Block)
{
IrBlock& target = function.blockOp(termInst.a);
uint32_t targetIdx = function.getBlockIndex(target);

@@ -1175,8 +1282,8 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
if (termInst.cmd != IrCmd::JUMP)
return;

// And it can't be jump to a VM exit
if (termInst.a.kind == IrOpKind::VmExit)
// And it can't be jump to a VM exit or undef
if (termInst.a.kind != IrOpKind::Block)
return;

// And it has to jump to a block with more than one user

@@ -1196,14 +1303,14 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
// Initialize state with the knowledge of our current block
state.clear();

// TODO: using values from the first block can cause 'live out' of the linear block predecessor to not have all required registers
constPropInBlock(build, startingBlock, state);

// Verify that target hasn't changed
LUAU_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx);

// Note: using startingBlock after this line is unsafe as the reference may be reallocated by build.block() below
uint32_t startingInsn = startingBlock.start;
const uint32_t startingSortKey = startingBlock.sortkey;
const uint32_t startingChainKey = startingBlock.chainkey;

// Create new linearized block into which we are going to redirect starting block jump
IrOp newBlock = build.block(IrBlockKind::Linearized);

@@ -1213,7 +1320,11 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited

// By default, blocks are ordered according to start instruction; we alter sort order to make sure linearized block is placed right after the
// starting block
function.blocks[newBlock.index].sortkey = startingInsn + 1;
function.blocks[newBlock.index].sortkey = startingSortKey;
function.blocks[newBlock.index].chainkey = startingChainKey + 1;

// Make sure block ordering guarantee is checked at lowering time
function.blocks[blockIdx].expectedNextBlock = newBlock.index;

replace(function, termInst.a, newBlock);

@@ -1252,6 +1363,12 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
def.varargStart = pathDef.varargStart;
}
}

// Update predecessors
function.cfg.predecessorsOffsets.push_back(uint32_t(function.cfg.predecessors.size()));
function.cfg.predecessors.push_back(blockIdx);

// Updating successors will require visiting the instructions again and we don't have a current use for linearized block successor list
}

// Optimize our linear block

@@ -58,6 +58,7 @@ static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::IDIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
@@ -44,7 +44,7 @@
// Version 1: Baseline version for the open-source release. Supported until 0.521.
// Version 2: Adds Proto::linedefined. Supported until 0.544.
// Version 3: Adds FORGPREP/JUMPXEQK* and enhances AUX encoding for FORGLOOP. Removes FORGLOOP_NEXT/INEXT and JUMPIFEQK/JUMPIFNOTEQK. Currently supported.
// Version 4: Adds Proto::flags and typeinfo. Currently supported.
// Version 4: Adds Proto::flags, typeinfo, and floor division opcodes IDIV/IDIVK. Currently supported.

// Bytecode opcode, part of the instruction header
enum LuauOpcode

@@ -390,6 +390,18 @@ enum LuauOpcode
LOP_JUMPXEQKN,
LOP_JUMPXEQKS,

// IDIV: compute floor division between two source registers and put the result into target register
// A: target register
// B: source register 1
// C: source register 2
LOP_IDIV,

// IDIVK: compute floor division between the source register and a constant and put the result into target register
// A: target register
// B: source register
// C: constant table index (0..255)
LOP_IDIVK,

// Enum entry for number of opcodes, not a valid opcode by itself!
LOP__COUNT
};
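IDIV and IDIVK follow the standard ABC instruction layout, so an encoded instruction word decodes with the existing LUAU_INSN_* accessors. Here is a hedged sketch of a disassembler step for LOP_IDIV; the helper functions below mirror the bit layout the LUAU_INSN_* macros assume (opcode in the low byte, then A, B, C), but the sketch itself is illustrative, not repository code:

#include <cstdint>
#include <cstdio>

// Accessors mirroring LUAU_INSN_OP / LUAU_INSN_A / LUAU_INSN_B / LUAU_INSN_C
static uint8_t insnOp(uint32_t insn) { return insn & 0xff; }
static uint8_t insnA(uint32_t insn) { return (insn >> 8) & 0xff; }
static uint8_t insnB(uint32_t insn) { return (insn >> 16) & 0xff; }
static uint8_t insnC(uint32_t insn) { return (insn >> 24) & 0xff; }

void dumpIdiv(uint32_t insn)
{
    // For LOP_IDIV the semantics are R(A) = R(B) // R(C)
    std::printf("IDIV R%d R%d R%d\n", insnA(insn), insnB(insn), insnC(insn));
}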
@@ -15,7 +15,6 @@ class BytecodeEncoder
public:
virtual ~BytecodeEncoder() {}

virtual uint8_t encodeOp(uint8_t op) = 0;
virtual void encode(uint32_t* data, size_t count) = 0;
};
@@ -8,7 +8,8 @@
#include <string.h>

LUAU_FASTFLAGVARIABLE(BytecodeVersion4, false)
LUAU_FASTFLAGVARIABLE(BytecodeEnc, false)

LUAU_FASTFLAG(LuauFloorDivision)

namespace Luau
{

@@ -238,7 +239,7 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues, uin
// very approximate: 4 bytes per instruction for code, 1 byte for debug line, and 1-2 bytes for aux data like constants plus overhead
func.data.reserve(32 + insns.size() * 7);

if (FFlag::BytecodeEnc && encoder)
if (encoder)
encoder->encode(insns.data(), insns.size());

writeFunction(func.data, currentFunction, flags);

@@ -625,29 +626,8 @@ void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t flags)
// instructions
writeVarInt(ss, uint32_t(insns.size()));

if (encoder && !FFlag::BytecodeEnc)
{
for (size_t i = 0; i < insns.size();)
{
uint8_t op = LUAU_INSN_OP(insns[i]);
LUAU_ASSERT(op < LOP__COUNT);

int oplen = getOpLength(LuauOpcode(op));
uint8_t openc = encoder->encodeOp(op);

writeInt(ss, openc | (insns[i] & ~0xff));

for (int j = 1; j < oplen; ++j)
writeInt(ss, insns[i + j]);

i += oplen;
}
}
else
{
for (uint32_t insn : insns)
writeInt(ss, insn);
}
for (uint32_t insn : insns)
writeInt(ss, insn);

// constants
writeVarInt(ss, uint32_t(constants.size()));

@@ -1306,8 +1286,11 @@ void BytecodeBuilder::validateInstructions() const
case LOP_SUB:
case LOP_MUL:
case LOP_DIV:
case LOP_IDIV:
case LOP_MOD:
case LOP_POW:
LUAU_ASSERT(FFlag::LuauFloorDivision || op != LOP_IDIV);

VREG(LUAU_INSN_A(insn));
VREG(LUAU_INSN_B(insn));
VREG(LUAU_INSN_C(insn));

@@ -1317,8 +1300,11 @@ void BytecodeBuilder::validateInstructions() const
case LOP_SUBK:
case LOP_MULK:
case LOP_DIVK:
case LOP_IDIVK:
case LOP_MODK:
case LOP_POWK:
LUAU_ASSERT(FFlag::LuauFloorDivision || op != LOP_IDIVK);

VREG(LUAU_INSN_A(insn));
VREG(LUAU_INSN_B(insn));
VCONST(LUAU_INSN_C(insn), Number);

@@ -1885,6 +1871,12 @@ void BytecodeBuilder::dumpInstruction(const uint32_t* code, std::string& result,
formatAppend(result, "DIV R%d R%d R%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
break;

case LOP_IDIV:
LUAU_ASSERT(FFlag::LuauFloorDivision);

formatAppend(result, "IDIV R%d R%d R%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
break;

case LOP_MOD:
formatAppend(result, "MOD R%d R%d R%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
break;

@@ -1917,6 +1909,14 @@ void BytecodeBuilder::dumpInstruction(const uint32_t* code, std::string& result,
result.append("]\n");
break;

case LOP_IDIVK:
LUAU_ASSERT(FFlag::LuauFloorDivision);

formatAppend(result, "IDIVK R%d R%d K%d [", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
dumpConstant(result, LUAU_INSN_C(insn));
result.append("]\n");
break;

case LOP_MODK:
formatAppend(result, "MODK R%d R%d K%d [", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
dumpConstant(result, LUAU_INSN_C(insn));
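With BytecodeEnc enabled, encoding happens once over the whole instruction stream instead of per opcode byte during serialization, which is why the per-opcode encodeOp virtual is gone from the interface. A hedged sketch of an encoder against the new interface follows; the abstract class shape matches the BytecodeBuilder.h hunk above, while the concrete subclass is purely illustrative:

#include <cstddef>
#include <cstdint>

class BytecodeEncoder
{
public:
    virtual ~BytecodeEncoder() {}
    virtual void encode(uint32_t* data, size_t count) = 0;
};

// Illustrative no-op encoder: a real one would remap opcode bytes in place.
class NopEncoder : public BytecodeEncoder
{
public:
    void encode(uint32_t* /*data*/, size_t /*count*/) override
    {
        // Note for a real implementation: multi-word instructions mean not
        // every word starts with an opcode, so a remapping encoder must walk
        // the stream using per-opcode lengths rather than word by word.
    }
};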
@@ -26,6 +26,8 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25)
LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300)
LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5)

LUAU_FASTFLAG(LuauFloorDivision)

namespace Luau
{

@@ -1019,6 +1021,11 @@ struct Compiler
case AstExprBinary::Div:
return k ? LOP_DIVK : LOP_DIV;

case AstExprBinary::FloorDiv:
LUAU_ASSERT(FFlag::LuauFloorDivision);

return k ? LOP_IDIVK : LOP_IDIV;

case AstExprBinary::Mod:
return k ? LOP_MODK : LOP_MOD;

@@ -1469,9 +1476,12 @@ struct Compiler
case AstExprBinary::Sub:
case AstExprBinary::Mul:
case AstExprBinary::Div:
case AstExprBinary::FloorDiv:
case AstExprBinary::Mod:
case AstExprBinary::Pow:
{
LUAU_ASSERT(FFlag::LuauFloorDivision || expr->op != AstExprBinary::FloorDiv);

int32_t rc = getConstantNumber(expr->right);

if (rc >= 0 && rc <= 255)

@@ -3192,9 +3202,12 @@ struct Compiler
case AstExprBinary::Sub:
case AstExprBinary::Mul:
case AstExprBinary::Div:
case AstExprBinary::FloorDiv:
case AstExprBinary::Mod:
case AstExprBinary::Pow:
{
LUAU_ASSERT(FFlag::LuauFloorDivision || stat->op != AstExprBinary::FloorDiv);

if (var.kind != LValue::Kind_Local)
compileLValueUse(var, target, /* set= */ false);

@@ -104,6 +104,14 @@ static void foldBinary(Constant& result, AstExprBinary::Op op, const Constant& l
}
break;

case AstExprBinary::FloorDiv:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = floor(la.valueNumber / ra.valueNumber);
}
break;

case AstExprBinary::Mod:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
@@ -4,6 +4,7 @@
#include "Luau/Lexer.h"
#include "Luau/StringUtils.h"

LUAU_FASTFLAG(LuauFloorDivision)
namespace Luau
{

@@ -112,14 +113,23 @@ static void next(Lexer& lexer)
lexer.next();

// skip C-style comments as Lexer only understands Lua-style comments atm
while (lexer.current().type == '/')
if (FFlag::LuauFloorDivision)
{
Lexeme peek = lexer.lookahead();
while (lexer.current().type == Luau::Lexeme::FloorDiv)
lexer.nextline();
}
else
{
while (lexer.current().type == '/')
{
Lexeme peek = lexer.lookahead();

if (peek.type != '/' || peek.location.begin != lexer.current().location.end)
break;
if (peek.type != '/' || peek.location.begin != lexer.current().location.end)
break;

lexer.nextline();
lexer.nextline();
}
}
}
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2019-2022 Roblox Corporation
Copyright (c) 2019-2023 Roblox Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in

Makefile

@@ -255,6 +255,7 @@ $(BUILD)/fuzz/protoprint.cpp.o: fuzz/luau.pb.cpp

build/libprotobuf-mutator:
git clone https://github.com/google/libprotobuf-mutator build/libprotobuf-mutator
git -C build/libprotobuf-mutator checkout 212a7be1eb08e7f9c79732d2aab9b2097085d936
CXX= cmake -S build/libprotobuf-mutator -B build/libprotobuf-mutator $(DPROTOBUF)
make -C build/libprotobuf-mutator -j8

@@ -443,7 +443,7 @@ typedef struct lua_Callbacks lua_Callbacks;
LUA_API lua_Callbacks* lua_callbacks(lua_State* L);

/******************************************************************************
* Copyright (c) 2019-2022 Roblox Corporation
* Copyright (c) 2019-2023 Roblox Corporation
* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining

@@ -38,7 +38,7 @@ const char* lua_ident = "$Lua: Lua 5.1.4 Copyright (C) 1994-2008 Lua.org, PUC-Ri
"$Authors: R. Ierusalimschy, L. H. de Figueiredo & W. Celes $\n"
"$URL: www.lua.org $\n";

const char* luau_ident = "$Luau: Copyright (C) 2019-2022 Roblox Corporation $\n"
const char* luau_ident = "$Luau: Copyright (C) 2019-2023 Roblox Corporation $\n"
"$URL: luau-lang.org $\n";

#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base))
@@ -40,6 +40,13 @@ inline double luai_nummod(double a, double b)
}
LUAU_FASTMATH_END

LUAU_FASTMATH_BEGIN
inline double luai_numidiv(double a, double b)
{
return floor(a / b);
}
LUAU_FASTMATH_END

#define luai_num2int(i, d) ((i) = (int)(d))

// On MSVC in 32-bit, double to unsigned cast compiles into a call to __dtoui3, so we invoke x87->int64 conversion path manually
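Because luai_numidiv floors the quotient rather than truncating it, floor division differs from C-style integer division whenever the operands have opposite signs. A hedged standalone check of the semantics, using the same formula as the definition above (illustrative, not repository code):

#include <cassert>
#include <cmath>

// Same formula as luai_numidiv: floor of the real quotient
inline double num_idiv(double a, double b) { return std::floor(a / b); }

int main()
{
    assert(num_idiv(7, 2) == 3);        // 3.5 floors to 3
    assert(num_idiv(-7, 2) == -4);      // -3.5 floors to -4 (truncation would give -3)
    assert(num_idiv(7, -2) == -4);      // symmetric for a negative divisor
    assert(std::isinf(num_idiv(1, 0))); // division by zero follows IEEE 754
}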
@@ -48,6 +48,7 @@ const char* const luaT_eventname[] = {
"__sub",
"__mul",
"__div",
"__idiv",
"__mod",
"__pow",
"__unm",

@@ -27,6 +27,7 @@ typedef enum
TM_SUB,
TM_MUL,
TM_DIV,
TM_IDIV,
TM_MOD,
TM_POW,
TM_UNM,

@@ -103,7 +103,8 @@
VM_DISPATCH_OP(LOP_LOADKX), VM_DISPATCH_OP(LOP_JUMPX), VM_DISPATCH_OP(LOP_FASTCALL), VM_DISPATCH_OP(LOP_COVERAGE), \
VM_DISPATCH_OP(LOP_CAPTURE), VM_DISPATCH_OP(LOP_DEP_JUMPIFEQK), VM_DISPATCH_OP(LOP_DEP_JUMPIFNOTEQK), VM_DISPATCH_OP(LOP_FASTCALL1), \
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K), VM_DISPATCH_OP(LOP_FORGPREP), VM_DISPATCH_OP(LOP_JUMPXEQKNIL), \
VM_DISPATCH_OP(LOP_JUMPXEQKB), VM_DISPATCH_OP(LOP_JUMPXEQKN), VM_DISPATCH_OP(LOP_JUMPXEQKS),
VM_DISPATCH_OP(LOP_JUMPXEQKB), VM_DISPATCH_OP(LOP_JUMPXEQKN), VM_DISPATCH_OP(LOP_JUMPXEQKS), VM_DISPATCH_OP(LOP_IDIV), \
VM_DISPATCH_OP(LOP_IDIVK),

#if defined(__GNUC__) || defined(__clang__)
#define VM_USE_CGOTO 1
@@ -1660,6 +1661,54 @@ reentry:
}
}

VM_CASE(LOP_IDIV)
{
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
StkId rc = VM_REG(LUAU_INSN_C(insn));

// fast-path
if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
{
setnvalue(ra, luai_numidiv(nvalue(rb), nvalue(rc)));
VM_NEXT();
}
else if (ttisvector(rb) && ttisnumber(rc))
{
const float* vb = vvalue(rb);
float vc = cast_to(float, nvalue(rc));
setvvalue(ra, float(luai_numidiv(vb[0], vc)), float(luai_numidiv(vb[1], vc)), float(luai_numidiv(vb[2], vc)),
float(luai_numidiv(vb[3], vc)));
VM_NEXT();
}
else
{
// fast-path for userdata with C functions
StkId rbc = ttisnumber(rb) ? rc : rb;
const TValue* fn = 0;
if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_IDIV)) && ttisfunction(fn) && clvalue(fn)->isC)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
StkId top = L->top;
setobj2s(L, top + 0, fn);
setobj2s(L, top + 1, rb);
setobj2s(L, top + 2, rc);
L->top = top + 3;

VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
{
// slow-path, may invoke C/Lua via metamethods
VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_IDIV));
VM_NEXT();
}
}
}

VM_CASE(LOP_MOD)
{
Instruction insn = *pc++;

@@ -1838,6 +1887,53 @@ reentry:
}
}

VM_CASE(LOP_IDIVK)
{
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
TValue* kv = VM_KV(LUAU_INSN_C(insn));

// fast-path
if (LUAU_LIKELY(ttisnumber(rb)))
{
setnvalue(ra, luai_numidiv(nvalue(rb), nvalue(kv)));
VM_NEXT();
}
else if (ttisvector(rb))
{
const float* vb = vvalue(rb);
float vc = cast_to(float, nvalue(kv));
setvvalue(ra, float(luai_numidiv(vb[0], vc)), float(luai_numidiv(vb[1], vc)), float(luai_numidiv(vb[2], vc)),
float(luai_numidiv(vb[3], vc)));
VM_NEXT();
}
else
{
// fast-path for userdata with C functions
const TValue* fn = 0;
if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_IDIV)) && ttisfunction(fn) && clvalue(fn)->isC)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
StkId top = L->top;
setobj2s(L, top + 0, fn);
setobj2s(L, top + 1, rb);
setobj2s(L, top + 2, kv);
L->top = top + 3;

VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
{
// slow-path, may invoke C/Lua via metamethods
VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_IDIV));
VM_NEXT();
}
}
}

VM_CASE(LOP_MODK)
{
Instruction insn = *pc++;
@@ -394,6 +394,9 @@ void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TM
case TM_DIV:
setnvalue(ra, luai_numdiv(nb, nc));
break;
case TM_IDIV:
setnvalue(ra, luai_numidiv(nb, nc));
break;
case TM_MOD:
setnvalue(ra, luai_nummod(nb, nc));
break;

@@ -410,7 +413,12 @@ void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TM
}
else
{
// vector operations that we support: v + v, v - v, v * v, s * v, v * s, v / v, s / v, v / s, -v
// vector operations that we support:
// v+v v-v -v (add/sub/neg)
// v*v s*v v*s (mul)
// v/v s/v v/s (div)
// v//v s//v v//s (floor div)

const float* vb = luaV_tovector(rb);
const float* vc = luaV_tovector(rc);

@@ -430,6 +438,10 @@ void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TM
case TM_DIV:
setvvalue(ra, vb[0] / vc[0], vb[1] / vc[1], vb[2] / vc[2], vb[3] / vc[3]);
return;
case TM_IDIV:
setvvalue(ra, float(luai_numidiv(vb[0], vc[0])), float(luai_numidiv(vb[1], vc[1])), float(luai_numidiv(vb[2], vc[2])),
float(luai_numidiv(vb[3], vc[3])));
return;
case TM_UNM:
setvvalue(ra, -vb[0], -vb[1], -vb[2], -vb[3]);
return;

@@ -453,6 +465,10 @@ void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TM
case TM_DIV:
setvvalue(ra, vb[0] / nc, vb[1] / nc, vb[2] / nc, vb[3] / nc);
return;
case TM_IDIV:
setvvalue(ra, float(luai_numidiv(vb[0], nc)), float(luai_numidiv(vb[1], nc)), float(luai_numidiv(vb[2], nc)),
float(luai_numidiv(vb[3], nc)));
return;
default:
break;
}

@@ -474,6 +490,10 @@ void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TM
case TM_DIV:
setvvalue(ra, nb / vc[0], nb / vc[1], nb / vc[2], nb / vc[3]);
return;
case TM_IDIV:
setvvalue(ra, float(luai_numidiv(nb, vc[0])), float(luai_numidiv(nb, vc[1])), float(luai_numidiv(nb, vc[2])),
float(luai_numidiv(nb, vc[3])));
return;
default:
break;
}
bench/micro_tests/test_TableSort.lua (new file, 22 lines)

@@ -0,0 +1,22 @@
local bench = script and require(script.Parent.bench_support) or require("bench_support")

local arr_months = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}

local arr_num = {}
for i=1,100 do table.insert(arr_num, math.sin(i)) end

local arr_numk = {}
for i=1,10000 do table.insert(arr_numk, math.sin(i)) end

function test(arr)
local t = table.create(#arr)

for i=1,1e6/#arr do
table.move(arr, 1, #arr, 1, t)
table.sort(t)
end
end

bench.runCode(function() test(arr_months) end, "table.sort: 12 strings")
bench.runCode(function() test(arr_num) end, "table.sort: 100 numbers")
bench.runCode(function() test(arr_numk) end, "table.sort: 10k numbers")
@@ -45,7 +45,7 @@ class TablePrinter(object):
def _print_horizontal_separator(self):
for i, align_width in enumerate(self._widths):
if i > 0:
print('-+-', end='')
print('-|-', end='')
print('-' * (align_width+1), end='')
print()
pass

@@ -135,17 +135,18 @@ message ExprBinary {
Sub = 1;
Mul = 2;
Div = 3;
Mod = 4;
Pow = 5;
Concat = 6;
CompareNe = 7;
CompareEq = 8;
CompareLt = 9;
CompareLe = 10;
CompareGt = 11;
CompareGe = 12;
And = 13;
Or = 14;
FloorDiv = 4;
Mod = 5;
Pow = 6;
Concat = 7;
CompareNe = 8;
CompareEq = 9;
CompareLt = 10;
CompareLe = 11;
CompareGt = 12;
CompareGe = 13;
And = 14;
Or = 15;
}

required Op op = 1;

@@ -495,6 +495,8 @@ struct ProtoToLuau
source += " * ";
else if (expr.op() == luau::ExprBinary::Div)
source += " / ";
else if (expr.op() == luau::ExprBinary::FloorDiv)
source += " // ";
else if (expr.op() == luau::ExprBinary::Mod)
source += " % ";
else if (expr.op() == luau::ExprBinary::Pow)
@ -107,6 +107,13 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Binary")
|
|||
SINGLE_COMPARE(cmp(w0, 42), 0x7100A81F);
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "BinaryExtended")
|
||||
{
|
||||
// reg, reg
|
||||
SINGLE_COMPARE(add(x0, x1, w2, 3), 0x8B224C20);
|
||||
SINGLE_COMPARE(sub(x0, x1, w2, 3), 0xCB224C20);
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "BinaryImm")
|
||||
{
|
||||
// instructions
|
||||
|
@ -524,6 +531,8 @@ TEST_CASE("LogTest")
|
|||
build.ldr(x0, mem(x1, 1, AddressKindA64::pre));
|
||||
build.ldr(x0, mem(x1, 1, AddressKindA64::post));
|
||||
|
||||
build.add(x1, x2, w3, 3);
|
||||
|
||||
build.setLabel(l);
|
||||
build.ret();
|
||||
|
||||
|
@ -560,6 +569,7 @@ TEST_CASE("LogTest")
|
|||
ldr x0,[x1,#1]
|
||||
ldr x0,[x1,#1]!
|
||||
ldr x0,[x1]!,#1
|
||||
add x1,x2,w3 UXTW #3
|
||||
.L1:
|
||||
ret
|
||||
)";
|
||||
|
|
|
@ -986,6 +986,33 @@ TEST_CASE_FIXTURE(ACFixture, "autocomplete_end_with_lambda")
|
|||
CHECK_EQ(ac.context, AutocompleteContext::Statement);
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(ACFixture, "autocomplete_end_of_do_block")
|
||||
{
|
||||
ScopedFastFlag sff{"LuauAutocompleteDoEnd", true};
|
||||
|
||||
check("do @1");
|
||||
|
||||
auto ac = autocomplete('1');
|
||||
|
||||
CHECK(ac.entryMap.count("end"));
|
||||
|
||||
check(R"(
|
||||
function f()
|
||||
do
|
||||
@1
|
||||
end
|
||||
@2
|
||||
)");
|
||||
|
||||
ac = autocomplete('1');
|
||||
|
||||
CHECK(ac.entryMap.count("end"));
|
||||
|
||||
ac = autocomplete('2');
|
||||
|
||||
CHECK(ac.entryMap.count("end"));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(ACFixture, "stop_at_first_stat_when_recommending_keywords")
|
||||
{
|
||||
check(R"(
|
||||
|
|
|
@ -412,6 +412,10 @@ static void obscureThrowCase(int64_t (*f)(int64_t, void (*)(int64_t)))
|
|||
|
||||
TEST_CASE("GeneratedCodeExecutionWithThrowX64Simd")
|
||||
{
|
||||
// This test requires AVX
|
||||
if (!Luau::CodeGen::isSupported())
|
||||
return;
|
||||
|
||||
using namespace X64;
|
||||
|
||||
AssemblyBuilderX64 build(/* logText= */ false);
|
||||
|
|
|
@@ -24,6 +24,7 @@ extern bool codegen;
extern int optimizationLevel;

LUAU_FASTFLAG(LuauPCallDebuggerFix);
LUAU_FASTFLAG(LuauFloorDivision);

static lua_CompileOptions defaultOptions()
{

@@ -280,6 +281,7 @@ TEST_CASE("Assert")

TEST_CASE("Basic")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};
    runConformance("basic.lua");
}

@@ -363,6 +365,7 @@ TEST_CASE("Errors")

TEST_CASE("Events")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};
    runConformance("events.lua");
}

@@ -444,6 +447,8 @@ TEST_CASE("Pack")

TEST_CASE("Vector")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};

    lua_CompileOptions copts = defaultOptions();
    copts.vectorCtor = "vector";

@@ -1616,6 +1621,9 @@ static void pushInt64(lua_State* L, int64_t value)

TEST_CASE("Userdata")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};

    runConformance("userdata.lua", [](lua_State* L) {
        // create metatable with all the metamethods
        lua_newtable(L);
@@ -1735,6 +1743,19 @@ TEST_CASE("Userdata")
            nullptr);
        lua_setfield(L, -2, "__div");

        // __idiv
        lua_pushcfunction(
            L,
            [](lua_State* L) {
                // for testing we use different semantics here compared to __div: __idiv rounds to negative inf, __div truncates (rounds to zero)
                // additionally, division loses precision here outside of 2^53 range
                // we do not necessarily recommend this behavior in production code!
                pushInt64(L, int64_t(floor(double(getInt64(L, 1)) / double(getInt64(L, 2)))));
                return 1;
            },
            nullptr);
        lua_setfield(L, -2, "__idiv");

        // __mod
        lua_pushcfunction(
            L,
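Only the C++ half of the hookup is visible in this hunk; below is a hedged Luau-level sketch of the `__idiv` dispatch the test exercises, where `box` is a hypothetical stand-in for the int64 userdata built above:

    local mt = {}
    mt.__idiv = function(a, b)
        -- mirror the test's __idiv: round the quotient toward negative infinity
        return setmetatable({ v = math.floor(a.v / b.v) }, mt)
    end

    local function box(n)
        return setmetatable({ v = n }, mt)
    end

    assert((box(-5) // box(2)).v == -3) -- '//' on non-number operands falls back to __idiv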
@@ -5,6 +5,7 @@
#include "Luau/IrUtils.h"
#include "Luau/OptimizeConstProp.h"
#include "Luau/OptimizeFinalX64.h"
#include "ScopedFlags.h"

#include "doctest.h"

@@ -1930,6 +1931,135 @@ bb_0:
)");
}

TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateHashSlotChecks")
{
    ScopedFastFlag luauReuseHashSlots{"LuauReuseHashSlots2", true};

    IrOp block = build.block(IrBlockKind::Internal);
    IrOp fallback = build.block(IrBlockKind::Fallback);

    build.beginBlock(block);

    // This roughly corresponds to 'return t.a + t.a'
    IrOp table1 = build.inst(IrCmd::LOAD_POINTER, build.vmReg(1));
    IrOp slot1 = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table1, build.constUint(3), build.vmConst(1));
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot1, build.vmConst(1), fallback);
    IrOp value1 = build.inst(IrCmd::LOAD_TVALUE, slot1, build.constInt(0));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(3), value1);

    IrOp slot1b = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table1, build.constUint(8), build.vmConst(1)); // This will be removed
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot1b, build.vmConst(1), fallback); // Key will be replaced with undef here
    IrOp value1b = build.inst(IrCmd::LOAD_TVALUE, slot1b, build.constInt(0));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(4), value1b);

    IrOp a = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(3));
    IrOp b = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(4));
    IrOp sum = build.inst(IrCmd::ADD_NUM, a, b);
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(2), sum);

    build.inst(IrCmd::RETURN, build.vmReg(2), build.constUint(1));

    build.beginBlock(fallback);
    build.inst(IrCmd::RETURN, build.vmReg(0), build.constUint(1));

    updateUseCounts(build.function);
    constPropInBlockChains(build, true);

    // In the future, we might even see duplicate identical TValue loads go away
    // In the future, we might even see loads of different VM regs with the same value go away
    CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = GET_SLOT_NODE_ADDR %0, 3u, K1
CHECK_SLOT_MATCH %1, K1, bb_fallback_1
%3 = LOAD_TVALUE %1, 0i
STORE_TVALUE R3, %3
CHECK_NODE_VALUE %1, bb_fallback_1
%7 = LOAD_TVALUE %1, 0i
STORE_TVALUE R4, %7
%9 = LOAD_DOUBLE R3
%10 = LOAD_DOUBLE R4
%11 = ADD_NUM %9, %10
STORE_DOUBLE R2, %11
RETURN R2, 1u

bb_fallback_1:
RETURN R0, 1u

)");
}

TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateHashSlotChecksAvoidNil")
{
    ScopedFastFlag luauReuseHashSlots{"LuauReuseHashSlots2", true};

    IrOp block = build.block(IrBlockKind::Internal);
    IrOp fallback = build.block(IrBlockKind::Fallback);

    build.beginBlock(block);

    IrOp table1 = build.inst(IrCmd::LOAD_POINTER, build.vmReg(1));
    IrOp slot1 = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table1, build.constUint(3), build.vmConst(1));
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot1, build.vmConst(1), fallback);
    IrOp value1 = build.inst(IrCmd::LOAD_TVALUE, slot1, build.constInt(0));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(3), value1);

    IrOp table2 = build.inst(IrCmd::LOAD_POINTER, build.vmReg(2));
    IrOp slot2 = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table2, build.constUint(6), build.vmConst(1));
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot2, build.vmConst(1), fallback);
    build.inst(IrCmd::CHECK_READONLY, table2, fallback);

    build.inst(IrCmd::STORE_TAG, build.vmReg(4), build.constTag(tnil));
    IrOp valueNil = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(4));
    build.inst(IrCmd::STORE_TVALUE, slot2, valueNil, build.constInt(0));

    // In the future, we might get to track that value became 'nil' and that fallback will be taken
    IrOp slot1b = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table1, build.constUint(8), build.vmConst(1)); // This will be removed
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot1b, build.vmConst(1), fallback); // Key will be replaced with undef here
    IrOp value1b = build.inst(IrCmd::LOAD_TVALUE, slot1b, build.constInt(0));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(3), value1b);

    IrOp slot2b = build.inst(IrCmd::GET_SLOT_NODE_ADDR, table2, build.constUint(11), build.vmConst(1)); // This will be removed
    build.inst(IrCmd::CHECK_SLOT_MATCH, slot2b, build.vmConst(1), fallback); // Key will be replaced with undef here
    build.inst(IrCmd::CHECK_READONLY, table2, fallback);

    build.inst(IrCmd::STORE_SPLIT_TVALUE, slot2b, build.constTag(tnumber), build.constDouble(1), build.constInt(0));

    build.inst(IrCmd::RETURN, build.vmReg(3), build.constUint(2));

    build.beginBlock(fallback);
    build.inst(IrCmd::RETURN, build.vmReg(1), build.constUint(2));

    updateUseCounts(build.function);
    constPropInBlockChains(build, true);

    CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = GET_SLOT_NODE_ADDR %0, 3u, K1
CHECK_SLOT_MATCH %1, K1, bb_fallback_1
%3 = LOAD_TVALUE %1, 0i
STORE_TVALUE R3, %3
%5 = LOAD_POINTER R2
%6 = GET_SLOT_NODE_ADDR %5, 6u, K1
CHECK_SLOT_MATCH %6, K1, bb_fallback_1
CHECK_READONLY %5, bb_fallback_1
STORE_TAG R4, tnil
%10 = LOAD_TVALUE R4
STORE_TVALUE %6, %10, 0i
CHECK_NODE_VALUE %1, bb_fallback_1
%14 = LOAD_TVALUE %1, 0i
STORE_TVALUE R3, %14
CHECK_NODE_VALUE %6, bb_fallback_1
STORE_SPLIT_TVALUE %6, tnumber, 1, 0i
RETURN R3, 2u

bb_fallback_1:
RETURN R1, 2u

)");
}

TEST_SUITE_END();

TEST_SUITE_BEGIN("Analysis");
@@ -49,9 +49,9 @@ TEST_CASE_FIXTURE(IrRegAllocX64Fixture, "RelocateFix")
    LUAU_ASSERT(function.instructions[1].spilled);

    checkMatch(R"(
 vmovsd qword ptr [rsp+070h],rax
 vmovsd qword ptr [rsp+078h],rax
 vmovsd rax,qword ptr [rsp+070h]
 vmovsd qword ptr [rsp+048h],rax
 vmovsd qword ptr [rsp+050h],rax
 vmovsd rax,qword ptr [rsp+048h]
)");
}
@@ -2970,4 +2970,40 @@ TEST_CASE_FIXTURE(Fixture, "unfinished_string_literal_types_get_reported_but_par
    CHECK_EQ(result.root->body.size, 2);
}

TEST_CASE_FIXTURE(Fixture, "do_block_with_no_end")
{
    ParseResult result = tryParse(R"(
        do
    )");

    REQUIRE_EQ(1, result.errors.size());

    AstStatBlock* stat0 = result.root->body.data[0]->as<AstStatBlock>();
    REQUIRE(stat0);

    CHECK(!stat0->hasEnd);
}

TEST_CASE_FIXTURE(Fixture, "parse_interpolated_string_with_lookahead_involved")
{
    ScopedFastFlag sff{"LuauLexerLookaheadRemembersBraceType", true};

    ParseResult result = tryParse(R"(
        local x = `{ {y} }`
    )");

    REQUIRE_MESSAGE(result.errors.empty(), result.errors[0].getMessage());
}

TEST_CASE_FIXTURE(Fixture, "parse_interpolated_string_with_lookahead_involved2")
{
    ScopedFastFlag sff{"LuauLexerLookaheadRemembersBraceType", true};

    ParseResult result = tryParse(R"(
        local x = `{ { y{} } }`
    )");

    REQUIRE_MESSAGE(result.errors.empty(), result.errors[0].getMessage());
}

TEST_SUITE_END();
@@ -52,6 +52,35 @@ struct SubtypeFixture : Fixture
        return arena.addType(TableType{std::move(props), std::nullopt, {}, TableState::Sealed});
    }

    // `&`
    TypeId meet(TypeId a, TypeId b)
    {
        return arena.addType(IntersectionType{{a, b}});
    }

    // `|`
    TypeId join(TypeId a, TypeId b)
    {
        return arena.addType(UnionType{{a, b}});
    }

    TypeId negate(TypeId ty)
    {
        return arena.addType(NegationType{ty});
    }

    TypeId cls(const std::string& name, std::optional<TypeId> parent = std::nullopt)
    {
        return arena.addType(ClassType{name, {}, parent.value_or(builtinTypes->classType), {}, {}, nullptr, ""});
    }

    TypeId cls(const std::string& name, ClassType::Props&& props)
    {
        TypeId ty = cls(name);
        getMutable<ClassType>(ty)->props = std::move(props);
        return ty;
    }

    TypeId cyclicTable(std::function<void(TypeId, TableType*)>&& cb)
    {
        TypeId res = arena.addType(GenericType{});
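These `meet`/`join`/`negate` helpers build the arena-level equivalents of Luau's surface type operators. A hedged sketch of the correspondence in Luau syntax (the type names are illustrative, not from the diff):

    type Hello = "hello"
    type World = "world"

    type HelloOrWorld = Hello | World  -- what join(helloType, worldType) builds
    type HelloAndWorld = Hello & World -- what meet(helloType, worldType) builds; uninhabited for disjoint singletons

    local greeting: HelloOrWorld = "hello" -- OK: "hello" <: Hello | World

As of this commit, `negate` has no user-facing spelling; NegationType is internal to the solver.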
@@ -61,6 +90,11 @@ struct SubtypeFixture : Fixture
        return res;
    }

    TypeId meta(TableType::Props&& metaProps, TableType::Props&& tableProps = {})
    {
        return arena.addType(MetatableType{tbl(std::move(tableProps)), tbl(std::move(metaProps))});
    }

    TypeId genericT = arena.addType(GenericType{"T"});
    TypeId genericU = arena.addType(GenericType{"U"});

@@ -77,8 +111,34 @@ struct SubtypeFixture : Fixture
    TypeId helloType2 = arena.addType(SingletonType{StringSingleton{"hello"}});
    TypeId worldType = arena.addType(SingletonType{StringSingleton{"world"}});

    TypeId helloOrWorldType = arena.addType(UnionType{{helloType, worldType}});
    TypeId trueOrFalseType = arena.addType(UnionType{{builtinTypes->trueType, builtinTypes->falseType}});
    TypeId helloOrWorldType = join(helloType, worldType);
    TypeId trueOrFalseType = join(builtinTypes->trueType, builtinTypes->falseType);

    TypeId helloAndWorldType = meet(helloType, worldType);
    TypeId booleanAndTrueType = meet(builtinTypes->booleanType, builtinTypes->trueType);

    /**
     * class
     * \- Root
     *    |- Child
     *    |  |-GrandchildOne
     *    |  \-GrandchildTwo
     *    \- AnotherChild
     *       |- AnotherGrandchildOne
     *       \- AnotherGrandchildTwo
     */
    TypeId rootClass = cls("Root");
    TypeId childClass = cls("Child", rootClass);
    TypeId grandchildOneClass = cls("GrandchildOne", childClass);
    TypeId grandchildTwoClass = cls("GrandchildTwo", childClass);
    TypeId anotherChildClass = cls("AnotherChild", rootClass);
    TypeId anotherGrandchildOneClass = cls("AnotherGrandchildOne", anotherChildClass);
    TypeId anotherGrandchildTwoClass = cls("AnotherGrandchildTwo", anotherChildClass);

    TypeId vec2Class = cls("Vec2", {
        {"X", builtinTypes->numberType},
        {"Y", builtinTypes->numberType},
    });

    // "hello" | "hello"
    TypeId helloOrHelloType = arena.addType(UnionType{{helloType, helloType}});

@@ -86,12 +146,6 @@ struct SubtypeFixture : Fixture
    // () -> ()
    const TypeId nothingToNothingType = fn({}, {});

    // ("hello") -> "world"
    TypeId helloAndWorldType = arena.addType(IntersectionType{{helloType, worldType}});

    // (boolean) -> true
    TypeId booleanAndTrueType = arena.addType(IntersectionType{{builtinTypes->booleanType, builtinTypes->trueType}});

    // (number) -> string
    const TypeId numberToStringType = fn(
        {builtinTypes->numberType},

@@ -247,6 +301,11 @@ struct SubtypeFixture : Fixture
        builtinTypes->emptyTypePack,
        genericAs
    });

    // { lower : string -> string }
    TypeId tableWithLower = tbl(TableType::Props{{"lower", fn({builtinTypes->stringType}, {builtinTypes->stringType})}});
    // { insaneThingNoScalarHas : () -> () }
    TypeId tableWithoutScalarProp = tbl(TableType::Props{{"insaneThingNoScalarHas", fn({}, {})}});
};

#define CHECK_IS_SUBTYPE(left, right) \
@@ -620,6 +679,99 @@ TEST_CASE_FIXTURE(SubtypeFixture, "{x: <T>(T) -> ()} <: {x: <U>(U) -> ()}")
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable { x: number } } <: { @metatable {} }")
{
    CHECK_IS_SUBTYPE(
        meta({{"x", builtinTypes->numberType}}),
        meta({})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable { x: number } } <!: { @metatable { x: boolean } }")
{
    CHECK_IS_NOT_SUBTYPE(
        meta({{"x", builtinTypes->numberType}}),
        meta({{"x", builtinTypes->booleanType}})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable {} } <!: { @metatable { x: boolean } }")
{
    CHECK_IS_NOT_SUBTYPE(
        meta({}),
        meta({{"x", builtinTypes->booleanType}})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable {} } <: {}")
{
    CHECK_IS_SUBTYPE(
        meta({}),
        tbl({})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable { u: boolean }, x: number } <: { x: number }")
{
    CHECK_IS_SUBTYPE(
        meta({{"u", builtinTypes->booleanType}}, {{"x", builtinTypes->numberType}}),
        tbl({{"x", builtinTypes->numberType}})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ @metatable { x: number } } <!: { x: number }")
{
    CHECK_IS_NOT_SUBTYPE(
        meta({{"x", builtinTypes->numberType}}),
        tbl({{"x", builtinTypes->numberType}})
    );
}

TEST_CASE_FIXTURE(SubtypeFixture, "Root <: class")
{
    CHECK_IS_SUBTYPE(rootClass, builtinTypes->classType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child | AnotherChild <: class")
{
    CHECK_IS_SUBTYPE(join(childClass, anotherChildClass), builtinTypes->classType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child | AnotherChild <: Child | AnotherChild")
{
    CHECK_IS_SUBTYPE(join(childClass, anotherChildClass), join(childClass, anotherChildClass));
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child | Root <: Root")
{
    CHECK_IS_SUBTYPE(join(childClass, rootClass), rootClass);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child & AnotherChild <: class")
{
    CHECK_IS_SUBTYPE(meet(childClass, anotherChildClass), builtinTypes->classType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child & Root <: class")
{
    CHECK_IS_SUBTYPE(meet(childClass, rootClass), builtinTypes->classType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child & ~Root <: class")
{
    CHECK_IS_SUBTYPE(meet(childClass, negate(rootClass)), builtinTypes->classType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child & AnotherChild <: number")
{
    CHECK_IS_SUBTYPE(meet(childClass, anotherChildClass), builtinTypes->numberType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Child & ~GrandchildOne <!: number")
{
    CHECK_IS_NOT_SUBTYPE(meet(childClass, negate(grandchildOneClass)), builtinTypes->numberType);
}

TEST_CASE_FIXTURE(SubtypeFixture, "t1 where t1 = {trim: (t1) -> string} <: t2 where t2 = {trim: (t2) -> string}")
{
    TypeId t1 = cyclicTable([&](TypeId ty, TableType* tt)
@@ -665,6 +817,84 @@ TEST_CASE_FIXTURE(SubtypeFixture, "t1 where t1 = {trim: (t1) -> t1} <!: t2 where
    CHECK_IS_NOT_SUBTYPE(t1, t2);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Vec2 <: { X: number, Y: number }")
{
    TypeId xy = tbl({
        {"X", builtinTypes->numberType},
        {"Y", builtinTypes->numberType},
    });

    CHECK_IS_SUBTYPE(vec2Class, xy);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Vec2 <: { X: number }")
{
    TypeId x = tbl({
        {"X", builtinTypes->numberType},
    });

    CHECK_IS_SUBTYPE(vec2Class, x);
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ X: number, Y: number } <!: Vec2")
{
    TypeId xy = tbl({
        {"X", builtinTypes->numberType},
        {"Y", builtinTypes->numberType},
    });

    CHECK_IS_NOT_SUBTYPE(xy, vec2Class);
}

TEST_CASE_FIXTURE(SubtypeFixture, "{ X: number } <!: Vec2")
{
    TypeId x = tbl({
        {"X", builtinTypes->numberType},
    });

    CHECK_IS_NOT_SUBTYPE(x, vec2Class);
}

TEST_CASE_FIXTURE(SubtypeFixture, "table & { X: number, Y: number } <!: Vec2")
{
    TypeId x = tbl({
        {"X", builtinTypes->numberType},
        {"Y", builtinTypes->numberType},
    });

    CHECK_IS_NOT_SUBTYPE(meet(builtinTypes->tableType, x), vec2Class);
}

TEST_CASE_FIXTURE(SubtypeFixture, "Vec2 <!: table & { X: number, Y: number }")
{
    TypeId xy = tbl({
        {"X", builtinTypes->numberType},
        {"Y", builtinTypes->numberType},
    });

    CHECK_IS_NOT_SUBTYPE(vec2Class, meet(builtinTypes->tableType, xy));
}

TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" <: { lower : (string) -> string }")
{
    CHECK_IS_SUBTYPE(helloType, tableWithLower);
}

TEST_CASE_FIXTURE(SubtypeFixture, "\"hello\" <!: { insaneThingNoScalarHas : () -> () }")
{
    CHECK_IS_NOT_SUBTYPE(helloType, tableWithoutScalarProp);
}

TEST_CASE_FIXTURE(SubtypeFixture, "string <: { lower : (string) -> string }")
{
    CHECK_IS_SUBTYPE(builtinTypes->stringType, tableWithLower);
}

TEST_CASE_FIXTURE(SubtypeFixture, "string <!: { insaneThingNoScalarHas : () -> () }")
{
    CHECK_IS_NOT_SUBTYPE(builtinTypes->stringType, tableWithoutScalarProp);
}

/*
 * <A>(A) -> A <: <X>(X) -> X
 * A can be bound to X.
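The last four cases encode the scalar-vs-shape rule: a primitive like string is a subtype of a table shape whose properties the string library provides, but not vice versa. A hedged Luau-level illustration (whether a given release's checker accepts this exact spelling depends on active flags; the tests above assert the relation directly):

    --!strict
    local function callLower(t: { lower: (string) -> string })
        return t.lower("HI")
    end

    callLower("hello") -- a string satisfies the { lower: ... } shape via its metatable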
@@ -833,14 +833,6 @@ TEST_CASE_FIXTURE(Fixture, "tostring_unsee_ttv_if_array")

TEST_CASE_FIXTURE(Fixture, "tostring_error_mismatch")
{
    ScopedFastFlag sff[] = {
        {"LuauIndentTypeMismatch", true},
    };

    ScopedFastInt sfi[] = {
        {"LuauIndentTypeMismatchMaxTypeLength", 10},
    };

    CheckResult result = check(R"(
        --!strict
        function f1() : {a : number, b : string, c : { d : number}}
@@ -529,14 +529,17 @@ until c
    CHECK_EQ(code, transpile(code, {}, true).code);
}

TEST_CASE_FIXTURE(Fixture, "transpile_compound_assignmenr")
TEST_CASE_FIXTURE(Fixture, "transpile_compound_assignment")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};

    std::string code = R"(
local a = 1
a += 2
a -= 3
a *= 4
a /= 5
a //= 5
a %= 6
a ^= 7
a ..= ' - result'
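A small Luau sketch (not part of the diff) of what the new `//=` line in this test exercises; each compound assignment behaves like its expanded form:

    local a = 9
    a //= -6.5 -- equivalent to: a = a // -6.5
    assert(a == math.floor(9 / -6.5)) -- floor division rounds toward -infinity, so a == -2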
@@ -189,9 +189,7 @@ TEST_CASE_FIXTURE(Fixture, "generic_aliases")
{
    ScopedFastFlag sff[] = {
        {"DebugLuauDeferredConstraintResolution", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type T<a> = { v: a }
        local x: T<number> = { v = 123 }

@@ -212,10 +210,7 @@ TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases")
{
    ScopedFastFlag sff[] = {
        {"DebugLuauDeferredConstraintResolution", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type T<a> = { v: a }
@@ -134,9 +134,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "sort_with_bad_predicate")
{
    ScopedFastFlag sff[] = {
        {"LuauAlwaysCommitInferencesOfFunctionCalls", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        --!strict
@@ -369,9 +369,7 @@ TEST_CASE_FIXTURE(ClassFixture, "detailed_class_unification_error")
{
    ScopedFastFlag sff[] = {
        {"LuauAlwaysCommitInferencesOfFunctionCalls", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        local function foo(v)
            return v.X :: number + string.len(v.Y)
@@ -457,8 +455,6 @@ TEST_CASE_FIXTURE(ClassFixture, "index_instance_property_nonstrict")

TEST_CASE_FIXTURE(ClassFixture, "type_mismatch_invariance_required_for_error")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type A = { x: ChildClass }
        type B = { x: BaseClass }
@@ -1095,9 +1095,6 @@ TEST_CASE_FIXTURE(Fixture, "return_type_by_overload")

TEST_CASE_FIXTURE(BuiltinsFixture, "infer_anonymous_function_arguments")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    // Simple direct arg to arg propagation
    CheckResult result = check(R"(
        type Table = { x: number, y: number }
@@ -1342,8 +1339,6 @@ end

TEST_CASE_FIXTURE(Fixture, "error_detailed_function_mismatch_arg_count")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type A = (number, number) -> string
        type B = (number) -> string

@@ -1364,9 +1359,6 @@ caused by:

TEST_CASE_FIXTURE(Fixture, "error_detailed_function_mismatch_arg")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type A = (number, number) -> string
        type B = (number, string) -> string

@@ -1388,9 +1380,6 @@ Type 'string' could not be converted into 'number')";

TEST_CASE_FIXTURE(Fixture, "error_detailed_function_mismatch_ret_count")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type A = (number, number) -> (number)
        type B = (number, number) -> (number, boolean)

@@ -1411,9 +1400,6 @@ caused by:

TEST_CASE_FIXTURE(Fixture, "error_detailed_function_mismatch_ret")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type A = (number, number) -> string
        type B = (number, number) -> number

@@ -1435,9 +1421,6 @@ Type 'string' could not be converted into 'number')";

TEST_CASE_FIXTURE(Fixture, "error_detailed_function_mismatch_ret_mult")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type A = (number, number) -> (number, string)
        type B = (number, number) -> (number, boolean)

@@ -1563,9 +1546,6 @@ TEST_CASE_FIXTURE(Fixture, "inferred_higher_order_functions_are_quantified_at_th

TEST_CASE_FIXTURE(BuiltinsFixture, "function_decl_non_self_unsealed_overwrite")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local t = { f = nil :: ((x: number) -> number)? }

@@ -1608,9 +1588,6 @@ TEST_CASE_FIXTURE(Fixture, "strict_mode_ok_with_missing_arguments")

TEST_CASE_FIXTURE(Fixture, "function_statement_sealed_table_assignment_through_indexer")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local t: {[string]: () -> number} = {}

@@ -1819,9 +1796,6 @@ foo(string.find("hello", "e"))

TEST_CASE_FIXTURE(Fixture, "luau_subtyping_is_np_hard")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        --!strict

@@ -2017,10 +1991,8 @@ TEST_CASE_FIXTURE(Fixture, "function_exprs_are_generalized_at_signature_scope_no
TEST_CASE_FIXTURE(BuiltinsFixture, "param_1_and_2_both_takes_the_same_generic_but_their_arguments_are_incompatible")
{
    ScopedFastFlag sff[] = {
        {"LuauIndentTypeMismatch", true},
        {"LuauAlwaysCommitInferencesOfFunctionCalls", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local function foo<a>(x: a, y: a?)
@@ -713,9 +713,6 @@ end

TEST_CASE_FIXTURE(Fixture, "generic_functions_should_be_memory_safe")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        --!strict
        -- At one point this produced a UAF
@@ -317,9 +317,6 @@ TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed")

TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed_indirect")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type X = { x: (number) -> number }
        type Y = { y: (string) -> string }

@@ -350,9 +347,6 @@ caused by:

TEST_CASE_FIXTURE(Fixture, "table_write_sealed_indirect")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    // After normalization, previous 'table_intersection_write_sealed_indirect' is identical to this one
    CheckResult result = check(R"(
        type XY = { x: (number) -> number, y: (string) -> string }

@@ -392,9 +386,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_intersection_setmetatable")

TEST_CASE_FIXTURE(Fixture, "error_detailed_intersection_part")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type X = { x: number }
        type Y = { y: number }

@@ -482,9 +473,6 @@ TEST_CASE_FIXTURE(Fixture, "intersect_false_and_bool_and_false")

TEST_CASE_FIXTURE(Fixture, "intersect_saturate_overloaded_functions")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : ((number?) -> number?) & ((string?) -> string?)
        local y : (nil) -> nil = x -- OK

@@ -501,8 +489,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_saturate_overloaded_functions")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : ((number) -> number) & ((string) -> string)

@@ -520,9 +506,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : { p : number?, q : string? } & { p : number?, q : number?, r : number? }
        local y : { p : number?, q : nil, r : number? } = x -- OK

@@ -539,9 +522,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : { p : number?, q : any } & { p : unknown, q : string? }
        local y : { p : number?, q : string? } = x -- OK

@@ -594,9 +574,6 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties")

TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : ((number?) -> ({ p : number } & { q : number })) & ((string?) -> ({ p : number } & { r : number }))
        local y : (nil) -> { p : number, q : number, r : number} = x -- OK

@@ -613,9 +590,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a>()
            local x : ((number?) -> (a | number)) & ((string?) -> (a | string))

@@ -634,9 +608,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generics")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a,b,c>()
            local x : ((a?) -> (a | b)) & ((c?) -> (b | c))

@@ -655,9 +626,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic_packs")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...,b...>()
            local x : ((number?, a...) -> (number?, b...)) & ((string?, a...) -> (string?, b...))

@@ -676,9 +644,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_result")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...,b...>()
            local x : ((number) -> number) & ((nil) -> unknown)

@@ -697,9 +662,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_arguments")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...,b...>()
            local x : ((number) -> number?) & ((unknown) -> string?)

@@ -718,9 +680,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_result")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...,b...>()
            local x : ((number) -> number) & ((nil) -> never)

@@ -739,9 +698,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_arguments")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...,b...>()
            local x : ((number) -> number?) & ((never) -> string?)

@@ -760,9 +716,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_overlapping_results_and_variadics")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : ((string?) -> (string | number)) & ((number?) -> ...number)
        local y : ((nil) -> (number, number?)) = x -- OK

@@ -809,9 +762,6 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_2")

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_3")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...>()
            local x : (() -> a...) & (() -> (number?,a...))

@@ -830,9 +780,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_4")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        function f<a...>()
            local x : ((a...) -> ()) & ((number,a...) -> number)
@@ -389,9 +389,6 @@ type Table = typeof(tbl)

TEST_CASE_FIXTURE(BuiltinsFixture, "module_type_conflict")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    fileResolver.source["game/A"] = R"(
        export type T = { x: number }
        return {}

@@ -420,9 +417,6 @@ Type 'number' could not be converted into 'string' in an invariant context)";

TEST_CASE_FIXTURE(BuiltinsFixture, "module_type_conflict_instantiated")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    fileResolver.source["game/A"] = R"(
        export type Wrap<T> = { x: T }
        return {}
@@ -12,6 +12,8 @@

#include "doctest.h"

#include "ScopedFlags.h"

using namespace Luau;

LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)

@@ -143,6 +145,24 @@ TEST_CASE_FIXTURE(Fixture, "some_primitive_binary_ops")
    CHECK_EQ("number", toString(requireType("c")));
}

TEST_CASE_FIXTURE(Fixture, "floor_division_binary_op")
{
    ScopedFastFlag sffs{"LuauFloorDivision", true};

    CheckResult result = check(R"(
        local a = 4 // 8
        local b = -4 // 9
        local c = 9
        c //= -6.5
    )");

    LUAU_REQUIRE_NO_ERRORS(result);

    CHECK_EQ("number", toString(requireType("a")));
    CHECK_EQ("number", toString(requireType("b")));
    CHECK_EQ("number", toString(requireType("c")));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "typecheck_overloaded_multiply_that_is_an_intersection")
{
    CheckResult result = check(R"(
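As the new test asserts, `//` is typed like the other arithmetic operators: number operands yield number, and Luau has no separate integer type. A minimal sketch, assuming only the flag enabled above:

    --!strict
    local a: number = 7 // 2 -- number // number : number (value 3)
    local b = 7.5 // 2       -- still number (value 3)
    b //= 2                  -- the compound form checks the same way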
@@ -787,9 +787,6 @@ TEST_CASE_FIXTURE(IsSubtypeFixture, "functions_with_mismatching_arity_but_any_is

TEST_CASE_FIXTURE(Fixture, "assign_table_with_refined_property_with_a_similar_type_is_illegal")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local t: {x: number?} = {x = nil}

@@ -1050,4 +1047,42 @@ tbl:f3()
    }
}

// Ideally, unification with any will not cause a 2^n normalization of a function overload
TEST_CASE_FIXTURE(BuiltinsFixture, "normalization_limit_in_unify_with_any")
{
    ScopedFastFlag sff[] = {
        {"LuauTransitiveSubtyping", true},
        {"DebugLuauDeferredConstraintResolution", true},
    };

    // With default limit, this test will take 10 seconds in NoOpt
    ScopedFastInt luauNormalizeCacheLimit{"LuauNormalizeCacheLimit", 1000};

    // Build a function type with a large overload set
    const int parts = 100;
    std::string source;

    for (int i = 0; i < parts; i++)
        formatAppend(source, "type T%d = { f%d: number }\n", i, i);

    source += "type Instance = { new: (('s0', extra: Instance?) -> T0)";

    for (int i = 1; i < parts; i++)
        formatAppend(source, " & (('s%d', extra: Instance?) -> T%d)", i, i);

    source += " }\n";

    source += R"(
local Instance: Instance = {} :: any

local function foo(a: typeof(Instance.new)) return if a then 2 else 3 end

foo(1 :: any)
)";

    CheckResult result = check(source);

    LUAU_REQUIRE_ERRORS(result);
}

TEST_SUITE_END();
@@ -316,9 +316,6 @@ TEST_CASE_FIXTURE(Fixture, "table_properties_type_error_escapes")

TEST_CASE_FIXTURE(Fixture, "error_detailed_tagged_union_mismatch_string")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type Cat = { tag: 'cat', catfood: string }
        type Dog = { tag: 'dog', dogfood: string }

@@ -337,9 +334,6 @@ Table type 'a' not compatible with type 'Cat' because the former is missing fiel

TEST_CASE_FIXTURE(Fixture, "error_detailed_tagged_union_mismatch_bool")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type Good = { success: true, result: string }
        type Bad = { success: false, error: string }

@@ -360,9 +354,7 @@ TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias")
{
    ScopedFastFlag sff[] = {
        {"DebugLuauDeferredConstraintResolution", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type Ok<T> = {success: true, result: T}
        type Err<T> = {success: false, error: T}
@@ -2083,8 +2083,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_insert_should_cope_with_optional_prope

TEST_CASE_FIXTURE(Fixture, "error_detailed_prop")
{
    ScopedFastFlag sff[] = {{"LuauIndentTypeMismatch", true}};
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    CheckResult result = check(R"(
        type A = { x: number, y: number }
        type B = { x: number, y: string }

@@ -2103,8 +2101,6 @@ Type 'number' could not be converted into 'string' in an invariant context)";

TEST_CASE_FIXTURE(Fixture, "error_detailed_prop_nested")
{
    ScopedFastFlag sff[] = {{"LuauIndentTypeMismatch", true}};
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    CheckResult result = check(R"(
        type AS = { x: number, y: number }
        type BS = { x: number, y: string }

@@ -2129,9 +2125,6 @@ Type 'number' could not be converted into 'string' in an invariant context)";

TEST_CASE_FIXTURE(BuiltinsFixture, "error_detailed_metatable_prop")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local a1 = setmetatable({ x = 2, y = 3 }, { __call = function(s) end });
        local b1 = setmetatable({ x = 2, y = "hello" }, { __call = function(s) end });

@@ -2202,8 +2195,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_key")
{
    ScopedFastFlag sff[] = {{"LuauIndentTypeMismatch", true}};
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    CheckResult result = check(R"(
        type A = { [number]: string }
        type B = { [string]: string }

@@ -2222,8 +2213,6 @@ Type 'number' could not be converted into 'string' in an invariant context)";

TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_value")
{
    ScopedFastFlag sff[] = {{"LuauIndentTypeMismatch", true}};
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    CheckResult result = check(R"(
        type A = { [number]: number }
        type B = { [number]: string }

@@ -2257,8 +2246,6 @@ a.p = { x = 9 }

TEST_CASE_FIXTURE(Fixture, "explicitly_typed_table_error")
{
    ScopedFastFlag sff[] = {{"LuauIndentTypeMismatch", true}};
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    CheckResult result = check(R"(
        --!strict
        type Super = { x : number }
@@ -3359,14 +3346,10 @@ TEST_CASE_FIXTURE(Fixture, "scalar_is_a_subtype_of_a_compatible_polymorphic_shap

TEST_CASE_FIXTURE(Fixture, "scalar_is_not_a_subtype_of_a_compatible_polymorphic_shape_type")
{
    ScopedFastInt sfi[] = {{"LuauIndentTypeMismatchMaxTypeLength", 10}};
    ScopedFastFlag sff[] = {
        {"LuauAlwaysCommitInferencesOfFunctionCalls", true},
        {"LuauIndentTypeMismatch", true},
    };

    CheckResult result = check(R"(
        local function f(s)
            return s:absolutely_no_scalar_has_this_method()

@@ -3422,8 +3405,6 @@ TEST_CASE_FIXTURE(Fixture, "a_free_shape_can_turn_into_a_scalar_if_it_is_compati

TEST_CASE_FIXTURE(Fixture, "a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local function f(s): string
@@ -992,9 +992,6 @@ end

TEST_CASE_FIXTURE(Fixture, "cli_50041_committing_txnlog_in_apollo_client_error")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        --!strict
        --!nolint
@@ -345,9 +345,7 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "metatables_unify_against_shape_of_free_table
{
    ScopedFastFlag sff[] = {
        {"LuauTransitiveSubtyping", true},
        {"LuauIndentTypeMismatch", true},
    };
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    TableType::Props freeProps{
        {"foo", {builtinTypes->numberType}},
@@ -872,9 +872,6 @@ type R = { m: F<R> }

TEST_CASE_FIXTURE(Fixture, "pack_tail_unification_check")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local a: () -> (number, ...string)
        local b: () -> (number, ...boolean)
@@ -459,8 +459,6 @@ local oh : boolean = t.y

TEST_CASE_FIXTURE(Fixture, "error_detailed_union_part")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type X = { x: number }
        type Y = { y: number }

@@ -498,8 +496,6 @@ local a: XYZ = { w = 4 }

TEST_CASE_FIXTURE(Fixture, "error_detailed_optional")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        type X = { x: number }

@@ -532,8 +528,6 @@ TEST_CASE_FIXTURE(Fixture, "dont_allow_cyclic_unions_to_be_inferred")

TEST_CASE_FIXTURE(BuiltinsFixture, "table_union_write_indirect")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        type A = { x: number, y: (number) -> string } | { z: number, y: (number) -> string }

@@ -620,8 +614,6 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generics")

TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generic_typepacks")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        function f<a...>()
            local x : (number, a...) -> (number?, a...)

@@ -640,8 +632,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_arities")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};
    CheckResult result = check(R"(
        local x : (number) -> number?
        local y : ((number?) -> number) | ((number | string) -> nil) = x -- OK

@@ -658,8 +648,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_arities")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : () -> (number | string)

@@ -677,8 +665,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_variadics")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : (...nil) -> (...number?)

@@ -696,8 +682,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_variadics")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : (number) -> ()

@@ -715,8 +699,6 @@ could not be converted into

TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_variadics")
{
    ScopedFastFlag sff{"LuauIndentTypeMismatch", true};
    ScopedFastInt sfi{"LuauIndentTypeMismatchMaxTypeLength", 10};

    CheckResult result = check(R"(
        local x : () -> (number?, ...number)
@@ -91,6 +91,15 @@ assert((function() local a = 1 a = a + 2 return a end)() == 3)
assert((function() local a = 1 a = a - 2 return a end)() == -1)
assert((function() local a = 1 a = a * 2 return a end)() == 2)
assert((function() local a = 1 a = a / 2 return a end)() == 0.5)

-- floor division should always round towards -Infinity
assert((function() local a = 1 a = a // 2 return a end)() == 0)
assert((function() local a = 3 a = a // 2 return a end)() == 1)
assert((function() local a = 3.5 a = a // 2 return a end)() == 1)
assert((function() local a = -1 a = a // 2 return a end)() == -1)
assert((function() local a = -3 a = a // 2 return a end)() == -2)
assert((function() local a = -3.5 a = a // 2 return a end)() == -2)

assert((function() local a = 5 a = a % 2 return a end)() == 1)
assert((function() local a = 3 a = a ^ 2 return a end)() == 9)
assert((function() local a = 3 a = a ^ 3 return a end)() == 27)
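For plain numbers the new asserts pin down that `a // b` agrees with `math.floor(a / b)`; a compact restatement of the cases above (a sketch, not part of the conformance file):

    local function floordiv(a, b)
        return math.floor(a / b)
    end

    for _, case in {{1, 2}, {3, 2}, {3.5, 2}, {-1, 2}, {-3, 2}, {-3.5, 2}} do
        assert(case[1] // case[2] == floordiv(case[1], case[2]))
    end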
@@ -494,6 +503,7 @@ local function vec3t(x, y, z)
        __sub = function(l, r) return vec3t(l.x - r.x, l.y - r.y, l.z - r.z) end,
        __mul = function(l, r) return type(r) == "number" and vec3t(l.x * r, l.y * r, l.z * r) or vec3t(l.x * r.x, l.y * r.y, l.z * r.z) end,
        __div = function(l, r) return type(r) == "number" and vec3t(l.x / r, l.y / r, l.z / r) or vec3t(l.x / r.x, l.y / r.y, l.z / r.z) end,
        __idiv = function(l, r) return type(r) == "number" and vec3t(l.x // r, l.y // r, l.z // r) or vec3t(l.x // r.x, l.y // r.y, l.z // r.z) end,
        __unm = function(v) return vec3t(-v.x, -v.y, -v.z) end,
        __tostring = function(v) return string.format("%g, %g, %g", v.x, v.y, v.z) end
    })

@@ -504,10 +514,13 @@ assert((function() return tostring(vec3t(1,2,3) + vec3t(4,5,6)) end)() == "5, 7,
assert((function() return tostring(vec3t(1,2,3) - vec3t(4,5,6)) end)() == "-3, -3, -3")
assert((function() return tostring(vec3t(1,2,3) * vec3t(4,5,6)) end)() == "4, 10, 18")
assert((function() return tostring(vec3t(1,2,3) / vec3t(2,4,8)) end)() == "0.5, 0.5, 0.375")
assert((function() return tostring(vec3t(1,2,3) // vec3t(2,4,2)) end)() == "0, 0, 1")
assert((function() return tostring(vec3t(1,2,3) // vec3t(-2,-4,-2)) end)() == "-1, -1, -2")

-- reg vs constant
assert((function() return tostring(vec3t(1,2,3) * 2) end)() == "2, 4, 6")
assert((function() return tostring(vec3t(1,2,3) / 2) end)() == "0.5, 1, 1.5")
assert((function() return tostring(vec3t(1,2,3) // 2) end)() == "0, 1, 1")

-- unary
assert((function() return tostring(-vec3t(1,2,3)) end)() == "-1, -2, -3")
@@ -107,6 +107,7 @@ t.__add = f("add")
t.__sub = f("sub")
t.__mul = f("mul")
t.__div = f("div")
t.__idiv = f("idiv")
t.__mod = f("mod")
t.__unm = f("unm")
t.__pow = f("pow")

@@ -128,6 +129,8 @@ assert(a*a == a)
assert(cap[0] == "mul" and cap[1] == a and cap[2] == a and cap[3]==nil)
assert(a/0 == a)
assert(cap[0] == "div" and cap[1] == a and cap[2] == 0 and cap[3]==nil)
assert(a//0 == a)
assert(cap[0] == "idiv" and cap[1] == a and cap[2] == 0 and cap[3]==nil)
assert(a%2 == a)
assert(cap[0] == "mod" and cap[1] == a and cap[2] == 2 and cap[3]==nil)
assert(-a == a)
@@ -82,6 +82,7 @@ assert(not(1>1) and not(1>2) and (2>1))
assert(not('a'>'a') and not('a'>'b') and ('b'>'a'))
assert((1>=1) and not(1>=2) and (2>=1))
assert(('a'>='a') and not('a'>='b') and ('b'>='a'))
assert((unk and unk > 0) == nil) -- validate precedence between and and >

-- testing mod operator
assert(-4%3 == 2)
@@ -130,4 +130,45 @@ end

assert(pcall(fuzzfail13) == true)

local function arraySizeInv1()
    local t = {1, 2, nil, nil, nil, nil, nil, nil, nil, true}

    table.insert(t, 3)

    return t[10]
end

assert(arraySizeInv1() == true)

local function arraySizeInv2()
    local t = {1, 2, nil, nil, nil, nil, nil, nil, nil, true}

    local u = {a = t}
    table.insert(u.a, 3) -- aliased modification of 't' register through other value

    return t[10]
end

assert(arraySizeInv2() == true)

local function nilInvalidatesSlot()
    local function tabs()
        local t = { x=1, y=2, z=3 }
        setmetatable(t, { __index = function(t, k) return 42 end })
        return t, t
    end

    local t1, t2 = tabs()

    for i=1,2 do
        local a = t1.x
        t2.x = nil
        local b = t1.x
        t2.x = 1
        assert(a == 1 and b == 42)
    end
end

nilInvalidatesSlot()

return('OK')
@@ -30,6 +30,12 @@ assert(int64(4) / 2 == int64(2))
assert(int64(4) % 3 == int64(1))
assert(int64(2) ^ 3 == int64(8))

-- / and // round in different directions in our test implementation
assert(int64(5) / int64(2) == int64(2))
assert(int64(5) // int64(2) == int64(2))
assert(int64(-5) / int64(2) == int64(-2))
assert(int64(-5) // int64(2) == int64(-3))

-- tostring
assert(tostring(int64(2)) == "2")
@@ -67,6 +67,18 @@ assert(vector(1, 2, 4) / '8' == vector(1/8, 1/4, 1/2));

assert(-vector(1, 2, 4) == vector(-1, -2, -4));

-- test floor division
assert(vector(1, 3, 5) // 2 == vector(0, 1, 2))
assert(vector(1, 3, 5) // val == vector(8, 24, 40))

if vector_size == 4 then
    assert(10 // vector(1, 2, 3, 4) == vector(10, 5, 3, 2))
    assert(vector(10, 9, 8, 7) // vector(1, 2, 3, 4) == vector(10, 4, 2, 1))
else
    assert(10 // vector(1, 2, 3) == vector(10, 5, 3))
    assert(vector(10, 9, 8) // vector(1, 2, 3) == vector(10, 4, 2))
end

-- test NaN comparison
local nanv = vector(0/0, 0/0, 0/0)
assert(nanv ~= nanv);