Merge branch 'upstream' into merge

Aaron Weiss 2024-01-26 18:31:35 -08:00
commit ce2665d9f1
59 changed files with 2048 additions and 501 deletions


@ -90,18 +90,42 @@ struct FunctionCallConstraint
DenseHashMap<const AstNode*, TypeId>* astOverloadResolvedTypes = nullptr;
};
// function_check fn argsPack
//
// If fn is a function type and argsPack is a partially solved
// pack of arguments to be supplied to the function, propagate the argument
// types of fn into the types of argsPack. This is used to implement
// bidirectional inference of lambda arguments.
struct FunctionCheckConstraint
{
TypeId fn;
TypePackId argsPack;
class AstExprCall* callSite = nullptr;
};
// prim FreeType ExpectedType PrimitiveType
//
// FreeType is bounded below by the singleton type and above by PrimitiveType
// initially. When this constraint is resolved, it will check that the bounds
// of the free type are well-formed by subtyping.
//
// If they are not well-formed, then FreeType is replaced by its lower bound
//
// If they are well-formed and ExpectedType is potentially a singleton (an
// actual singleton or a union that contains a singleton),
// then FreeType is replaced by its lower bound
//
// else FreeType is replaced by PrimitiveType
struct PrimitiveTypeConstraint
{
TypeId freeType;
// potentially gets used to force the lower bound?
std::optional<TypeId> expectedType;
// the primitive type to check against
TypeId primitiveType;
};
// result ~ hasProp type "prop_name"
@ -230,7 +254,7 @@ struct ReducePackConstraint
};
using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, IterableConstraint,
NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, FunctionCheckConstraint, PrimitiveTypeConstraint, HasPropConstraint, SetPropConstraint,
SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint>;
struct Constraint


@ -150,7 +150,7 @@ private:
*/
ScopePtr childScope(AstNode* node, const ScopePtr& parent);
std::optional<TypeId> lookup(const ScopePtr& scope, DefId def, bool prototype = true);
/**
* Adds a new constraint with no dependencies to a given scope.
@ -178,8 +178,8 @@ private:
};
using RefinementContext = InsertionOrderedMap<DefId, RefinementPartition>;
void unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector<ConstraintV>* constraints);
void computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector<ConstraintV>* constraints);
void applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement);
ControlFlow visitBlockWithoutChildScope(const ScopePtr& scope, AstStatBlock* block);
@ -329,6 +329,11 @@ private:
void reportError(Location location, TypeErrorData err);
void reportCodeTooComplex(Location location);
// make a union type family of these two types
TypeId makeUnion(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs);
// make an intersect type family of these two types
TypeId makeIntersect(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs);
/** Scan the program for global definitions.
*
* ConstraintGenerator needs to differentiate between globals and accesses to undefined symbols. Doing this "for


@ -75,7 +75,7 @@ struct ConstraintSolver
// anything.
std::unordered_map<NotNull<const Constraint>, size_t> blockedConstraints;
// A mapping of type/pack pointers to the constraints they block.
std::unordered_map<BlockedConstraintId, DenseHashSet<const Constraint*>, HashBlockedConstraintId> blocked;
// Memoized instantiations of type aliases.
DenseHashMap<InstantiationSignature, TypeId, HashInstantiationSignature> instantiatedAliases{{}};
// Breadcrumbs for where a free type's upper bound was expanded. We use
@ -126,6 +126,7 @@ struct ConstraintSolver
bool tryDispatch(const NameConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const TypeAliasExpansionConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const FunctionCallConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const FunctionCheckConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const SetPropConstraint& c, NotNull<const Constraint> constraint, bool force);
@ -285,7 +286,7 @@ private:
* @param target the type or type pack pointer that the constraint is blocked on.
* @param constraint the constraint to block.
**/
bool block_(BlockedConstraintId target, NotNull<const Constraint> constraint);
/**
* Informs the solver that progress has been made on a type or type pack. The


@ -163,6 +163,9 @@ struct BuiltinTypeFamilies
TypeFamily eqFamily;
TypeFamily refineFamily;
TypeFamily unionFamily;
TypeFamily intersectFamily;
TypeFamily keyofFamily;
TypeFamily rawkeyofFamily;


@ -56,6 +56,7 @@ struct Unifier2
* free TypePack to another and encounter an occurs check violation.
*/
bool unify(TypeId subTy, TypeId superTy);
bool unify(const LocalType* subTy, TypeId superFn);
bool unify(TypeId subTy, const FunctionType* superFn);
bool unify(const UnionType* subUnion, TypeId superTy);
bool unify(TypeId subTy, const UnionType* superUnion);


@ -8,8 +8,6 @@
#include <math.h>
namespace Luau
{
@ -393,8 +391,6 @@ struct AstJsonEncoder : public AstVisitor
PROP(body);
PROP(functionDepth);
PROP(debugname);
});
}
@ -591,11 +587,8 @@ struct AstJsonEncoder : public AstVisitor
void write(class AstStatBlock* node)
{
writeNode(node, "AstStatBlock", [&]() {
writeRaw(",\"hasEnd\":");
write(node->hasEnd);
writeRaw(",\"body\":[");
bool comma = false;
for (AstStat* stat : node->body)
@ -619,8 +612,6 @@ struct AstJsonEncoder : public AstVisitor
if (node->elsebody)
PROP(elsebody);
write("hasThen", node->thenLocation.has_value());
});
}
@ -630,8 +621,6 @@ struct AstJsonEncoder : public AstVisitor
PROP(condition);
PROP(body);
PROP(hasDo);
});
}
@ -640,8 +629,6 @@ struct AstJsonEncoder : public AstVisitor
writeNode(node, "AstStatRepeat", [&]() {
PROP(condition);
PROP(body);
});
}
@ -687,8 +674,6 @@ struct AstJsonEncoder : public AstVisitor
PROP(step);
PROP(body);
PROP(hasDo);
});
}
@ -700,8 +685,6 @@ struct AstJsonEncoder : public AstVisitor
PROP(body);
PROP(hasIn);
PROP(hasDo);
});
}


@ -15,7 +15,6 @@
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(DebugLuauReadWriteProperties);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteStringLiteralBounds, false);
static const std::unordered_set<std::string> kStatementStartingKeywords = {
@ -1068,51 +1067,30 @@ static AutocompleteEntryMap autocompleteStatement(
for (const auto& kw : kStatementStartingKeywords)
result.emplace(kw, AutocompleteEntry{AutocompleteEntryKind::Keyword});
for (auto it = ancestry.rbegin(); it != ancestry.rend(); ++it)
{
if (AstStatForIn* statForIn = (*it)->as<AstStatForIn>(); statForIn && !statForIn->body->hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
else if (AstStatFor* statFor = (*it)->as<AstStatFor>(); statFor && !statFor->body->hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
else if (AstStatIf* statIf = (*it)->as<AstStatIf>())
{
bool hasEnd = statIf->thenbody->hasEnd;
if (statIf->elsebody)
{
if (AstStatBlock* elseBlock = statIf->elsebody->as<AstStatBlock>())
hasEnd = elseBlock->hasEnd;
}
if (!hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
}
else if (AstStatWhile* statWhile = (*it)->as<AstStatWhile>(); statWhile && !statWhile->body->hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
else if (AstExprFunction* exprFunction = (*it)->as<AstExprFunction>(); exprFunction && !exprFunction->body->hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
if (AstStatBlock* exprBlock = (*it)->as<AstStatBlock>(); exprBlock && !exprBlock->hasEnd)
result.emplace("end", AutocompleteEntry{AutocompleteEntryKind::Keyword});
}
if (ancestry.size() >= 2)
@ -1127,16 +1105,8 @@ static AutocompleteEntryMap autocompleteStatement(
}
}
if (AstStatRepeat* statRepeat = parent->as<AstStatRepeat>(); statRepeat && !statRepeat->body->hasEnd)
result.emplace("until", AutocompleteEntry{AutocompleteEntryKind::Keyword});
}
if (ancestry.size() >= 4)
@ -1150,16 +1120,8 @@ static AutocompleteEntryMap autocompleteStatement(
}
}
if (AstStatRepeat* statRepeat = extractStat<AstStatRepeat>(ancestry); statRepeat && !statRepeat->body->hasEnd)
result.emplace("until", AutocompleteEntry{AutocompleteEntryKind::Keyword});
return result;
}
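The effect of these autocomplete changes is easiest to see from the editor's side. A small Luau sketch of buffer states (hypothetical, not part of the commit) where these paths fire:

-- Cursor at |. The for-in body has hasEnd == false, so the walk over
-- ancestry offers the "end" keyword:
for i, v in ipairs(items) do
    |

-- An unterminated repeat instead matches the AstStatRepeat case and
-- offers "until":
repeat
    count += 1
    |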


@ -45,6 +45,12 @@ DenseHashSet<TypeId> Constraint::getFreeTypes() const
ftc.traverse(psc->subPack);
ftc.traverse(psc->superPack);
}
else if (auto ptc = get<PrimitiveTypeConstraint>(*this))
{
// We need to take primitive type constraints into account to prevent type
// families from reducing on primitives whose types we have not yet decided
// to be singletons or not.
ftc.traverse(ptc->freeType);
}
return types;
}


@ -20,6 +20,7 @@
#include "Luau/InsertionOrderedMap.h"
#include <algorithm>
#include <memory>
LUAU_FASTINT(LuauCheckRecursionLimit);
LUAU_FASTFLAG(DebugLuauLogSolverToJson);
@ -205,7 +206,7 @@ ScopePtr ConstraintGenerator::childScope(AstNode* node, const ScopePtr& parent)
return scope;
}
std::optional<TypeId> ConstraintGenerator::lookup(const ScopePtr& scope, DefId def, bool prototype)
{
if (get<Cell>(def))
return scope->lookup(def);
@ -230,7 +231,7 @@ std::optional<TypeId> ConstraintGenerator::lookup(Scope* scope, DefId def, bool
rootScope->lvalueTypes[operand] = *ty;
}
res = makeUnion(scope, Location{} /* TODO: can we provide a real location here? */, res, *ty);
}
scope->lvalueTypes[def] = res;
@ -250,18 +251,13 @@ NotNull<Constraint> ConstraintGenerator::addConstraint(const ScopePtr& scope, st
return NotNull{constraints.emplace_back(std::move(c)).get()};
}
void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector<ConstraintV>* constraints)
{
const auto intersect = [&](const std::vector<TypeId>& types) {
if (1 == types.size())
return types[0];
else if (2 == types.size())
return makeIntersect(scope, location, types[0], types[1]);
return arena->addType(IntersectionType{types});
};
@ -281,48 +277,48 @@ void ConstraintGenerator::unionRefinements(const RefinementContext& lhs, const R
rhsIt->second.discriminantTypes.size() == 1 ? rhsIt->second.discriminantTypes[0] : intersect(rhsIt->second.discriminantTypes);
dest.insert(def, {});
dest.get(def)->discriminantTypes.push_back(makeUnion(scope, location, leftDiscriminantTy, rightDiscriminantTy));
dest.get(def)->shouldAppendNilType |= partition.shouldAppendNilType || rhsIt->second.shouldAppendNilType;
}
}
void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector<ConstraintV>* constraints)
{
if (!refinement)
return;
else if (auto variadic = get<Variadic>(refinement))
{
for (RefinementId refi : variadic->refinements)
computeRefinement(scope, location, refi, refis, sense, eq, constraints);
}
else if (auto negation = get<Negation>(refinement))
return computeRefinement(scope, location, negation->refinement, refis, !sense, eq, constraints);
else if (auto conjunction = get<Conjunction>(refinement))
{
RefinementContext lhsRefis;
RefinementContext rhsRefis;
computeRefinement(scope, location, conjunction->lhs, sense ? refis : &lhsRefis, sense, eq, constraints);
computeRefinement(scope, location, conjunction->rhs, sense ? refis : &rhsRefis, sense, eq, constraints);
if (!sense)
unionRefinements(scope, location, lhsRefis, rhsRefis, *refis, constraints);
}
else if (auto disjunction = get<Disjunction>(refinement))
{
RefinementContext lhsRefis;
RefinementContext rhsRefis;
computeRefinement(scope, location, disjunction->lhs, sense ? &lhsRefis : refis, sense, eq, constraints);
computeRefinement(scope, location, disjunction->rhs, sense ? &rhsRefis : refis, sense, eq, constraints);
if (sense)
unionRefinements(scope, location, lhsRefis, rhsRefis, *refis, constraints);
}
else if (auto equivalence = get<Equivalence>(refinement))
{
computeRefinement(scope, location, equivalence->lhs, refis, sense, true, constraints);
computeRefinement(scope, location, equivalence->rhs, refis, sense, true, constraints);
}
else if (auto proposition = get<Proposition>(refinement))
{
@ -423,11 +419,11 @@ void ConstraintGenerator::applyRefinements(const ScopePtr& scope, Location locat
RefinementContext refinements;
std::vector<ConstraintV> constraints;
computeRefinement(scope, location, refinement, &refinements, /*sense*/ true, /*eq*/ false, &constraints);
for (auto& [def, partition] : refinements)
{
if (std::optional<TypeId> defTy = lookup(scope, def))
{
TypeId ty = *defTy;
if (partition.shouldAppendNilType)
@ -455,15 +451,15 @@ void ConstraintGenerator::applyRefinements(const ScopePtr& scope, Location locat
switch (shouldSuppressErrors(normalizer, ty))
{
case ErrorSuppression::DoNotSuppress:
ty = makeIntersect(scope, location, ty, dt);
break;
case ErrorSuppression::Suppress:
ty = makeIntersect(scope, location, ty, dt);
ty = makeUnion(scope, location, ty, builtinTypes->errorType);
break;
case ErrorSuppression::NormalizationFailed:
reportError(location, NormalizationTooComplex{});
ty = makeIntersect(scope, location, ty, dt);
break;
}
}
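For context, a hedged Luau sketch of what applyRefinements now produces; the types shown are my reading of the new code, not output from the commit:

local function f(x: string?)
    if x then
        -- the refinement intersects x's declared type with the truthy
        -- discriminant, roughly intersect<string?, ~(false?)>, which the
        -- intersect type family later reduces to string
        local y = x:upper()
    end
end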
@ -761,7 +757,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatForIn* forI
for (AstLocal* var : forIn->vars)
{
TypeId assignee = arena->addType(LocalType{builtinTypes->neverType, /* blockCount */ 1, var->name.value});
variableTypes.push_back(assignee);
if (var->annotation)
@ -872,7 +868,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatFunction* f
DenseHashSet<Constraint*> excludeList{nullptr};
DefId def = dfg->getDef(function->name);
std::optional<TypeId> existingFunctionTy = lookup(scope, def);
if (AstExprLocal* localName = function->name->as<AstExprLocal>())
{
@ -1492,8 +1488,12 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall*
discriminantTypes.push_back(std::nullopt);
}
Checkpoint funcBeginCheckpoint = checkpoint(this);
TypeId fnType = check(scope, call->func).ty;
Checkpoint funcEndCheckpoint = checkpoint(this);
std::vector<std::optional<TypeId>> expectedTypesForCall = getExpectedCallTypesForFunctionOverloads(fnType);
module->astOriginalCallTypes[call->func] = fnType;
@ -1624,13 +1624,31 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall*
&module->astOverloadResolvedTypes,
});
NotNull<Constraint> foo = addConstraint(scope, call->func->location,
FunctionCheckConstraint{
fnType,
argPack,
call
}
);
/*
* To make bidirectional type checking work, we need to solve these constraints in a particular order:
*
* 1. Solve the function type
* 2. Propagate type information from the function type to the argument types
* 3. Solve the argument types
* 4. Solve the call
*/
forEachConstraint(funcBeginCheckpoint, funcEndCheckpoint, this, [foo](const ConstraintPtr& constraint) {
foo->dependencies.emplace_back(constraint.get());
});
forEachConstraint(argBeginCheckpoint, argEndCheckpoint, this, [foo, fcc](const ConstraintPtr& constraint) {
constraint->dependencies.emplace_back(foo);
fcc->dependencies.emplace_back(constraint.get());
});
return InferencePack{rets, {refinementArena.variadic(returnRefinements)}};
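The ordering matters for calls like the following Luau sketch (illustrative only, not from the commit): the lambda's parameter is unannotated, so its type has to flow in from the callee before the lambda body is solved.

local function apply(f: (number) -> number, x: number): number
    return f(x)
end

-- FunctionCheckConstraint propagates (number) -> number onto the lambda,
-- so n is known to be number by the time n * 2 is checked:
local doubled = apply(function(n) return n * 2 end, 21)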
@ -1712,23 +1730,12 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprConstantStrin
if (forceSingleton)
return Inference{arena->addType(SingletonType{StringSingleton{std::string{string->value.data, string->value.size}}})};
FreeType ft = FreeType{scope.get()};
ft.lowerBound = arena->addType(SingletonType{StringSingleton{std::string{string->value.data, string->value.size}}});
ft.upperBound = builtinTypes->stringType;
const TypeId freeTy = arena->addType(ft);
addConstraint(scope, string->location, PrimitiveTypeConstraint{freeTy, expectedType, builtinTypes->stringType});
return Inference{freeTy};
}
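In Luau terms, a string literal now yields a free type bounded below by its singleton and above by the primitive, and the PrimitiveTypeConstraint later picks one of the bounds. A sketch of the intended behavior (my reading of the rules in Constraint.h above):

local mode: "fast" | "slow" = "fast" -- expected type contains a singleton,
                                     -- so the free type resolves to "fast"
local greeting = "hello"             -- no singleton expectation, so it
                                     -- widens to the upper bound string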
Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprConstantBool* boolExpr, std::optional<TypeId> expectedType, bool forceSingleton)
@ -1737,23 +1744,12 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprConstantBool*
if (forceSingleton)
return Inference{singletonType};
FreeType ft = FreeType{scope.get()};
ft.lowerBound = singletonType;
ft.upperBound = builtinTypes->booleanType;
const TypeId freeTy = arena->addType(ft);
addConstraint(scope, boolExpr->location, PrimitiveTypeConstraint{freeTy, expectedType, builtinTypes->booleanType});
return Inference{freeTy};
}
Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprLocal* local)
@ -1766,12 +1762,12 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprLocal* local)
// if we have a refinement key, we can look up its type.
if (key)
maybeTy = lookup(scope, key->def);
// if the current def doesn't have a type, we might be doing a compound assignment
// and therefore might need to look at the rvalue def instead.
if (!maybeTy && rvalueDef)
maybeTy = lookup(scope, *rvalueDef);
if (maybeTy)
{
@ -1797,7 +1793,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprGlobal* globa
/* prepopulateGlobalScope() has already added all global functions to the environment by this point, so any
* global that is not already in-scope is definitely an unknown symbol.
*/
if (auto ty = lookup(scope, def, /*prototype=*/false))
{
rootScope->lvalueTypes[def] = *ty;
return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)};
@ -1816,7 +1812,7 @@ Inference ConstraintGenerator::checkIndexName(const ScopePtr& scope, const Refin
if (key)
{
if (auto ty = lookup(scope, key->def))
return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)};
scope->rvalueRefinements[key->def] = result;
@ -1852,7 +1848,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* in
const RefinementKey* key = dfg->getRefinementKey(indexExpr);
if (key)
{
if (auto ty = lookup(scope, key->def))
return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)};
scope->rvalueRefinements[key->def] = result;
@ -2120,7 +2116,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIfElse* ifEls
applyRefinements(elseScope, ifElse->falseExpr->location, refinementArena.negation(refinement));
TypeId elseType = check(elseScope, ifElse->falseExpr, expectedType).ty;
return Inference{expectedType ? *expectedType : makeUnion(scope, ifElse->location, thenType, elseType)};
}
Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprTypeAssertion* typeAssert)
@ -3172,6 +3168,30 @@ void ConstraintGenerator::reportCodeTooComplex(Location location)
logger->captureGenerationError(errors.back());
}
TypeId ConstraintGenerator::makeUnion(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs)
{
TypeId resultType = arena->addType(TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.unionFamily},
{lhs, rhs},
{},
});
addConstraint(scope, location, ReduceConstraint{resultType});
return resultType;
}
TypeId ConstraintGenerator::makeIntersect(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs)
{
TypeId resultType = arena->addType(TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.intersectFamily},
{lhs, rhs},
{},
});
addConstraint(scope, location, ReduceConstraint{resultType});
return resultType;
}
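These helpers route simplification through the new union/intersect type families instead of calling simplifyUnion/simplifyIntersection eagerly, so reduction can wait for blocked operands. The if-else expression change above now produces such an instance; in Luau source terms (illustrative):

local n = if math.random() > 0.5 then 1 else "one"
-- n : union<number, string>, which the ReduceConstraint collapses to
-- number | string once both branch types are known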
struct GlobalPrepopulator : AstVisitor
{
const NotNull<Scope> globalScope;


@ -283,7 +283,8 @@ ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope
for (auto ty : c->getFreeTypes())
{
// increment the reference count for `ty`
auto [refCount, _] = unresolvedConstraints.try_insert(ty, 0);
refCount += 1;
}
for (NotNull<const Constraint> dep : c->dependencies)
@ -368,7 +369,13 @@ void ConstraintSolver::run()
// decrement the referenced free types for this constraint if we dispatched successfully!
for (auto ty : c->getFreeTypes())
{
// this is a little weird, but because we're only counting free types in subtyping constraints,
// some constraints (like unpack) might actually produce _more_ references to a free type.
size_t& refCount = unresolvedConstraints[ty];
if (refCount > 0)
refCount -= 1;
}
if (logger)
{
@ -534,6 +541,8 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
success = tryDispatch(*taec, constraint);
else if (auto fcc = get<FunctionCallConstraint>(*constraint))
success = tryDispatch(*fcc, constraint);
else if (auto fcc = get<FunctionCheckConstraint>(*constraint))
success = tryDispatch(*fcc, constraint);
else if (auto fcc = get<PrimitiveTypeConstraint>(*constraint))
success = tryDispatch(*fcc, constraint);
else if (auto hpc = get<HasPropConstraint>(*constraint))
@ -992,6 +1001,15 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
return block(c.fn, constraint);
}
// if we're calling an error type, the result is an error type, and that's that.
if (get<ErrorType>(fn))
{
asMutable(c.result)->ty.emplace<BoundTypePack>(builtinTypes->errorTypePack);
unblock(c.result, constraint->location);
return true;
}
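A short Luau sketch (hypothetical) of why a call to an error type should short-circuit to an error pack rather than produce further diagnostics:

local f = nonexistent.field -- error here: unknown global 'nonexistent'
local r = f(1, 2)           -- f is an error type, so the call result is an
                            -- error pack and no cascading error is reported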
auto [argsHead, argsTail] = flatten(argsPack);
bool blocked = false;
@ -1080,44 +1098,6 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
*asMutable(follow(*ty)) = BoundType{builtinTypes->anyType};
}
TypeId inferredTy = arena->addType(FunctionType{TypeLevel{}, constraint->scope.get(), argsPack, c.result});
Unifier2 u2{NotNull{arena}, builtinTypes, constraint->scope, NotNull{&iceReporter}};
@ -1141,17 +1121,94 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
return true;
}
bool ConstraintSolver::tryDispatch(const FunctionCheckConstraint& c, NotNull<const Constraint> constraint)
{
const TypeId fn = follow(c.fn);
const TypePackId argsPack = follow(c.argsPack);
if (isBlocked(fn))
return block(fn, constraint);
if (isBlocked(argsPack))
return block(argsPack, constraint);
// We know the type of the function and the arguments it expects to receive.
// We also know the TypeIds of the actual arguments that will be passed.
//
// Bidirectional type checking: Force those TypeIds to be the expected
// arguments. If something is incoherent, we'll spot it in type checking.
//
// Most important detail: If a function argument is a lambda, we also want
// to force unannotated argument types of that lambda to be the expected
// types.
// FIXME: Bidirectional type checking of overloaded functions is not yet supported.
if (auto ftv = get<FunctionType>(fn))
{
const std::vector<TypeId> expectedArgs = flatten(ftv->argTypes).first;
const std::vector<TypeId> argPackHead = flatten(argsPack).first;
for (size_t i = 0; i < c.callSite->args.size && i < expectedArgs.size() && i < argPackHead.size(); ++i)
{
const TypeId expectedArgTy = follow(expectedArgs[i]);
const TypeId actualArgTy = follow(argPackHead[i]);
const FunctionType* expectedLambdaTy = get<FunctionType>(expectedArgTy);
const FunctionType* lambdaTy = get<FunctionType>(actualArgTy);
const AstExprFunction* lambdaExpr = c.callSite->args.data[i]->as<AstExprFunction>();
if (expectedLambdaTy && lambdaTy && lambdaExpr)
{
const std::vector<TypeId> expectedLambdaArgTys = flatten(expectedLambdaTy->argTypes).first;
const std::vector<TypeId> lambdaArgTys = flatten(lambdaTy->argTypes).first;
for (size_t j = 0; j < expectedLambdaArgTys.size() && j < lambdaArgTys.size() && j < lambdaExpr->args.size; ++j)
{
if (!lambdaExpr->args.data[j]->annotation && get<FreeType>(follow(lambdaArgTys[j])))
{
asMutable(lambdaArgTys[j])->ty.emplace<BoundType>(expectedLambdaArgTys[j]);
}
}
}
else
{
Unifier2 u2{arena, builtinTypes, constraint->scope, NotNull{&iceReporter}};
u2.unify(actualArgTy, expectedArgTy);
}
}
}
return true;
}
bool ConstraintSolver::tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint)
{
std::optional<TypeId> expectedType = c.expectedType ? std::make_optional<TypeId>(follow(*c.expectedType)) : std::nullopt;
if (expectedType && (isBlocked(*expectedType) || get<PendingExpansionType>(*expectedType)))
return block(*expectedType, constraint);
const FreeType* freeType = get<FreeType>(follow(c.freeType));
// if this is no longer a free type, then we're done.
if (!freeType)
return true;
// We will wait if there are any other references to the free type mentioned here.
// This is probably the only thing that makes this not insane to do.
if (auto refCount = unresolvedConstraints.find(c.freeType); refCount && *refCount > 1)
{
block(c.freeType, constraint);
return false;
}
TypeId bindTo = c.primitiveType;
if (freeType->upperBound != c.primitiveType && maybeSingleton(freeType->upperBound))
bindTo = freeType->lowerBound;
else if (expectedType && maybeSingleton(*expectedType))
bindTo = freeType->lowerBound;
asMutable(c.freeType)->ty.emplace<BoundType>(bindTo);
return true;
}
@ -1163,7 +1220,7 @@ bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Con
LUAU_ASSERT(get<BlockedType>(resultType));
if (isBlocked(subjectType) || get<PendingExpansionType>(subjectType) || get<TypeFamilyInstanceType>(subjectType))
return block(subjectType, constraint);
auto [blocked, result] = lookupTableProp(subjectType, c.prop, c.suppressSimplification);
@ -1599,7 +1656,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
auto unpack = [&](TypeId ty) {
TypePackId variadic = arena->addTypePack(VariadicTypePack{ty});
pushConstraint(constraint->scope, constraint->location, UnpackConstraint{c.variables, variadic, /* resultIsLValue */ true});
};
if (get<AnyType>(iteratorTy))
@ -1639,6 +1696,23 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
{
TypePackId expectedVariablePack = arena->addTypePack({iteratorTable->indexer->indexType, iteratorTable->indexer->indexResultType});
unify(constraint->scope, constraint->location, c.variables, expectedVariablePack);
auto [variableTys, variablesTail] = flatten(c.variables);
// the local types for the indexer _should_ be all set after unification
for (TypeId ty : variableTys)
{
if (auto lt = getMutable<LocalType>(ty))
{
LUAU_ASSERT(lt->blockCount > 0);
--lt->blockCount;
LUAU_ASSERT(0 <= lt->blockCount);
if (0 == lt->blockCount)
asMutable(ty)->ty.emplace<BoundType>(lt->domain);
}
}
}
else
unpack(builtinTypes->errorType);
@ -1775,7 +1849,7 @@ bool ConstraintSolver::tryDispatchIterableFunction(
modifiedNextRetHead.push_back(*it);
TypePackId modifiedNextRetPack = arena->addTypePack(std::move(modifiedNextRetHead), it.tail());
auto psc = pushConstraint(constraint->scope, constraint->location, UnpackConstraint{c.variables, modifiedNextRetPack, /* resultIsLValue */ true});
inheritBlocks(constraint, psc);
return true;
@ -1876,7 +1950,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
{
const TypeId upperBound = follow(ft->upperBound);
if (get<TableType>(upperBound) || get<PrimitiveType>(upperBound))
return lookupTableProp(upperBound, propName, suppressSimplification, seen);
// TODO: The upper bound could be an intersection that contains suitable tables or classes.
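Accepting a primitive upper bound matters now that string literals produce free types bounded above by string (see the PrimitiveTypeConstraint changes). A Luau sketch of the case this appears to unblock, under my reading:

local s = "hello"  -- free type: lower bound "hello", upper bound string
print(s:upper())   -- property lookup follows the upper bound into string's
                   -- method table instead of getting stuck on the free type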
@ -2008,46 +2082,63 @@ void ConstraintSolver::bindBlockedType(TypeId blockedTy, TypeId resultTy, TypeId
asMutable(blockedTy)->ty.emplace<BoundType>(resultTy);
}
bool ConstraintSolver::block_(BlockedConstraintId target, NotNull<const Constraint> constraint)
{
// If a set is not present for the target, construct a new DenseHashSet for it,
// else grab the address of the existing set.
NotNull<DenseHashSet<const Constraint*>> blockVec{&blocked.try_emplace(target, nullptr).first->second};
if (blockVec->find(constraint))
return false;
blockVec->insert(constraint);
auto& count = blockedConstraints[constraint];
count += 1;
return true;
}
void ConstraintSolver::block(NotNull<const Constraint> target, NotNull<const Constraint> constraint)
{
const bool newBlock = block_(target.get(), constraint);
if (newBlock)
{
if (logger)
logger->pushBlock(constraint, target);
if (FFlag::DebugLuauLogSolver)
printf("block Constraint %s on\t%s\n", toString(*target, opts).c_str(), toString(*constraint, opts).c_str());
}
}
bool ConstraintSolver::block(TypeId target, NotNull<const Constraint> constraint)
{
const bool newBlock = block_(follow(target), constraint);
if (newBlock)
{
if (logger)
logger->pushBlock(constraint, target);
if (FFlag::DebugLuauLogSolver)
printf("block TypeId %s on\t%s\n", toString(target, opts).c_str(), toString(*constraint, opts).c_str());
}
return false;
}
bool ConstraintSolver::block(TypePackId target, NotNull<const Constraint> constraint)
{
const bool newBlock = block_(target, constraint);
if (newBlock)
{
if (logger)
logger->pushBlock(constraint, target);
if (FFlag::DebugLuauLogSolver)
printf("block TypePackId %s on\t%s\n", toString(target, opts).c_str(), toString(*constraint, opts).c_str());
}
return false;
}
}
@ -2058,9 +2149,9 @@ void ConstraintSolver::inheritBlocks(NotNull<const Constraint> source, NotNull<c
auto blockedIt = blocked.find(source.get());
if (blockedIt != blocked.end())
{
for (const Constraint* blockedConstraint : blockedIt->second)
{
block(addition, NotNull{blockedConstraint});
}
}
}
@ -2112,9 +2203,9 @@ void ConstraintSolver::unblock_(BlockedConstraintId progressed)
return;
// unblocked should contain a value always, because of the above check
for (const Constraint* unblockedConstraint : it->second)
{
auto& count = blockedConstraints[NotNull{unblockedConstraint}];
if (FFlag::DebugLuauLogSolver)
printf("Unblocking count=%d\t%s\n", int(count), toString(*unblockedConstraint, opts).c_str());


@ -699,6 +699,9 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, TypePackId
results.push_back(SubtypingResult{ok}.withBothComponent(TypePath::PackField::Tail));
}
}
else if (get<ErrorTypePack>(*subTail) || get<ErrorTypePack>(*superTail))
// error type is fine on either side
results.push_back(SubtypingResult{true}.withBothComponent(TypePath::PackField::Tail));
else
iceReporter->ice(
format("Subtyping::isSubtype got unexpected type packs %s and %s", toString(*subTail).c_str(), toString(*superTail).c_str()));


@ -1742,9 +1742,16 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
{
return "call " + tos(c.fn) + "( " + tos(c.argsPack) + " )" + " with { result = " + tos(c.result) + " }";
}
else if constexpr (std::is_same_v<T, FunctionCheckConstraint>)
{
return "function_check " + tos(c.fn) + " " + tos(c.argsPack);
}
else if constexpr (std::is_same_v<T, PrimitiveTypeConstraint>)
{
if (c.expectedType)
return "prim " + tos(c.freeType) + "[expected: " + tos(*c.expectedType) + "] as " + tos(c.primitiveType);
else
return "prim " + tos(c.freeType) + " as " + tos(c.primitiveType);
}
else if constexpr (std::is_same_v<T, HasPropConstraint>)
{


@ -419,6 +419,10 @@ bool maybeSingleton(TypeId ty)
for (TypeId option : utv)
if (get<SingletonType>(follow(option)))
return true;
if (const IntersectionType* itv = get<IntersectionType>(ty))
for (TypeId part : itv)
if (maybeSingleton(part)) // will i regret this?
return true;
return false;
}
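The intersection case arises once refinements are expressed through the intersect type family. A hypothetical Luau trigger:

local function f(s: string)
    if s == "a" then
        -- s is refined to roughly intersect<string, "a">; maybeSingleton
        -- must see the "a" inside that intersection for singleton
        -- inference to keep working
        local t: "a" = s
    end
end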


@ -261,6 +261,155 @@ struct TypeChecker2
{
}
static bool allowsNoReturnValues(const TypePackId tp)
{
for (TypeId ty : tp)
{
if (!get<ErrorType>(follow(ty)))
return false;
}
return true;
}
static Location getEndLocation(const AstExprFunction* function)
{
Location loc = function->location;
if (loc.begin.line != loc.end.line)
{
Position begin = loc.end;
begin.column = std::max(0u, begin.column - 3);
loc = Location(begin, 3);
}
return loc;
}
bool isErrorCall(const AstExprCall* call)
{
const AstExprGlobal* global = call->func->as<AstExprGlobal>();
if (!global)
return false;
if (global->name == "error")
return true;
else if (global->name == "assert")
{
// assert() will error because it is missing the first argument
if (call->args.size == 0)
return true;
if (AstExprConstantBool* expr = call->args.data[0]->as<AstExprConstantBool>())
if (!expr->value)
return true;
}
return false;
}
bool hasBreak(AstStat* node)
{
if (AstStatBlock* stat = node->as<AstStatBlock>())
{
for (size_t i = 0; i < stat->body.size; ++i)
{
if (hasBreak(stat->body.data[i]))
return true;
}
return false;
}
if (node->is<AstStatBreak>())
return true;
if (AstStatIf* stat = node->as<AstStatIf>())
{
if (hasBreak(stat->thenbody))
return true;
if (stat->elsebody && hasBreak(stat->elsebody))
return true;
return false;
}
return false;
}
// returns the last statement before the block implicitly exits, or nullptr if the block does not implicitly exit
// i.e. returns nullptr if the block returns properly or never returns
const AstStat* getFallthrough(const AstStat* node)
{
if (const AstStatBlock* stat = node->as<AstStatBlock>())
{
if (stat->body.size == 0)
return stat;
for (size_t i = 0; i < stat->body.size - 1; ++i)
{
if (getFallthrough(stat->body.data[i]) == nullptr)
return nullptr;
}
return getFallthrough(stat->body.data[stat->body.size - 1]);
}
if (const AstStatIf* stat = node->as<AstStatIf>())
{
if (const AstStat* thenf = getFallthrough(stat->thenbody))
return thenf;
if (stat->elsebody)
{
if (const AstStat* elsef = getFallthrough(stat->elsebody))
return elsef;
return nullptr;
}
else
return stat;
}
if (node->is<AstStatReturn>())
return nullptr;
if (const AstStatExpr* stat = node->as<AstStatExpr>())
{
if (AstExprCall* call = stat->expr->as<AstExprCall>(); call && isErrorCall(call))
return nullptr;
return stat;
}
if (const AstStatWhile* stat = node->as<AstStatWhile>())
{
if (AstExprConstantBool* expr = stat->condition->as<AstExprConstantBool>())
{
if (expr->value && !hasBreak(stat->body))
return nullptr;
}
return node;
}
if (const AstStatRepeat* stat = node->as<AstStatRepeat>())
{
if (AstExprConstantBool* expr = stat->condition->as<AstExprConstantBool>())
{
if (!expr->value && !hasBreak(stat->body))
return nullptr;
}
if (getFallthrough(stat->body) == nullptr)
return nullptr;
return node;
}
return node;
}
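Together, hasBreak and getFallthrough drive the FunctionExitsWithoutReturning diagnostic added further down. A Luau sketch (illustrative, not from the commit):

local function sign(n: number): number
    if n > 0 then
        return 1
    elseif n < 0 then
        return -1
    end
    -- getFallthrough finds this implicit exit: no branch covers n == 0,
    -- and the declared return type number does not allow returning
    -- nothing, so the checker reports an error at the final 'end'
end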
std::optional<StackPusher> pushStack(AstNode* node)
{
if (Scope** scope = module->astScopes.find(node))
@ -1723,6 +1872,10 @@ struct TypeChecker2
++argIt;
}
bool reachesImplicitReturn = getFallthrough(fn->body) != nullptr;
if (reachesImplicitReturn && !allowsNoReturnValues(follow(inferredFtv->retTypes)))
reportError(FunctionExitsWithoutReturning{inferredFtv->retTypes}, getEndLocation(fn));
}
visit(fn->body);


@ -1052,6 +1052,73 @@ TypeFamilyReductionResult<TypeId> refineFamilyFn(const std::vector<TypeId>& type
return {resultTy, false, {}, {}};
}
TypeFamilyReductionResult<TypeId> unionFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
{
if (typeParams.size() != 2 || !packParams.empty())
{
ctx->ice->ice("union type family: encountered a type family instance without the required argument structure");
LUAU_ASSERT(false);
}
TypeId lhsTy = follow(typeParams.at(0));
TypeId rhsTy = follow(typeParams.at(1));
// check to see if both operand types are resolved enough, and wait to reduce if not
if (isPending(lhsTy, ctx->solver))
return {std::nullopt, false, {lhsTy}, {}};
else if (get<NeverType>(lhsTy)) // if the lhs is never, we don't need this family anymore
return {rhsTy, false, {}, {}};
else if (isPending(rhsTy, ctx->solver))
return {std::nullopt, false, {rhsTy}, {}};
else if (get<NeverType>(rhsTy)) // if the rhs is never, we don't need this family anymore
return {lhsTy, false, {}, {}};
SimplifyResult result = simplifyUnion(ctx->builtins, ctx->arena, lhsTy, rhsTy);
if (!result.blockedTypes.empty())
return {std::nullopt, false, {result.blockedTypes.begin(), result.blockedTypes.end()}, {}};
return {result.result, false, {}, {}};
}
TypeFamilyReductionResult<TypeId> intersectFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
{
if (typeParams.size() != 2 || !packParams.empty())
{
ctx->ice->ice("intersect type family: encountered a type family instance without the required argument structure");
LUAU_ASSERT(false);
}
TypeId lhsTy = follow(typeParams.at(0));
TypeId rhsTy = follow(typeParams.at(1));
// check to see if both operand types are resolved enough, and wait to reduce if not
if (isPending(lhsTy, ctx->solver))
return {std::nullopt, false, {lhsTy}, {}};
else if (get<NeverType>(lhsTy)) // if the lhs is never, we don't need this family anymore
return {ctx->builtins->neverType, false, {}, {}};
else if (isPending(rhsTy, ctx->solver))
return {std::nullopt, false, {rhsTy}, {}};
else if (get<NeverType>(rhsTy)) // if the rhs is never, we don't need this family anymore
return {ctx->builtins->neverType, false, {}, {}};
SimplifyResult result = simplifyIntersection(ctx->builtins, ctx->arena, lhsTy, rhsTy);
if (!result.blockedTypes.empty())
return {std::nullopt, false, {result.blockedTypes.begin(), result.blockedTypes.end()}, {}};
// if the intersection simplifies to `never`, this gives us bad autocomplete.
// we'll just produce the intersection plainly instead, but this might be revisitable
// if we ever give `never` some kind of "explanation" trail.
if (get<NeverType>(result.result))
{
TypeId intersection = ctx->arena->addType(IntersectionType{{lhsTy, rhsTy}});
return {intersection, false, {}, {}};
}
return {result.result, false, {}, {}};
}
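Keeping the unsimplified intersection instead of never preserves information for tooling; a hedged Luau illustration:

type Impossible = string & number
-- eager simplification would collapse this to never; retaining
-- string & number lets autocomplete and error messages still show
-- the constituent parts
local x: Impossible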
// computes the keys of `ty` into `result`
// `isRaw` parameter indicates whether or not we should follow __index metamethods
// returns `false` if `result` should be ignored because the answer is "all strings"
@ -1262,6 +1329,8 @@ BuiltinTypeFamilies::BuiltinTypeFamilies()
, leFamily{"le", leFamilyFn}
, eqFamily{"eq", eqFamilyFn}
, refineFamily{"refine", refineFamilyFn}
, unionFamily{"union", unionFamilyFn}
, intersectFamily{"intersect", intersectFamilyFn}
, keyofFamily{"keyof", keyofFamilyFn}
, rawkeyofFamily{"rawkeyof", rawkeyofFamilyFn}
{


@ -40,6 +40,7 @@ LUAU_FASTFLAGVARIABLE(LuauLoopControlFlowAnalysis, false)
LUAU_FASTFLAGVARIABLE(LuauAlwaysCommitInferencesOfFunctionCalls, false)
LUAU_FASTFLAGVARIABLE(LuauRemoveBadRelationalOperatorWarning, false)
LUAU_FASTFLAGVARIABLE(LuauForbidAliasNamedTypeof, false)
LUAU_FASTFLAGVARIABLE(LuauOkWithIteratingOverTableProperties, false)
namespace Luau
{
@ -1335,10 +1336,10 @@ ControlFlow TypeChecker::check(const ScopePtr& scope, const AstStatForIn& forin)
for (size_t i = 2; i < varTypes.size(); ++i)
unify(nilType, varTypes[i], scope, forin.location);
}
else if (isNonstrictMode() || FFlag::LuauOkWithIteratingOverTableProperties)
{
for (TypeId var : varTypes)
unify(unknownType, var, scope, forin.location);
}
else
{


@ -56,6 +56,12 @@ bool Unifier2::unify(TypeId subTy, TypeId superTy)
if (subFree || superFree)
return true;
if (auto subLocal = getMutable<LocalType>(subTy))
{
subLocal->domain = mkUnion(subLocal->domain, superTy);
expandedFreeTypes[subTy].push_back(superTy);
}
auto subFn = get<FunctionType>(subTy);
auto superFn = get<FunctionType>(superTy);
if (subFn && superFn)


@ -387,7 +387,7 @@ public:
AstExprFunction(const Location& location, const AstArray<AstGenericType>& generics, const AstArray<AstGenericTypePack>& genericPacks,
AstLocal* self, const AstArray<AstLocal*>& args, bool vararg, const Location& varargLocation, AstStatBlock* body, size_t functionDepth,
const AstName& debugname, const std::optional<AstTypeList>& returnAnnotation = {}, AstTypePack* varargAnnotation = nullptr,
const std::optional<Location>& argLocation = std::nullopt);
void visit(AstVisitor* visitor) override;
@ -406,8 +406,6 @@ public:
AstName debugname;
std::optional<Location> argLocation;
};
@ -573,7 +571,7 @@ public:
LUAU_RTTI(AstStatIf)
AstStatIf(const Location& location, AstExpr* condition, AstStatBlock* thenbody, AstStat* elsebody, const std::optional<Location>& thenLocation,
const std::optional<Location>& elseLocation);
void visit(AstVisitor* visitor) override;
@ -585,9 +583,6 @@ public:
// Active for 'elseif' as well
std::optional<Location> elseLocation;
};
class AstStatWhile : public AstStat
@ -595,7 +590,7 @@ class AstStatWhile : public AstStat
public:
LUAU_RTTI(AstStatWhile)
AstStatWhile(const Location& location, AstExpr* condition, AstStatBlock* body, bool hasDo, const Location& doLocation);
void visit(AstVisitor* visitor) override;
@ -604,9 +599,6 @@ public:
bool hasDo = false;
Location doLocation;
};
class AstStatRepeat : public AstStat
@ -690,7 +682,7 @@ public:
LUAU_RTTI(AstStatFor)
AstStatFor(const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo,
const Location& doLocation);
void visit(AstVisitor* visitor) override;
@ -702,9 +694,6 @@ public:
bool hasDo = false;
Location doLocation;
};
class AstStatForIn : public AstStat
@ -713,7 +702,7 @@ public:
LUAU_RTTI(AstStatForIn)
AstStatForIn(const Location& location, const AstArray<AstLocal*>& vars, const AstArray<AstExpr*>& values, AstStatBlock* body, bool hasIn,
const Location& inLocation, bool hasDo, const Location& doLocation);
void visit(AstVisitor* visitor) override;
@ -726,9 +715,6 @@ public:
bool hasDo = false;
Location doLocation;
};
class AstStatAssign : public AstStat


@ -163,7 +163,7 @@ void AstExprIndexExpr::visit(AstVisitor* visitor)
AstExprFunction::AstExprFunction(const Location& location, const AstArray<AstGenericType>& generics, const AstArray<AstGenericTypePack>& genericPacks,
AstLocal* self, const AstArray<AstLocal*>& args, bool vararg, const Location& varargLocation, AstStatBlock* body, size_t functionDepth,
const AstName& debugname, const std::optional<AstTypeList>& returnAnnotation, AstTypePack* varargAnnotation,
const std::optional<Location>& argLocation)
: AstExpr(ClassIndex(), location)
, generics(generics)
@ -177,7 +177,6 @@ AstExprFunction::AstExprFunction(const Location& location, const AstArray<AstGen
, body(body)
, functionDepth(functionDepth)
, debugname(debugname)
, argLocation(argLocation)
{
}
@ -395,14 +394,13 @@ void AstStatBlock::visit(AstVisitor* visitor)
}
AstStatIf::AstStatIf(const Location& location, AstExpr* condition, AstStatBlock* thenbody, AstStat* elsebody,
const std::optional<Location>& thenLocation, const std::optional<Location>& elseLocation)
: AstStat(ClassIndex(), location)
, condition(condition)
, thenbody(thenbody)
, elsebody(elsebody)
, thenLocation(thenLocation)
, elseLocation(elseLocation)
{
}
@ -418,13 +416,12 @@ void AstStatIf::visit(AstVisitor* visitor)
}
}
AstStatWhile::AstStatWhile(const Location& location, AstExpr* condition, AstStatBlock* body, bool hasDo, const Location& doLocation)
: AstStat(ClassIndex(), location)
, condition(condition)
, body(body)
, hasDo(hasDo)
, doLocation(doLocation)
, DEPRECATED_hasEnd(DEPRECATED_hasEnd)
{
}
@ -526,7 +523,7 @@ void AstStatLocal::visit(AstVisitor* visitor)
}
AstStatFor::AstStatFor(const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo,
const Location& doLocation, bool DEPRECATED_hasEnd)
const Location& doLocation)
: AstStat(ClassIndex(), location)
, var(var)
, from(from)
@ -535,7 +532,6 @@ AstStatFor::AstStatFor(const Location& location, AstLocal* var, AstExpr* from, A
, body(body)
, hasDo(hasDo)
, doLocation(doLocation)
, DEPRECATED_hasEnd(DEPRECATED_hasEnd)
{
}
@ -557,7 +553,7 @@ void AstStatFor::visit(AstVisitor* visitor)
}
AstStatForIn::AstStatForIn(const Location& location, const AstArray<AstLocal*>& vars, const AstArray<AstExpr*>& values, AstStatBlock* body,
bool hasIn, const Location& inLocation, bool hasDo, const Location& doLocation, bool DEPRECATED_hasEnd)
bool hasIn, const Location& inLocation, bool hasDo, const Location& doLocation)
: AstStat(ClassIndex(), location)
, vars(vars)
, values(values)
@ -566,7 +562,6 @@ AstStatForIn::AstStatForIn(const Location& location, const AstArray<AstLocal*>&
, inLocation(inLocation)
, hasDo(hasDo)
, doLocation(doLocation)
, DEPRECATED_hasEnd(DEPRECATED_hasEnd)
{
}

View file

@ -16,7 +16,6 @@ LUAU_FASTINTVARIABLE(LuauParseErrorLimit, 100)
// Warning: If you are introducing new syntax, ensure that it is behind a separate
// flag so that we don't break production games by reverting syntax changes.
// See docs/SyntaxChanges.md for an explanation.
LUAU_FASTFLAGVARIABLE(LuauClipExtraHasEndProps, false)
LUAU_FASTFLAG(LuauCheckedFunctionSyntax)
LUAU_FASTFLAGVARIABLE(LuauReadWritePropertySyntax, false)
@ -373,18 +372,15 @@ AstStat* Parser::parseIf()
AstStat* elsebody = nullptr;
Location end = start;
std::optional<Location> elseLocation;
bool DEPRECATED_hasEnd = false;
if (lexer.current().type == Lexeme::ReservedElseif)
{
if (FFlag::LuauClipExtraHasEndProps)
thenbody->hasEnd = true;
thenbody->hasEnd = true;
unsigned int oldRecursionCount = recursionCounter;
incrementRecursionCounter("elseif");
elseLocation = lexer.current().location;
elsebody = parseIf();
end = elsebody->location;
DEPRECATED_hasEnd = elsebody->as<AstStatIf>()->DEPRECATED_hasEnd;
recursionCounter = oldRecursionCount;
}
else
@ -393,8 +389,7 @@ AstStat* Parser::parseIf()
if (lexer.current().type == Lexeme::ReservedElse)
{
if (FFlag::LuauClipExtraHasEndProps)
thenbody->hasEnd = true;
thenbody->hasEnd = true;
elseLocation = lexer.current().location;
matchThenElse = lexer.current();
nextLexeme();
@ -406,21 +401,17 @@ AstStat* Parser::parseIf()
end = lexer.current().location;
bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchThenElse);
DEPRECATED_hasEnd = hasEnd;
if (FFlag::LuauClipExtraHasEndProps)
if (elsebody)
{
if (elsebody)
{
if (AstStatBlock* elseBlock = elsebody->as<AstStatBlock>())
elseBlock->hasEnd = hasEnd;
}
else
thenbody->hasEnd = hasEnd;
if (AstStatBlock* elseBlock = elsebody->as<AstStatBlock>())
elseBlock->hasEnd = hasEnd;
}
else
thenbody->hasEnd = hasEnd;
}
return allocator.alloc<AstStatIf>(Location(start, end), cond, thenbody, elsebody, thenLocation, elseLocation, DEPRECATED_hasEnd);
return allocator.alloc<AstStatIf>(Location(start, end), cond, thenbody, elsebody, thenLocation, elseLocation);
}
// while exp do block end
@ -444,10 +435,9 @@ AstStat* Parser::parseWhile()
Location end = lexer.current().location;
bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);
if (FFlag::LuauClipExtraHasEndProps)
body->hasEnd = hasEnd;
body->hasEnd = hasEnd;
return allocator.alloc<AstStatWhile>(Location(start, end), cond, body, hasDo, matchDo.location, hasEnd);
return allocator.alloc<AstStatWhile>(Location(start, end), cond, body, hasDo, matchDo.location);
}
// repeat block until exp
@ -467,8 +457,7 @@ AstStat* Parser::parseRepeat()
functionStack.back().loopDepth--;
bool hasUntil = expectMatchEndAndConsume(Lexeme::ReservedUntil, matchRepeat);
if (FFlag::LuauClipExtraHasEndProps)
body->hasEnd = hasUntil;
body->hasEnd = hasUntil;
AstExpr* cond = parseExpr();
@ -565,10 +554,9 @@ AstStat* Parser::parseFor()
Location end = lexer.current().location;
bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);
if (FFlag::LuauClipExtraHasEndProps)
body->hasEnd = hasEnd;
body->hasEnd = hasEnd;
return allocator.alloc<AstStatFor>(Location(start, end), var, from, to, step, body, hasDo, matchDo.location, hasEnd);
return allocator.alloc<AstStatFor>(Location(start, end), var, from, to, step, body, hasDo, matchDo.location);
}
else
{
@ -609,11 +597,10 @@ AstStat* Parser::parseFor()
Location end = lexer.current().location;
bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);
if (FFlag::LuauClipExtraHasEndProps)
body->hasEnd = hasEnd;
body->hasEnd = hasEnd;
return allocator.alloc<AstStatForIn>(
Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location, hasEnd);
Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location);
}
}
@ -1100,11 +1087,10 @@ std::pair<AstExprFunction*, AstLocal*> Parser::parseFunctionBody(
Location end = lexer.current().location;
bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchFunction);
if (FFlag::LuauClipExtraHasEndProps)
body->hasEnd = hasEnd;
body->hasEnd = hasEnd;
return {allocator.alloc<AstExprFunction>(Location(start, end), generics, genericPacks, self, vars, vararg, varargLocation, body,
functionStack.size(), debugname, typelist, varargAnnotation, hasEnd, argLocation),
functionStack.size(), debugname, typelist, varargAnnotation, argLocation),
funLocal};
}

View file

@ -139,6 +139,10 @@ public:
void fsqrt(RegisterA64 dst, RegisterA64 src);
void fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index);
void ins_4s(RegisterA64 dst, uint8_t dstIndex, RegisterA64 src, uint8_t srcIndex);
void dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index);
// Floating-point rounding and conversions
void frinta(RegisterA64 dst, RegisterA64 src);
void frintm(RegisterA64 dst, RegisterA64 src);
@ -207,6 +211,7 @@ private:
void placeSR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift = 0, int N = 0);
void placeSR2(const char* name, RegisterA64 dst, RegisterA64 src, uint8_t op, uint8_t op2 = 0);
void placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t op2);
void placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t sizes, uint8_t op, uint8_t op2);
void placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op);
void placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op);
void placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift = 0);

View file

@ -119,13 +119,18 @@ public:
void vaddss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsubsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsubps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmulsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmulps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vdivsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vdivps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vandps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vandpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vandnpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vxorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vorps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vucomisd(OperandX64 src1, OperandX64 src2);
@ -159,6 +164,9 @@ public:
void vblendvpd(RegisterX64 dst, RegisterX64 src1, OperandX64 mask, RegisterX64 src3);
void vpshufps(RegisterX64 dst, RegisterX64 src1, OperandX64 src2, uint8_t shuffle);
void vpinsrd(RegisterX64 dst, RegisterX64 src1, OperandX64 src2, uint8_t offset);
// Run final checks
bool finalize();
@ -176,9 +184,11 @@ public:
}
// Constant allocation (uses rip-relative addressing)
OperandX64 i32(int32_t value);
OperandX64 i64(int64_t value);
OperandX64 f32(float value);
OperandX64 f64(double value);
OperandX64 u32x4(uint32_t x, uint32_t y, uint32_t z, uint32_t w);
OperandX64 f32x4(float x, float y, float z, float w);
OperandX64 f64x2(double x, double y);
OperandX64 bytes(const void* ptr, size_t size, size_t align = 8);
@ -260,6 +270,7 @@ private:
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
DenseHashMap<uint32_t, int32_t> constCache32;
DenseHashMap<uint64_t, int32_t> constCache64;
bool finalized = false;

View file

@ -55,6 +55,34 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags = 0,
using AnnotatorFn = void (*)(void* context, std::string& result, int fid, int instpos);
// Output "#" before IR blocks and instructions
enum class IncludeIrPrefix
{
No,
Yes
};
// Output user count and last use information of blocks and instructions
enum class IncludeUseInfo
{
No,
Yes
};
// Output CFG information such as block predecessors, successors, etc.
enum class IncludeCfgInfo
{
No,
Yes
};
// Output VM register live in/out information for blocks
enum class IncludeRegFlowInfo
{
No,
Yes
};
struct AssemblyOptions
{
enum Target
@ -76,10 +104,10 @@ struct AssemblyOptions
bool includeIr = false;
bool includeOutlinedCode = false;
bool includeIrPrefix = true; // "#" before IR blocks and instructions
bool includeUseInfo = true;
bool includeCfgInfo = true;
bool includeRegFlowInfo = true;
IncludeIrPrefix includeIrPrefix = IncludeIrPrefix::Yes;
IncludeUseInfo includeUseInfo = IncludeUseInfo::Yes;
IncludeCfgInfo includeCfgInfo = IncludeCfgInfo::Yes;
IncludeRegFlowInfo includeRegFlowInfo = IncludeRegFlowInfo::Yes;
// An optional annotator function can be provided to describe each instruction; it takes a function id and a sequential instruction id
AnnotatorFn annotator = nullptr;
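
For reference, a minimal usage sketch of the enum-typed options (field and enum names as declared above; the call site is illustrative, not part of this change):

AssemblyOptions options;
options.includeIr = true;
options.includeIrPrefix = IncludeIrPrefix::No;       // omit the leading "#"
options.includeUseInfo = IncludeUseInfo::Yes;
options.includeCfgInfo = IncludeCfgInfo::No;
options.includeRegFlowInfo = IncludeRegFlowInfo::No;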

View file

@ -52,6 +52,11 @@ enum class IrCmd : uint8_t
// A: Rn
LOAD_INT,
// Load a float field from a vector as a double number
// A: Rn or Kn
// B: int (offset from the start of TValue)
LOAD_FLOAT,
// Load a TValue from memory
// A: Rn or Kn or pointer (TValue)
// B: int (optional 'A' pointer offset)
@ -173,6 +178,17 @@ enum class IrCmd : uint8_t
// A: double
ABS_NUM,
// Add/Sub/Mul/Div/Idiv two vectors
// A, B: TValue
ADD_VEC,
SUB_VEC,
MUL_VEC,
DIV_VEC,
// Negate a vector
// A: TValue
UNM_VEC,
// Compute Luau 'not' operation on destructured TValue
// A: tag
// B: int (value)
@ -286,6 +302,10 @@ enum class IrCmd : uint8_t
// A: double
NUM_TO_UINT,
// Converts a double number to a vector with the value in X/Y/Z
// A: double
NUM_TO_VECTOR,
// Adjust stack top (L->top) to point at 'B' TValues *after* the specified register
// This is used to return multiple values
// A: Rn
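
As an illustrative sketch (assuming an IrBuilder named build, as in the translation code later in this change), a vector add of VM registers R1 and R2 into R0 would be built as:

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1));
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(2));
IrOp sum = build.inst(IrCmd::ADD_VEC, vb, vc);
build.inst(IrCmd::STORE_TVALUE, build.vmReg(0), sum);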

View file

@ -2,6 +2,7 @@
#pragma once
#include "Luau/IrData.h"
#include "Luau/CodeGen.h"
#include <string>
#include <vector>
@ -31,11 +32,12 @@ void toString(IrToStringContext& ctx, IrOp op);
void toString(std::string& result, IrConst constant);
void toString(std::string& result, const BytecodeTypes& bcTypes);
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, bool includeUseInfo);
void toStringDetailed(
IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, bool includeUseInfo, bool includeCfgInfo, bool includeRegFlowInfo);
IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, IncludeUseInfo includeUseInfo);
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, IncludeUseInfo includeUseInfo, IncludeCfgInfo includeCfgInfo,
IncludeRegFlowInfo includeRegFlowInfo);
std::string toString(const IrFunction& function, bool includeUseInfo);
std::string toString(const IrFunction& function, IncludeUseInfo includeUseInfo);
std::string dump(const IrFunction& function);

View file

@ -145,6 +145,7 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_FLOAT:
case IrCmd::LOAD_TVALUE:
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
@ -167,6 +168,11 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::ROUND_NUM:
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
case IrCmd::ADD_VEC:
case IrCmd::SUB_VEC:
case IrCmd::MUL_VEC:
case IrCmd::DIV_VEC:
case IrCmd::UNM_VEC:
case IrCmd::NOT_ANY:
case IrCmd::CMP_ANY:
case IrCmd::TABLE_LEN:
@ -180,6 +186,7 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::UINT_TO_NUM:
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
case IrCmd::NUM_TO_VECTOR:
case IrCmd::SUBSTITUTE:
case IrCmd::INVOKE_FASTCALL:
case IrCmd::BITAND_UINT:

View file

@ -19,6 +19,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_FLOAT:
case IrCmd::LOAD_TVALUE:
visitor.maybeUse(inst.a); // Argument can also be a VmConst
break;

View file

@ -569,30 +569,66 @@ void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src)
void AssemblyBuilderA64::fadd(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
LUAU_ASSERT(dst.kind == KindA64::d && src1.kind == KindA64::d && src2.kind == KindA64::d);
if (dst.kind == KindA64::d)
{
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
placeR3("fadd", dst, src1, src2, 0b11110'01'1, 0b0010'10);
placeR3("fadd", dst, src1, src2, 0b11110'01'1, 0b0010'10);
}
else
{
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
placeR3("fadd", dst, src1, src2, 0b11110'00'1, 0b0010'10);
}
}
void AssemblyBuilderA64::fdiv(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
LUAU_ASSERT(dst.kind == KindA64::d && src1.kind == KindA64::d && src2.kind == KindA64::d);
if (dst.kind == KindA64::d)
{
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
placeR3("fdiv", dst, src1, src2, 0b11110'01'1, 0b0001'10);
placeR3("fdiv", dst, src1, src2, 0b11110'01'1, 0b0001'10);
}
else
{
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
placeR3("fdiv", dst, src1, src2, 0b11110'00'1, 0b0001'10);
}
}
void AssemblyBuilderA64::fmul(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
LUAU_ASSERT(dst.kind == KindA64::d && src1.kind == KindA64::d && src2.kind == KindA64::d);
if (dst.kind == KindA64::d)
{
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
placeR3("fmul", dst, src1, src2, 0b11110'01'1, 0b0000'10);
placeR3("fmul", dst, src1, src2, 0b11110'01'1, 0b0000'10);
}
else
{
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
placeR3("fmul", dst, src1, src2, 0b11110'00'1, 0b0000'10);
}
}
void AssemblyBuilderA64::fneg(RegisterA64 dst, RegisterA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
if (dst.kind == KindA64::d)
{
LUAU_ASSERT(src.kind == KindA64::d);
placeR1("fneg", dst, src, 0b000'11110'01'1'0000'10'10000);
placeR1("fneg", dst, src, 0b000'11110'01'1'0000'10'10000);
}
else
{
LUAU_ASSERT(dst.kind == KindA64::s && src.kind == KindA64::s);
placeR1("fneg", dst, src, 0b000'11110'00'1'0000'10'10000);
}
}
void AssemblyBuilderA64::fsqrt(RegisterA64 dst, RegisterA64 src)
@ -604,9 +640,77 @@ void AssemblyBuilderA64::fsqrt(RegisterA64 dst, RegisterA64 src)
void AssemblyBuilderA64::fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
{
LUAU_ASSERT(dst.kind == KindA64::d && src1.kind == KindA64::d && src2.kind == KindA64::d);
if (dst.kind == KindA64::d)
{
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
placeR3("fsub", dst, src1, src2, 0b11110'01'1, 0b0011'10);
placeR3("fsub", dst, src1, src2, 0b11110'01'1, 0b0011'10);
}
else
{
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
placeR3("fsub", dst, src1, src2, 0b11110'00'1, 0b0011'10);
}
}
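
With this change the scalar FP helpers accept both register widths; for example, mirroring the encodings checked in the updated FPMath test:

build.fadd(d1, d2, d3);    // double-precision form, as before
build.fadd(s29, s29, s28); // single-precision form, newly accepted
build.fneg(s30, s30);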
void AssemblyBuilderA64::ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
{
LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::w);
LUAU_ASSERT(index < 4);
if (logText)
logAppend(" %-12sv%d.s[%d],w%d\n", "ins", dst.index, index, src.index);
uint32_t op = 0b0'1'0'01110000'00100'0'0011'1;
place(dst.index | (src.index << 5) | (op << 10) | (index << 19));
commit();
}
void AssemblyBuilderA64::ins_4s(RegisterA64 dst, uint8_t dstIndex, RegisterA64 src, uint8_t srcIndex)
{
LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::q);
LUAU_ASSERT(dstIndex < 4);
LUAU_ASSERT(srcIndex < 4);
if (logText)
logAppend(" %-12sv%d.s[%d],v%d.s[%d]\n", "ins", dst.index, dstIndex, src.index, srcIndex);
uint32_t op = 0b0'1'1'01110000'00100'0'0000'1;
place(dst.index | (src.index << 5) | (op << 10) | (dstIndex << 19) | (srcIndex << 13));
commit();
}
void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
{
if (dst.kind == KindA64::s)
{
LUAU_ASSERT(src.kind == KindA64::q);
LUAU_ASSERT(index < 4);
if (logText)
logAppend(" %-12ss%d,v%d.s[%d]\n", "dup", dst.index, src.index, index);
uint32_t op = 0b01'0'11110000'00100'0'0000'1;
place(dst.index | (src.index << 5) | (op << 10) | (index << 19));
}
else
{
LUAU_ASSERT(src.kind == KindA64::q);
LUAU_ASSERT(index < 4);
if (logText)
logAppend(" %-12sv%d.4s,v%d.s[%d]\n", "dup", dst.index, src.index, index);
uint32_t op = 0b010'01110000'00100'0'0000'1;
place(dst.index | (src.index << 5) | (op << 10) | (index << 19));
}
commit();
}
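
Usage mirrors the new assembler tests; for instance:

build.ins_4s(q29, w17, 3);    // ins v29.s[3],w17
build.ins_4s(q31, 1, q29, 2); // ins v31.s[1],v29.s[2]
build.dup_4s(s29, q31, 2);    // dup s29,v31.s[2]
build.dup_4s(q29, q30, 0);    // dup v29.4s,v30.s[0]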
void AssemblyBuilderA64::frinta(RegisterA64 dst, RegisterA64 src)
@ -839,7 +943,7 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64
if (logText)
log(name, dst, src1, src2);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::s);
LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
@ -848,6 +952,18 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64
commit();
}
void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t sizes, uint8_t op, uint8_t op2)
{
if (logText)
log(name, dst, src1, src2);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::q);
LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
place(dst.index | (src1.index << 5) | (op2 << 10) | (src2.index << 16) | (op << 21) | (sizes << 29));
commit();
}
void AssemblyBuilderA64::placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op)
{
if (logText)

View file

@ -6,6 +6,8 @@
#include <stdarg.h>
#include <stdio.h>
LUAU_FASTFLAGVARIABLE(LuauCache32BitAsmConsts, false)
namespace Luau
{
namespace CodeGen
@ -79,6 +81,7 @@ static ABIX64 getCurrentX64ABI()
AssemblyBuilderX64::AssemblyBuilderX64(bool logText, ABIX64 abi)
: logText(logText)
, abi(abi)
, constCache32(~0u)
, constCache64(~0ull)
{
data.resize(4096);
@ -738,16 +741,36 @@ void AssemblyBuilderX64::vsubsd(OperandX64 dst, OperandX64 src1, OperandX64 src2
placeAvx("vsubsd", dst, src1, src2, 0x5c, false, AVX_0F, AVX_F2);
}
void AssemblyBuilderX64::vsubps(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vsubps", dst, src1, src2, 0x5c, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vmulsd(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vmulsd", dst, src1, src2, 0x59, false, AVX_0F, AVX_F2);
}
void AssemblyBuilderX64::vmulps(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vmulps", dst, src1, src2, 0x59, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vdivsd(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vdivsd", dst, src1, src2, 0x5e, false, AVX_0F, AVX_F2);
}
void AssemblyBuilderX64::vdivps(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vdivps", dst, src1, src2, 0x5e, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vandps(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vandps", dst, src1, src2, 0x54, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vandpd(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vandpd", dst, src1, src2, 0x54, false, AVX_0F, AVX_66);
@ -763,6 +786,11 @@ void AssemblyBuilderX64::vxorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2
placeAvx("vxorpd", dst, src1, src2, 0x57, false, AVX_0F, AVX_66);
}
void AssemblyBuilderX64::vorps(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vorps", dst, src1, src2, 0x56, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2)
{
placeAvx("vorpd", dst, src1, src2, 0x56, false, AVX_0F, AVX_66);
@ -909,6 +937,16 @@ void AssemblyBuilderX64::vblendvpd(RegisterX64 dst, RegisterX64 src1, OperandX64
placeAvx("vblendvpd", dst, src1, mask, src3.index << 4, 0x4b, false, AVX_0F3A, AVX_66);
}
void AssemblyBuilderX64::vpshufps(RegisterX64 dst, RegisterX64 src1, OperandX64 src2, uint8_t shuffle)
{
placeAvx("vpshufps", dst, src1, src2, shuffle, 0xc6, false, AVX_0F, AVX_NP);
}
void AssemblyBuilderX64::vpinsrd(RegisterX64 dst, RegisterX64 src1, OperandX64 src2, uint8_t offset)
{
placeAvx("vpinsrd", dst, src1, src2, offset, 0x22, false, AVX_0F3A, AVX_66);
}
bool AssemblyBuilderX64::finalize()
{
code.resize(codePos - code.data());
@ -961,6 +999,26 @@ void AssemblyBuilderX64::setLabel(Label& label)
log(label);
}
OperandX64 AssemblyBuilderX64::i32(int32_t value)
{
uint32_t as32BitKey = value;
if (as32BitKey != ~0u)
{
if (int32_t* prev = constCache32.find(as32BitKey))
return OperandX64(SizeX64::dword, noreg, 1, rip, *prev);
}
size_t pos = allocateData(4, 4);
writeu32(&data[pos], value);
int32_t offset = int32_t(pos - data.size());
if (as32BitKey != ~0u)
constCache32[as32BitKey] = offset;
return OperandX64(SizeX64::dword, noreg, 1, rip, offset);
}
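
A hedged usage sketch: requesting the same 32-bit constant twice now reuses one rip-relative data slot (values equal to ~0u skip the cache, since that is the hash map's empty key):

build.vaddss(xmm0, xmm0, build.i32(42));
build.vaddss(xmm0, xmm0, build.i32(42)); // second call hits constCache32; no new data is allocated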
OperandX64 AssemblyBuilderX64::i64(int64_t value)
{
uint64_t as64BitKey = value;
@ -983,9 +1041,33 @@ OperandX64 AssemblyBuilderX64::i64(int64_t value)
OperandX64 AssemblyBuilderX64::f32(float value)
{
size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
return OperandX64(SizeX64::dword, noreg, 1, rip, int32_t(pos - data.size()));
if (FFlag::LuauCache32BitAsmConsts)
{
uint32_t as32BitKey;
static_assert(sizeof(as32BitKey) == sizeof(value), "Expecting float to be 32-bit");
memcpy(&as32BitKey, &value, sizeof(value));
if (as32BitKey != ~0u)
{
if (int32_t* prev = constCache32.find(as32BitKey))
return OperandX64(SizeX64::dword, noreg, 1, rip, *prev);
}
size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
int32_t offset = int32_t(pos - data.size());
if (as32BitKey != ~0u)
constCache32[as32BitKey] = offset;
return OperandX64(SizeX64::dword, noreg, 1, rip, offset);
}
else
{
size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
return OperandX64(SizeX64::dword, noreg, 1, rip, int32_t(pos - data.size()));
}
}
OperandX64 AssemblyBuilderX64::f64(double value)
@ -1010,6 +1092,16 @@ OperandX64 AssemblyBuilderX64::f64(double value)
return OperandX64(SizeX64::qword, noreg, 1, rip, offset);
}
OperandX64 AssemblyBuilderX64::u32x4(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
size_t pos = allocateData(16, 16);
writeu32(&data[pos], x);
writeu32(&data[pos + 4], y);
writeu32(&data[pos + 8], z);
writeu32(&data[pos + 12], w);
return OperandX64(SizeX64::xmmword, noreg, 1, rip, int32_t(pos - data.size()));
}
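
This is the constant form the x64 lowering uses for its vector masks (see vectorAndMaskOp/vectorOrMaskOp later in this change); for example:

OperandX64 andMask = build.u32x4(~0u, ~0u, ~0u, 0);      // keep the x/y/z lanes, clear the tag lane
OperandX64 orMask = build.u32x4(0, 0, 0, LUA_TVECTOR);   // re-insert the vector tag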
OperandX64 AssemblyBuilderX64::f32x4(float x, float y, float z, float w)
{
size_t pos = allocateData(16, 16);
@ -1442,7 +1534,6 @@ void AssemblyBuilderX64::placeImm8(int32_t imm)
{
int8_t imm8 = int8_t(imm);
LUAU_ASSERT(imm8 == imm);
place(imm8);
}

View file

@ -21,6 +21,7 @@
#include "CodeGenX64.h"
#include "lapi.h"
#include "lmem.h"
#include <memory>
#include <optional>
@ -56,6 +57,8 @@ LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K
// Current value is based on some member variables being limited to 16 bits
LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockInstructionLimit, 65'536) // 64 K
LUAU_FASTFLAGVARIABLE(DisableNativeCodegenIfBreakpointIsSet, false)
namespace Luau
{
namespace CodeGen
@ -164,6 +167,45 @@ static int onEnter(lua_State* L, Proto* proto)
return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
void onDisable(lua_State* L, Proto* proto)
{
// do nothing if proto already uses bytecode
if (proto->codeentry == proto->code)
return;
// ensure that VM does not call native code for this proto
proto->codeentry = proto->code;
// prevent native code from entering proto with breakpoints
proto->exectarget = 0;
// walk all thread call stacks and clear the LUA_CALLINFO_NATIVE flag from any
// entries pointing to the current proto that has native code enabled.
luaM_visitgco(L, proto, [](void* context, lua_Page* page, GCObject* gco) {
Proto* proto = (Proto*)context;
if (gco->gch.tt != LUA_TTHREAD)
return false;
lua_State* th = gco2th(gco);
for (CallInfo* ci = th->ci; ci > th->base_ci; ci--)
{
if (isLua(ci))
{
Proto* p = clvalue(ci->func)->l.p;
if (p == proto)
{
ci->flags &= ~LUA_CALLINFO_NATIVE;
}
}
}
return false;
});
}
#if defined(__aarch64__)
unsigned int getCpuFeaturesA64()
{
@ -257,6 +299,7 @@ void create(lua_State* L, AllocationCallback* allocationCallback, void* allocati
ecb->close = onCloseState;
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->disable = FFlag::DisableNativeCodegenIfBreakpointIsSet ? onDisable : nullptr;
}
void create(lua_State* L)

View file

@ -108,7 +108,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
if (options.includeIr)
{
if (options.includeIrPrefix)
if (options.includeIrPrefix == IncludeIrPrefix::Yes)
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, options.includeUseInfo, options.includeCfgInfo, options.includeRegFlowInfo);
@ -174,7 +174,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
if (options.includeIr)
{
if (options.includeIrPrefix)
if (options.includeIrPrefix == IncludeIrPrefix::Yes)
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, inst, index, options.includeUseInfo);
@ -201,7 +201,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
lowering.finishBlock(block, nextBlock);
if (options.includeIr && options.includeIrPrefix)
if (options.includeIr && options.includeIrPrefix == IncludeIrPrefix::Yes)
build.logAppend("#\n");
if (block.expectedNextBlock == ~0u)

View file

@ -89,6 +89,8 @@ const char* getCmdName(IrCmd cmd)
return "LOAD_DOUBLE";
case IrCmd::LOAD_INT:
return "LOAD_INT";
case IrCmd::LOAD_FLOAT:
return "LOAD_FLOAT";
case IrCmd::LOAD_TVALUE:
return "LOAD_TVALUE";
case IrCmd::LOAD_ENV:
@ -149,6 +151,16 @@ const char* getCmdName(IrCmd cmd)
return "SQRT_NUM";
case IrCmd::ABS_NUM:
return "ABS_NUM";
case IrCmd::ADD_VEC:
return "ADD_VEC";
case IrCmd::SUB_VEC:
return "SUB_VEC";
case IrCmd::MUL_VEC:
return "MUL_VEC";
case IrCmd::DIV_VEC:
return "DIV_VEC";
case IrCmd::UNM_VEC:
return "UNM_VEC";
case IrCmd::NOT_ANY:
return "NOT_ANY";
case IrCmd::CMP_ANY:
@ -193,6 +205,8 @@ const char* getCmdName(IrCmd cmd)
return "NUM_TO_INT";
case IrCmd::NUM_TO_UINT:
return "NUM_TO_UINT";
case IrCmd::NUM_TO_VECTOR:
return "NUM_TO_VECTOR";
case IrCmd::ADJUST_STACK_TO_REG:
return "ADJUST_STACK_TO_REG";
case IrCmd::ADJUST_STACK_TO_TOP:
@ -581,13 +595,14 @@ static RegisterSet getJumpTargetExtraLiveIn(IrToStringContext& ctx, const IrBloc
return extraRs;
}
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, bool includeUseInfo)
void toStringDetailed(
IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, IncludeUseInfo includeUseInfo)
{
size_t start = ctx.result.size();
toString(ctx, inst, instIdx);
if (includeUseInfo)
if (includeUseInfo == IncludeUseInfo::Yes)
{
padToDetailColumn(ctx.result, start);
@ -624,11 +639,11 @@ void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blo
}
}
void toStringDetailed(
IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, bool includeUseInfo, bool includeCfgInfo, bool includeRegFlowInfo)
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, IncludeUseInfo includeUseInfo, IncludeCfgInfo includeCfgInfo,
IncludeRegFlowInfo includeRegFlowInfo)
{
// Report captured registers for entry block
if (includeRegFlowInfo && block.useCount == 0 && block.kind != IrBlockKind::Dead && ctx.cfg.captured.regs.any())
if (includeRegFlowInfo == IncludeRegFlowInfo::Yes && block.useCount == 0 && block.kind != IrBlockKind::Dead && ctx.cfg.captured.regs.any())
{
append(ctx.result, "; captured regs: ");
appendRegisterSet(ctx, ctx.cfg.captured, ", ");
@ -640,7 +655,7 @@ void toStringDetailed(
toString(ctx, block, blockIdx);
append(ctx.result, ":");
if (includeUseInfo)
if (includeUseInfo == IncludeUseInfo::Yes)
{
padToDetailColumn(ctx.result, start);
@ -652,7 +667,7 @@ void toStringDetailed(
}
// Predecessor list
if (includeCfgInfo && blockIdx < ctx.cfg.predecessorsOffsets.size())
if (includeCfgInfo == IncludeCfgInfo::Yes && blockIdx < ctx.cfg.predecessorsOffsets.size())
{
BlockIteratorWrapper pred = predecessors(ctx.cfg, blockIdx);
@ -666,7 +681,7 @@ void toStringDetailed(
}
// Successor list
if (includeCfgInfo && blockIdx < ctx.cfg.successorsOffsets.size())
if (includeCfgInfo == IncludeCfgInfo::Yes && blockIdx < ctx.cfg.successorsOffsets.size())
{
BlockIteratorWrapper succ = successors(ctx.cfg, blockIdx);
@ -680,7 +695,7 @@ void toStringDetailed(
}
// Live-in VM regs
if (includeRegFlowInfo && blockIdx < ctx.cfg.in.size())
if (includeRegFlowInfo == IncludeRegFlowInfo::Yes && blockIdx < ctx.cfg.in.size())
{
const RegisterSet& in = ctx.cfg.in[blockIdx];
@ -693,7 +708,7 @@ void toStringDetailed(
}
// Live-out VM regs
if (includeRegFlowInfo && blockIdx < ctx.cfg.out.size())
if (includeRegFlowInfo == IncludeRegFlowInfo::Yes && blockIdx < ctx.cfg.out.size())
{
const RegisterSet& out = ctx.cfg.out[blockIdx];
@ -706,7 +721,7 @@ void toStringDetailed(
}
}
std::string toString(const IrFunction& function, bool includeUseInfo)
std::string toString(const IrFunction& function, IncludeUseInfo includeUseInfo)
{
std::string result;
IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};
@ -718,7 +733,7 @@ std::string toString(const IrFunction& function, bool includeUseInfo)
if (block.kind == IrBlockKind::Dead)
continue;
toStringDetailed(ctx, block, uint32_t(i), includeUseInfo, /*includeCfgInfo*/ true, /*includeRegFlowInfo*/ true);
toStringDetailed(ctx, block, uint32_t(i), includeUseInfo, IncludeCfgInfo::Yes, IncludeRegFlowInfo::Yes);
if (block.start == ~0u)
{
@ -747,7 +762,7 @@ std::string toString(const IrFunction& function, bool includeUseInfo)
std::string dump(const IrFunction& function)
{
std::string result = toString(function, /* includeUseInfo */ true);
std::string result = toString(function, IncludeUseInfo::Yes);
printf("%s\n", result.c_str());

View file

@ -290,6 +290,16 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.ldr(inst.regA64, addr);
break;
}
case IrCmd::LOAD_FLOAT:
{
inst.regA64 = regs.allocReg(KindA64::d, index);
RegisterA64 temp = castReg(KindA64::s, inst.regA64); // safe to alias a fresh register
AddressA64 addr = tempAddr(inst.a, intOp(inst.b));
build.ldr(temp, addr);
build.fcvt(inst.regA64, temp);
break;
}
case IrCmd::LOAD_TVALUE:
{
inst.regA64 = regs.allocReg(KindA64::q, index);
@ -657,6 +667,84 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.fabs(inst.regA64, temp);
break;
}
case IrCmd::ADD_VEC:
{
inst.regA64 = regs.allocReuse(KindA64::q, index, {inst.a, inst.b});
RegisterA64 tempa = regs.allocTemp(KindA64::s);
RegisterA64 tempb = regs.allocTemp(KindA64::s);
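// recompute the x/y/z components one at a time with scalar math; the tag lane (lane 3) is not part of the arithmetic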
for (uint8_t i = 0; i < 3; i++)
{
build.dup_4s(tempa, regOp(inst.a), i);
build.dup_4s(tempb, regOp(inst.b), i);
build.fadd(tempa, tempa, tempb);
build.ins_4s(inst.regA64, i, castReg(KindA64::q, tempa), 0);
}
break;
}
case IrCmd::SUB_VEC:
{
inst.regA64 = regs.allocReuse(KindA64::q, index, {inst.a, inst.b});
RegisterA64 tempa = regs.allocTemp(KindA64::s);
RegisterA64 tempb = regs.allocTemp(KindA64::s);
for (uint8_t i = 0; i < 3; i++)
{
build.dup_4s(tempa, regOp(inst.a), i);
build.dup_4s(tempb, regOp(inst.b), i);
build.fsub(tempa, tempa, tempb);
build.ins_4s(inst.regA64, i, castReg(KindA64::q, tempa), 0);
}
break;
}
case IrCmd::MUL_VEC:
{
inst.regA64 = regs.allocReuse(KindA64::q, index, {inst.a, inst.b});
RegisterA64 tempa = regs.allocTemp(KindA64::s);
RegisterA64 tempb = regs.allocTemp(KindA64::s);
for (uint8_t i = 0; i < 3; i++)
{
build.dup_4s(tempa, regOp(inst.a), i);
build.dup_4s(tempb, regOp(inst.b), i);
build.fmul(tempa, tempa, tempb);
build.ins_4s(inst.regA64, i, castReg(KindA64::q, tempa), 0);
}
break;
}
case IrCmd::DIV_VEC:
{
inst.regA64 = regs.allocReuse(KindA64::q, index, {inst.a, inst.b});
RegisterA64 tempa = regs.allocTemp(KindA64::s);
RegisterA64 tempb = regs.allocTemp(KindA64::s);
for (uint8_t i = 0; i < 3; i++)
{
build.dup_4s(tempa, regOp(inst.a), i);
build.dup_4s(tempb, regOp(inst.b), i);
build.fdiv(tempa, tempa, tempb);
build.ins_4s(inst.regA64, i, castReg(KindA64::q, tempa), 0);
}
break;
}
case IrCmd::UNM_VEC:
{
inst.regA64 = regs.allocReuse(KindA64::q, index, {inst.a});
RegisterA64 tempa = regs.allocTemp(KindA64::s);
for (uint8_t i = 0; i < 3; i++)
{
build.dup_4s(tempa, regOp(inst.a), i);
build.fneg(tempa, tempa);
build.ins_4s(inst.regA64, i, castReg(KindA64::q, tempa), 0);
}
break;
}
case IrCmd::NOT_ANY:
{
inst.regA64 = regs.allocReuse(KindA64::w, index, {inst.a, inst.b});
@ -1010,6 +1098,21 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.fcvtzs(castReg(KindA64::x, inst.regA64), temp);
break;
}
case IrCmd::NUM_TO_VECTOR:
{
inst.regA64 = regs.allocReg(KindA64::q, index);
RegisterA64 tempd = tempDouble(inst.a);
RegisterA64 temps = castReg(KindA64::s, tempd);
RegisterA64 tempw = regs.allocTemp(KindA64::w);
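// narrow the double to float, splat it across all four lanes, then overwrite lane 3 with the vector tag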
build.fcvt(temps, tempd);
build.dup_4s(inst.regA64, castReg(KindA64::q, temps), 0);
build.mov(tempw, LUA_TVECTOR);
build.ins_4s(inst.regA64, tempw, 3);
break;
}
case IrCmd::ADJUST_STACK_TO_REG:
{
RegisterA64 temp = regs.allocTemp(KindA64::x);

View file

@ -108,6 +108,17 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.mov(inst.regX64, luauRegValueInt(vmRegOp(inst.a)));
break;
case IrCmd::LOAD_FLOAT:
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
if (inst.a.kind == IrOpKind::VmReg)
build.vcvtss2sd(inst.regX64, inst.regX64, dword[rBase + vmRegOp(inst.a) * sizeof(TValue) + offsetof(TValue, value) + intOp(inst.b)]);
else if (inst.a.kind == IrOpKind::VmConst)
build.vcvtss2sd(
inst.regX64, inst.regX64, dword[rConstants + vmConstOp(inst.a) * sizeof(TValue) + offsetof(TValue, value) + intOp(inst.b)]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_TVALUE:
{
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
@ -586,6 +597,81 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.vandpd(inst.regX64, inst.regX64, build.i64(~(1LL << 63)));
break;
case IrCmd::ADD_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// The fourth component is the tag number, which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vaddps(inst.regX64, tmp1.reg, tmp2.reg);
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
case IrCmd::SUB_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// The fourth component is the tag number, which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vsubps(inst.regX64, tmp1.reg, tmp2.reg);
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
case IrCmd::MUL_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// The fourth component is the tag number, which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vmulps(inst.regX64, tmp1.reg, tmp2.reg);
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
case IrCmd::DIV_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// The fourth component is the tag number, which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vdivps(inst.regX64, tmp1.reg, tmp2.reg);
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
break;
}
case IrCmd::UNM_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});
RegisterX64 src = regOp(inst.a);
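// xor with a -0.0 splat flips the sign bit of every float lane; lane 3 is rewritten with the tag by vpinsrd below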
if (inst.regX64 == src)
{
build.vxorpd(inst.regX64, inst.regX64, build.f32x4(-0.0, -0.0, -0.0, -0.0));
}
else
{
build.vmovsd(inst.regX64, src, src);
build.vxorpd(inst.regX64, inst.regX64, build.f32x4(-0.0, -0.0, -0.0, -0.0));
}
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
break;
}
case IrCmd::NOT_ANY:
{
// TODO: if we have a single user which is a STORE_INT, we are missing the opportunity to write directly to target
@ -878,6 +964,25 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.vcvttsd2si(qwordReg(inst.regX64), memRegDoubleOp(inst.a));
break;
case IrCmd::NUM_TO_VECTOR:
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
if (inst.a.kind == IrOpKind::Constant)
{
float value = float(doubleOp(inst.a));
uint32_t asU32;
static_assert(sizeof(asU32) == sizeof(value), "Expecting float to be 32-bit");
memcpy(&asU32, &value, sizeof(value));
build.vmovaps(inst.regX64, build.u32x4(asU32, asU32, asU32, LUA_TVECTOR));
}
else
{
build.vcvtsd2ss(inst.regX64, inst.regX64, memRegDoubleOp(inst.a));
build.vpshufps(inst.regX64, inst.regX64, inst.regX64, 0b00'00'00'00);
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
}
break;
case IrCmd::ADJUST_STACK_TO_REG:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
@ -2146,6 +2251,22 @@ Label& IrLoweringX64::labelOp(IrOp op) const
return blockOp(op).label;
}
OperandX64 IrLoweringX64::vectorAndMaskOp()
{
if (vectorAndMask.base == noreg)
vectorAndMask = build.u32x4(~0u, ~0u, ~0u, 0);
return vectorAndMask;
}
OperandX64 IrLoweringX64::vectorOrMaskOp()
{
if (vectorOrMask.base == noreg)
vectorOrMask = build.u32x4(0, 0, 0, LUA_TVECTOR);
return vectorOrMask;
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View file

@ -61,6 +61,9 @@ struct IrLoweringX64
IrBlock& blockOp(IrOp op) const;
Label& labelOp(IrOp op) const;
OperandX64 vectorAndMaskOp();
OperandX64 vectorOrMaskOp();
struct InterruptHandler
{
Label self;
@ -87,6 +90,9 @@ struct IrLoweringX64
std::vector<InterruptHandler> interruptHandlers;
std::vector<ExitHandler> exitHandlers;
DenseHashMap<uint32_t, uint32_t> exitHandlerMap;
OperandX64 vectorAndMask = noreg;
OperandX64 vectorOrMask = noreg;
};
} // namespace X64

View file

@ -13,6 +13,7 @@
#include "ltm.h"
LUAU_FASTFLAGVARIABLE(LuauCodegenLuData, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenVector, false)
namespace Luau
{
@ -50,6 +51,21 @@ static IrOp getInitializedFallback(IrBuilder& build, IrOp& fallback)
return fallback;
}
static IrOp loadDoubleOrConstant(IrBuilder& build, IrOp arg)
{
if (arg.kind == IrOpKind::VmConst)
{
LUAU_ASSERT(build.function.proto);
TValue protok = build.function.proto->k[vmConstOp(arg)];
LUAU_ASSERT(protok.tt == LUA_TNUMBER);
return build.constDouble(protok.value.n);
}
return build.inst(IrCmd::LOAD_DOUBLE, arg);
}
void translateInstLoadNil(IrBuilder& build, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
@ -335,9 +351,97 @@ void translateInstJumpxEqS(IrBuilder& build, const Instruction* pc, int pcpos)
static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, IrOp opb, IrOp opc, int pcpos, TMS tm)
{
IrOp fallback;
BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);
if (FFlag::LuauCodegenVector)
{
// Special fast-paths for vectors, matching the cases we have in the VM
if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_ADD || tm == TM_SUB || tm == TM_MUL || tm == TM_DIV))
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;
switch (tm)
{
case TM_ADD:
result = build.inst(IrCmd::ADD_VEC, vb, vc);
break;
case TM_SUB:
result = build.inst(IrCmd::SUB_VEC, vb, vc);
break;
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
break;
}
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}
else if (bcTypes.a == LBC_TYPE_NUMBER && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_MUL || tm == TM_DIV))
{
if (rb != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
IrOp vb = build.inst(IrCmd::NUM_TO_VECTOR, loadDoubleOrConstant(build, opb));
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;
switch (tm)
{
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
break;
}
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}
else if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_NUMBER && (tm == TM_MUL || tm == TM_DIV))
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
if (rc != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));
IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::NUM_TO_VECTOR, loadDoubleOrConstant(build, opc));
IrOp result;
switch (tm)
{
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
break;
}
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}
}
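
// illustrative note (not part of the diff): for 'number * vector' the fast path above emits roughly
//   %b = NUM_TO_VECTOR (LOAD_DOUBLE Rb)
//   %c = LOAD_TVALUE Rc
//   %r = MUL_VEC %b, %c
//   STORE_TVALUE Ra, %r
// guarded by CHECK_TAG instructions that exit to the VM at pcpos on a type mismatch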
IrOp fallback;
// fast-path: number
if (rb != -1)
{
@ -356,18 +460,25 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
IrOp vb, vc;
IrOp result;
if (opb.kind == IrOpKind::VmConst)
if (FFlag::LuauCodegenVector)
{
LUAU_ASSERT(build.function.proto);
TValue protok = build.function.proto->k[vmConstOp(opb)];
LUAU_ASSERT(protok.tt == LUA_TNUMBER);
vb = build.constDouble(protok.value.n);
vb = loadDoubleOrConstant(build, opb);
}
else
{
vb = build.inst(IrCmd::LOAD_DOUBLE, opb);
if (opb.kind == IrOpKind::VmConst)
{
LUAU_ASSERT(build.function.proto);
TValue protok = build.function.proto->k[vmConstOp(opb)];
LUAU_ASSERT(protok.tt == LUA_TNUMBER);
vb = build.constDouble(protok.value.n);
}
else
{
vb = build.inst(IrCmd::LOAD_DOUBLE, opb);
}
}
if (opc.kind == IrOpKind::VmConst)
@ -474,12 +585,23 @@ void translateInstNot(IrBuilder& build, const Instruction* pc)
void translateInstMinus(IrBuilder& build, const Instruction* pc, int pcpos)
{
IrOp fallback;
BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
if (FFlag::LuauCodegenVector && bcTypes.a == LBC_TYPE_VECTOR)
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
IrOp vb = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
IrOp va = build.inst(IrCmd::UNM_VEC, vb);
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), va);
return;
}
IrOp fallback;
IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TNUMBER),
bcTypes.a == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback));
@ -1087,10 +1209,45 @@ void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
int rb = LUAU_INSN_B(*pc);
uint32_t aux = pc[1];
IrOp fallback = build.block(IrBlockKind::Fallback);
BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);
IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
if (FFlag::LuauCodegenVector && bcTypes.a == LBC_TYPE_VECTOR)
{
build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
TString* str = gco2ts(build.function.proto->k[aux].value.gc);
const char* field = getstr(str);
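// the offsets below (0/4/8) are the byte offsets of the x/y/z floats within the vector's value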
if (*field == 'X' || *field == 'x')
{
IrOp value = build.inst(IrCmd::LOAD_FLOAT, build.vmReg(rb), build.constInt(0));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
else if (*field == 'Y' || *field == 'y')
{
IrOp value = build.inst(IrCmd::LOAD_FLOAT, build.vmReg(rb), build.constInt(4));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
else if (*field == 'Z' || *field == 'z')
{
IrOp value = build.inst(IrCmd::LOAD_FLOAT, build.vmReg(rb), build.constInt(8));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
else
{
build.inst(IrCmd::FALLBACK_GETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
}
return;
}
IrOp fallback = build.block(IrBlockKind::Fallback);
build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);
IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

View file

@ -31,6 +31,8 @@ IrValueKind getCmdValueKind(IrCmd cmd)
return IrValueKind::Double;
case IrCmd::LOAD_INT:
return IrValueKind::Int;
case IrCmd::LOAD_FLOAT:
return IrValueKind::Double;
case IrCmd::LOAD_TVALUE:
return IrValueKind::Tvalue;
case IrCmd::LOAD_ENV:
@ -66,6 +68,12 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
return IrValueKind::Double;
case IrCmd::ADD_VEC:
case IrCmd::SUB_VEC:
case IrCmd::MUL_VEC:
case IrCmd::DIV_VEC:
case IrCmd::UNM_VEC:
return IrValueKind::Tvalue;
case IrCmd::NOT_ANY:
case IrCmd::CMP_ANY:
return IrValueKind::Int;
@ -98,6 +106,8 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
return IrValueKind::Int;
case IrCmd::NUM_TO_VECTOR:
return IrValueKind::Tvalue;
case IrCmd::ADJUST_STACK_TO_REG:
case IrCmd::ADJUST_STACK_TO_TOP:
return IrValueKind::None;

View file

@ -94,6 +94,7 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_FLOAT:
case IrCmd::LOAD_TVALUE:
case IrCmd::CMP_ANY:
case IrCmd::JUMP_IF_TRUTHY:
@ -130,6 +131,11 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
case IrCmd::MAX_NUM:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_CMP_NUM:
case IrCmd::FLOOR_NUM:
case IrCmd::CEIL_NUM:
case IrCmd::ROUND_NUM:
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
break;
default:

View file

@ -18,6 +18,7 @@ LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
LUAU_FASTFLAGVARIABLE(LuauReuseBufferChecks, false)
LUAU_FASTFLAG(LuauCodegenVector)
namespace Luau
{
@ -582,6 +583,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.substituteOrRecordVmRegLoad(inst);
break;
}
case IrCmd::LOAD_FLOAT:
break;
case IrCmd::LOAD_TVALUE:
if (inst.a.kind == IrOpKind::VmReg)
state.substituteOrRecordVmRegLoad(inst);
@ -706,6 +709,18 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
}
uint8_t tag = state.tryGetTag(inst.b);
// We know the tag of some instructions that result in TValue
if (FFlag::LuauCodegenVector && tag == 0xff)
{
if (IrInst* arg = function.asInstOp(inst.b))
{
if (arg->cmd == IrCmd::ADD_VEC || arg->cmd == IrCmd::SUB_VEC || arg->cmd == IrCmd::MUL_VEC || arg->cmd == IrCmd::DIV_VEC ||
arg->cmd == IrCmd::UNM_VEC)
tag = LUA_TVECTOR;
}
}
IrOp value = state.tryGetValue(inst.b);
if (inst.a.kind == IrOpKind::VmReg)
@ -1244,7 +1259,6 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
break;
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
@ -1257,6 +1271,12 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::GET_TYPE:
case IrCmd::GET_TYPEOF:
case IrCmd::FINDUPVAL:
case IrCmd::ADD_VEC:
case IrCmd::SUB_VEC:
case IrCmd::MUL_VEC:
case IrCmd::DIV_VEC:
case IrCmd::UNM_VEC:
case IrCmd::NUM_TO_VECTOR:
break;
case IrCmd::DO_ARITH:

View file

@ -5,6 +5,8 @@
#include <utility>
LUAU_FASTFLAGVARIABLE(LuauCodegenMathMemArgs, false)
namespace Luau
{
namespace CodeGen
@ -108,6 +110,21 @@ static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
}
break;
}
case IrCmd::FLOOR_NUM:
case IrCmd::CEIL_NUM:
case IrCmd::ROUND_NUM:
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
{
if (FFlag::LuauCodegenMathMemArgs && inst.a.kind == IrOpKind::Inst)
{
IrInst& arg = function.instOp(inst.a);
if (arg.useCount == 1 && arg.cmd == IrCmd::LOAD_DOUBLE && (arg.a.kind == IrOpKind::VmReg || arg.a.kind == IrOpKind::VmConst))
replace(function, inst.a, arg.a);
}
break;
}
default:
break;
}

View file

@ -333,8 +333,10 @@ void luaG_pusherror(lua_State* L, const char* error)
void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
{
void (*ondisable)(lua_State*, Proto*) = L->global->ecb.disable;
// since native code doesn't support breakpoints, we would need to update all call frames with LUAU_CALLINFO_NATIVE that refer to p
if (p->lineinfo && !p->execdata)
if (p->lineinfo && (ondisable || !p->execdata))
{
for (int i = 0; i < p->sizecode; ++i)
{
@ -360,6 +362,11 @@ void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
p->code[i] |= op;
LUAU_ASSERT(LUAU_INSN_OP(p->code[i]) == op);
// currently we don't restore native code when a breakpoint is disabled.
// this will be addressed in the future.
if (enable && p->execdata && ondisable)
ondisable(L, p);
// note: this is important!
// we only patch the *first* instruction in each proto that's attributed to a given line
// this can be changed, but it requires making patching a bit more nuanced so that we don't patch AUX words

View file

@ -154,6 +154,7 @@ struct lua_ExecutionCallbacks
void (*close)(lua_State* L); // called when global VM state is closed
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*disable)(lua_State* L, Proto* proto); // called when function has to be switched from native to bytecode in the debugger
};
/*

View file

@ -8,6 +8,8 @@
#include <string.h>
#include <stdio.h>
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauInterruptablePatternMatch, false)
// macro to `unsign' a character
#define uchar(c) ((unsigned char)(c))
@ -429,6 +431,21 @@ static const char* match(MatchState* ms, const char* s, const char* p)
{
if (ms->matchdepth-- == 0)
luaL_error(ms->L, "pattern too complex");
if (DFFlag::LuauInterruptablePatternMatch)
{
lua_State* L = ms->L;
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt;
if (LUAU_UNLIKELY(!!interrupt))
{
// this interrupt is not yieldable
L->nCcalls++;
interrupt(L, -1);
L->nCcalls--;
}
}
init: // using goto's to optimize tail recursion
if (p != ms->p_end)
{ // end of pattern?

View file

@ -3033,7 +3033,7 @@ int luau_precall(lua_State* L, StkId func, int nresults)
ci->savedpc = p->code;
#if VM_HAS_NATIVE
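// exectarget == 0 marks a proto whose native entry point was disabled (see onDisable), so such calls stay on bytecode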
if (p->execdata)
if (p->exectarget != 0 && p->execdata)
ci->flags = LUA_CALLINFO_NATIVE;
#endif

View file

@ -367,11 +367,16 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPMath")
{
SINGLE_COMPARE(fabs(d1, d2), 0x1E60C041);
SINGLE_COMPARE(fadd(d1, d2, d3), 0x1E632841);
SINGLE_COMPARE(fadd(s29, s29, s28), 0x1E3C2BBD);
SINGLE_COMPARE(fdiv(d1, d2, d3), 0x1E631841);
SINGLE_COMPARE(fdiv(s29, s29, s28), 0x1E3C1BBD);
SINGLE_COMPARE(fmul(d1, d2, d3), 0x1E630841);
SINGLE_COMPARE(fmul(s29, s29, s28), 0x1E3C0BBD);
SINGLE_COMPARE(fneg(d1, d2), 0x1E614041);
SINGLE_COMPARE(fneg(s30, s30), 0x1E2143DE);
SINGLE_COMPARE(fsqrt(d1, d2), 0x1E61C041);
SINGLE_COMPARE(fsub(d1, d2, d3), 0x1E633841);
SINGLE_COMPARE(fsub(s29, s29, s28), 0x1E3C3BBD);
SINGLE_COMPARE(frinta(d1, d2), 0x1E664041);
SINGLE_COMPARE(frintm(d1, d2), 0x1E654041);
@ -426,6 +431,14 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPLoadStore")
SINGLE_COMPARE(str(s0, mem(x1, 16)), 0xBD001020);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPInsertExtract")
{
SINGLE_COMPARE(ins_4s(q29, w17, 3), 0x4E1C1E3D);
SINGLE_COMPARE(ins_4s(q31, 0, q29, 0), 0x6E0407BF);
SINGLE_COMPARE(dup_4s(s29, q31, 2), 0x5E1407FD);
SINGLE_COMPARE(dup_4s(q29, q30, 0), 0x4E0407DD);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPCompare")
{
SINGLE_COMPARE(fcmp(d0, d1), 0x1E612000);
@ -535,6 +548,11 @@ TEST_CASE("LogTest")
build.add(x1, x2, w3, 3);
build.ins_4s(q29, w17, 3);
build.ins_4s(q31, 1, q29, 2);
build.dup_4s(s29, q31, 2);
build.dup_4s(q29, q30, 0);
build.setLabel(l);
build.ret();
@ -572,6 +590,10 @@ TEST_CASE("LogTest")
ldr x0,[x1,#1]!
ldr x0,[x1]!,#1
add x1,x2,w3 UXTW #3
ins v29.s[3],w17
ins v31.s[1],v29.s[2]
dup s29,v31.s[2]
dup v29.4s,v30.s[0]
.L1:
ret
)";

View file

@ -3,9 +3,12 @@
#include "Luau/StringUtils.h"
#include "doctest.h"
#include "ScopedFlags.h"
#include <string.h>
LUAU_FASTFLAG(LuauCache32BitAsmConsts)
using namespace Luau::CodeGen;
using namespace Luau::CodeGen::X64;
@ -469,8 +472,13 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXBinaryInstructionForms")
SINGLE_COMPARE(vmulsd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x2b, 0x59, 0xc6);
SINGLE_COMPARE(vdivsd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x2b, 0x5e, 0xc6);
SINGLE_COMPARE(vsubps(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x28, 0x5c, 0xc6);
SINGLE_COMPARE(vmulps(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x28, 0x59, 0xc6);
SINGLE_COMPARE(vdivps(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x28, 0x5e, 0xc6);
SINGLE_COMPARE(vorpd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x29, 0x56, 0xc6);
SINGLE_COMPARE(vxorpd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x29, 0x57, 0xc6);
SINGLE_COMPARE(vorps(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x28, 0x56, 0xc6);
SINGLE_COMPARE(vandpd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x29, 0x54, 0xc6);
SINGLE_COMPARE(vandnpd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0x29, 0x55, 0xc6);
@ -547,6 +555,9 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXTernaryInstructionForms")
vroundsd(xmm8, xmm13, xmmword[r13 + rdx], RoundingModeX64::RoundToPositiveInfinity), 0xc4, 0x43, 0x11, 0x0b, 0x44, 0x15, 0x00, 0x0a);
SINGLE_COMPARE(vroundsd(xmm9, xmm14, xmmword[rcx + r10], RoundingModeX64::RoundToZero), 0xc4, 0x23, 0x09, 0x0b, 0x0c, 0x11, 0x0b);
SINGLE_COMPARE(vblendvpd(xmm7, xmm12, xmmword[rcx + r10], xmm5), 0xc4, 0xa3, 0x19, 0x4b, 0x3c, 0x11, 0x50);
SINGLE_COMPARE(vpshufps(xmm7, xmm12, xmmword[rcx + r10], 0b11010100), 0xc4, 0xa1, 0x18, 0xc6, 0x3c, 0x11, 0xd4);
SINGLE_COMPARE(vpinsrd(xmm7, xmm12, xmmword[rcx + r10], 2), 0xc4, 0xa3, 0x19, 0x22, 0x3c, 0x11, 0x02);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "MiscInstructions")
@ -720,7 +731,7 @@ TEST_CASE("ConstantStorage")
AssemblyBuilderX64 build(/* logText= */ false);
for (int i = 0; i <= 3000; i++)
build.vaddss(xmm0, xmm0, build.f32(1.0f));
build.vaddss(xmm0, xmm0, build.i32(i));
build.finalize();
@ -728,13 +739,31 @@ TEST_CASE("ConstantStorage")
for (int i = 0; i <= 3000; i++)
{
CHECK(build.data[i * 4 + 0] == 0x00);
CHECK(build.data[i * 4 + 1] == 0x00);
CHECK(build.data[i * 4 + 2] == 0x80);
CHECK(build.data[i * 4 + 3] == 0x3f);
CHECK(build.data[i * 4 + 0] == ((3000 - i) & 0xff));
CHECK(build.data[i * 4 + 1] == ((3000 - i) >> 8));
CHECK(build.data[i * 4 + 2] == 0x00);
CHECK(build.data[i * 4 + 3] == 0x00);
}
}
TEST_CASE("ConstantStorageDedup")
{
ScopedFastFlag luauCache32BitAsmConsts{FFlag::LuauCache32BitAsmConsts, true};
AssemblyBuilderX64 build(/* logText= */ false);
for (int i = 0; i <= 3000; i++)
build.vaddss(xmm0, xmm0, build.f32(1.0f));
build.finalize();
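// All 3001 identical 1.0f constants should be deduplicated into a single 4-byte entry: 0x3f800000 stored little-endian.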
CHECK(build.data.size() == 4);
CHECK(build.data[0] == 0x00);
CHECK(build.data[1] == 0x00);
CHECK(build.data[2] == 0x80);
CHECK(build.data[3] == 0x3f);
}
TEST_CASE("ConstantCaching")
{
AssemblyBuilderX64 build(/* logText= */ false);

View file

@@ -11,15 +11,11 @@
using namespace Luau;
LUAU_FASTFLAG(LuauClipExtraHasEndProps);
struct JsonEncoderFixture
{
Allocator allocator;
AstNameTable names{allocator};
ScopedFastFlag sff{FFlag::LuauClipExtraHasEndProps, true};
ParseResult parse(std::string_view src)
{
ParseOptions opts;
@@ -95,8 +91,6 @@ TEST_CASE("basic_escaping")
TEST_CASE("encode_AstStatBlock")
{
ScopedFastFlag sff{FFlag::LuauClipExtraHasEndProps, true};
AstLocal astlocal{AstName{"a_local"}, Location(), nullptr, 0, 0, nullptr};
AstLocal* astlocalarray[] = {&astlocal};

View file

@@ -107,6 +107,15 @@ struct ACFixtureImpl : BaseType
globals, globals.globalScope, source, "@test", /* captureComments */ false, /* typeCheckForAutocomplete */ true);
freeze(globals.globalTypes);
if (FFlag::DebugLuauDeferredConstraintResolution)
{
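// Presumably the new solver drives autocomplete off the main type graph, so the definitions are loaded into frontend.globals as well.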
GlobalTypes& globals = this->frontend.globals;
unfreeze(globals.globalTypes);
LoadDefinitionFileResult result = this->frontend.loadDefinitionFile(
globals, globals.globalScope, source, "@test", /* captureComments */ false, /* typeCheckForAutocomplete */ true);
freeze(globals.globalTypes);
}
REQUIRE_MESSAGE(result.success, "loadDefinition: unable to load definition file");
return result;
}

View file

@@ -26,9 +26,10 @@ extern bool verbose;
extern bool codegen;
extern int optimizationLevel;
LUAU_FASTFLAG(LuauTaggedLuData);
LUAU_FASTFLAG(LuauSciNumberSkipTrailDot);
LUAU_FASTINT(CodegenHeuristicsInstructionLimit);
LUAU_FASTFLAG(LuauTaggedLuData)
LUAU_FASTFLAG(LuauSciNumberSkipTrailDot)
LUAU_DYNAMIC_FASTFLAG(LuauInterruptablePatternMatch)
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
static lua_CompileOptions defaultOptions()
{
@@ -1607,6 +1608,47 @@ TEST_CASE("Interrupt")
// abandon the thread
lua_pop(L, 1);
}
lua_callbacks(L)->interrupt = [](lua_State* L, int gc) {
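// gc >= 0 means the interrupt fired from a GC step; only regular execution interrupts count toward the timeout.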
if (gc >= 0)
return;
index++;
if (index == 1'000)
{
index = 0;
luaL_error(L, "timeout");
}
};
ScopedFastFlag luauInterruptablePatternMatch{DFFlag::LuauInterruptablePatternMatch, true};
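// With interruptible pattern matching enabled, every strhang script must reach the interrupt and fail with 'timeout' instead of hanging.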
for (int test = 1; test <= 5; ++test)
{
lua_State* T = lua_newthread(L);
std::string name = "strhang" + std::to_string(test);
lua_getglobal(T, name.c_str());
index = 0;
int status = lua_resume(T, nullptr, 0);
CHECK(status == LUA_ERRRUN);
lua_pop(L, 1);
}
{
lua_State* T = lua_newthread(L);
lua_getglobal(T, "strhangpcall");
index = 0;
int status = lua_resume(T, nullptr, 0);
CHECK(status == LUA_OK);
lua_pop(L, 1);
}
}
TEST_CASE("UserdataApi")

View file

@@ -137,7 +137,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptCheckTag")
optimizeMemoryOperandsX64(build.function);
// Load from memory is 'inlined' into CHECK_TAG
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
CHECK_TAG R2, tnil, bb_fallback_1
CHECK_TAG K5, tnil, bb_fallback_1
@@ -163,7 +163,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptBinaryArith")
optimizeMemoryOperandsX64(build.function);
// Load from memory is 'inlined' into second argument
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R1
%2 = ADD_NUM %0, R2
@@ -193,7 +193,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptEqTag1")
optimizeMemoryOperandsX64(build.function);
// Load from memory is 'inlined' into first argument
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%1 = LOAD_TAG R2
JUMP_EQ_TAG R1, %1, bb_1, bb_2
@@ -230,7 +230,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptEqTag2")
// Load from memory is 'inlined' into the second argument, as it can't be done for the first one
// We also swap first and second argument to generate memory access on the LHS
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R1
STORE_TAG R6, %0
@@ -267,7 +267,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptEqTag3")
optimizeMemoryOperandsX64(build.function);
// Load from memory is 'inlined' into first argument
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = GET_ARR_ADDR %0, 0i
@@ -304,7 +304,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FinalX64OptJumpCmpNum")
optimizeMemoryOperandsX64(build.function);
// Load from memory is 'inlined' into first argument
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%1 = LOAD_DOUBLE R2
JUMP_CMP_NUM R1, %1, bb_1, bb_2
@@ -359,7 +359,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "Numeric")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R0, 30i
STORE_INT R1, -2147483648i
@@ -403,7 +403,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumericConversions")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_DOUBLE R0, 8
STORE_DOUBLE R1, 3740139520
@@ -431,7 +431,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumericConversionsBlocked")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%1 = NUM_TO_INT 1e+20
STORE_INT R0, %1
@@ -489,7 +489,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "Bit32")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_INT R0
STORE_INT R0, 14i
@@ -547,7 +547,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "Bit32RangeReduction")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R10, 62914560i
STORE_INT R10, 61440i
@@ -574,7 +574,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ReplacementPreservesUses")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ true) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::Yes) == R"(
bb_0: ; useCount: 0
%0 = LOAD_INT R0 ; useCount: 1, lastUse: %0
%1 = BITNOT_UINT %0 ; useCount: 1, lastUse: %0
@@ -601,7 +601,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumericNan")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_DOUBLE R0, 2
STORE_DOUBLE R0, nan
@@ -633,7 +633,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ControlFlowEq")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
JUMP bb_1
@@ -682,7 +682,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumToIndex")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R0, 4i
RETURN 0u
@@ -717,7 +717,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "Guards")
updateUseCounts(build.function);
constantFold();
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
RETURN 0u
@@ -838,7 +838,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RememberTagsAndValues")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
STORE_INT R1, 10i
@@ -882,7 +882,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "PropagateThroughTvalue")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
STORE_DOUBLE R0, 0.5
@@ -911,7 +911,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SkipCheckTag")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
RETURN 0u
@@ -938,7 +938,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SkipOncePerBlockChecks")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
CHECK_SAFE_ENV
CHECK_GC
@@ -977,7 +977,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RememberTableState")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R0
CHECK_NO_METATABLE %0, bb_fallback_1
@@ -1023,7 +1023,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RememberNewTableState")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = NEW_TABLE 16u, 32u
STORE_POINTER R0, %0
@@ -1055,7 +1055,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SkipUselessBarriers")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
RETURN 0u
@@ -1086,7 +1086,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ConcatInvalidation")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
STORE_INT R1, 10i
@@ -1135,7 +1135,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "BuiltinFastcallsMayInvalidateMemory")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_DOUBLE R0, 0.5
%1 = LOAD_POINTER R0
@@ -1168,7 +1168,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RedundantStoreCheckConstantType")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R0, 10i
STORE_DOUBLE R0, 0.5
@@ -1198,7 +1198,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagCheckPropagation")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R0
CHECK_TAG %0, tnumber, bb_fallback_1
@@ -1230,7 +1230,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagCheckPropagationConflicting")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R0
CHECK_TAG %0, tnumber, bb_fallback_1
@@ -1266,7 +1266,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TruthyTestRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R1
CHECK_TAG %0, tnumber, bb_fallback_3
@@ -1305,7 +1305,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FalsyTestRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R1
CHECK_TAG %0, tnumber, bb_fallback_3
@@ -1340,7 +1340,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagEqRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R1
CHECK_TAG %0, tboolean
@@ -1372,7 +1372,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "IntEqRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R1, 5i
JUMP bb_1
@@ -1403,7 +1403,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NumCmpRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_DOUBLE R1, 4
JUMP bb_2
@@ -1431,7 +1431,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DataFlowsThroughDirectJumpToUniqueSuccessor
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
JUMP bb_1
@@ -1464,7 +1464,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DataDoesNotFlowThroughDirectJumpToNonUnique
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
JUMP bb_1
@@ -1500,7 +1500,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "EntryBlockUseRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
JUMP bb_1
@@ -1535,7 +1535,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RecursiveSccUseRemoval1")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
RETURN R0, 0i
@@ -1577,7 +1577,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RecursiveSccUseRemoval2")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
JUMP bb_1
@@ -1613,7 +1613,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "IntNumIntPeepholes")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_INT R0
%1 = LOAD_INT R1
@@ -1649,7 +1649,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "InvalidateReglinkVersion")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R2, tstring
%1 = LOAD_TVALUE R2
@@ -1710,7 +1710,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SimplePathExtraction")
constPropInBlockChains(build, true);
createLinearBlocks(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R2
CHECK_TAG %0, tnumber, bb_fallback_1
@@ -1786,7 +1786,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPathExtractionForBlocksWithLiveOutValues"
constPropInBlockChains(build, true);
createLinearBlocks(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TAG R2
CHECK_TAG %0, tnumber, bb_fallback_1
@@ -1838,7 +1838,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "InfiniteLoopInPathAnalysis")
constPropInBlockChains(build, true);
createLinearBlocks(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
JUMP bb_1
@@ -1867,7 +1867,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "PartialStoreInvalidation")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TVALUE R0
STORE_TVALUE R1, %0
@@ -1895,7 +1895,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "VaridicRegisterRangeInvalidation")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R2, tnumber
FALLBACK_GETVARARGS 0u, R1, -1i
@@ -1921,7 +1921,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "LoadPropagatesOnlyRightType")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_INT R0, 2i
%1 = LOAD_DOUBLE R0
@@ -1967,7 +1967,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateHashSlotChecks")
// In the future, we might even see duplicate identical TValue loads go away
// In the future, we might even see loads of different VM regs with the same value go away
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = GET_SLOT_NODE_ADDR %0, 3u, K1
@@ -2031,7 +2031,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateHashSlotChecksAvoidNil")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = GET_SLOT_NODE_ADDR %0, 3u, K1
@@ -2092,7 +2092,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateArrayElemChecksSameIndex")
// In the future, we might even see duplicate identical TValue loads go away
// In the future, we might even see loads of different VM regs with the same value go away
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
CHECK_ARRAY_SIZE %0, 0i, bb_fallback_1
@@ -2152,7 +2152,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateArrayElemChecksSameValue")
// In the future, we might even see duplicate identical TValue loads go away
// In the future, we might even see loads of different VM regs with the same value go away
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
%1 = LOAD_DOUBLE R2
@@ -2208,7 +2208,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateArrayElemChecksLowerIndex")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
CHECK_ARRAY_SIZE %0, 1i, bb_fallback_1
@@ -2264,7 +2264,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateArrayElemChecksInvalidations")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
CHECK_ARRAY_SIZE %0, 0i, bb_fallback_1
@@ -2320,7 +2320,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ArrayElemChecksNegativeIndex")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R1
CHECK_ARRAY_SIZE %0, 0i, bb_fallback_1
@@ -2382,7 +2382,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateBufferLengthChecks")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TVALUE R0
STORE_TVALUE R2, %0
@@ -2428,7 +2428,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "BufferLenghtChecksNegativeIndex")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_TVALUE R0
STORE_TVALUE R2, %0
@@ -2468,7 +2468,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SimpleDiamond")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1, bb_2
; in regs: R0, R1, R2, R3
@@ -2518,7 +2518,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ImplicitFixedRegistersInVarargCall")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1
; in regs: R0, R1, R2
@@ -2553,7 +2553,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ExplicitUseOfRegisterInVarargSequence")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1
; out regs: R0...
@@ -2586,7 +2586,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "VariadicSequenceRestart")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1
; in regs: R0, R1
@@ -2625,7 +2625,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FallbackDoesNotFlowUp")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_fallback_1, bb_2
; in regs: R0
@@ -2677,7 +2677,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "VariadicSequencePeeling")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1, bb_2
; in regs: R0, R1
@@ -2730,7 +2730,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "BuiltinVariadicStart")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1
; in regs: R0
@@ -2760,7 +2760,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SetTable")
updateUseCounts(build.function);
computeCfgInfo(build.function);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; in regs: R0, R1
SET_TABLE R0, R1, 1u
@@ -2839,7 +2839,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RemoveDuplicateCalculation")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
%1 = UNM_NUM %0
@@ -2875,7 +2875,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "LateTableStateLink")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = DUP_TABLE R0
STORE_POINTER R0, %0
@@ -2906,7 +2906,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RegisterVersioning")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
%1 = UNM_NUM %0
@@ -2935,7 +2935,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SetListIsABlocker")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
SETLIST
@@ -2964,7 +2964,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "CallIsABlocker")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
CALL R1, 1i, R2, 1i
@@ -2993,7 +2993,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPropagationOfCapturedRegs")
computeCfgInfo(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
; captured regs: R0
bb_0:
@@ -3025,7 +3025,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoDeadLoadReuse")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%4 = LOAD_DOUBLE R0
%5 = ADD_NUM 0, %4
@@ -3052,7 +3052,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoDeadValueReuse")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
%3 = NUM_TO_INT %0
@@ -3094,7 +3094,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TValueLoadToSplitStore")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_DOUBLE R0
%1 = ADD_NUM %0, 4
@@ -3127,7 +3127,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagStoreUpdatesValueVersion")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
%0 = LOAD_POINTER R0
STORE_POINTER R1, %0
@@ -3155,7 +3155,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagStoreUpdatesSetUpval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_TAG R0, tnumber
STORE_DOUBLE R0, 0.5
@@ -3186,7 +3186,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "TagSelfEqualityCheckRemoval")
updateUseCounts(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
JUMP bb_1

View file

@@ -14,6 +14,8 @@
LUAU_FASTFLAG(LuauFixDivrkInference)
LUAU_FASTFLAG(LuauCompileRevK)
LUAU_FASTFLAG(LuauCodegenVector)
LUAU_FASTFLAG(LuauCodegenMathMemArgs)
static std::string getCodegenAssembly(const char* source)
{
@@ -27,10 +29,10 @@ static std::string getCodegenAssembly(const char* source)
options.includeIr = true;
options.includeOutlinedCode = false;
options.includeIrPrefix = false;
options.includeUseInfo = false;
options.includeCfgInfo = false;
options.includeRegFlowInfo = false;
options.includeIrPrefix = Luau::CodeGen::IncludeIrPrefix::No;
options.includeUseInfo = Luau::CodeGen::IncludeUseInfo::No;
options.includeCfgInfo = Luau::CodeGen::IncludeCfgInfo::No;
options.includeRegFlowInfo = Luau::CodeGen::IncludeRegFlowInfo::No;
Luau::Allocator allocator;
Luau::AstNameTable names(allocator);
@@ -66,6 +68,7 @@ TEST_CASE("VectorReciprocal")
{
ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true};
ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true};
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
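// 1 / a lowers to NUM_TO_VECTOR(1) followed by DIV_VEC once 'a' is known to be a vector, as the IR below shows.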
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vecrcp(a: vector)
@@ -80,11 +83,238 @@ bb_0:
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
JUMP bb_fallback_3
bb_4:
%6 = NUM_TO_VECTOR 1
%7 = LOAD_TVALUE R0
%8 = DIV_VEC %6, %7
STORE_TVALUE R1, %8
INTERRUPT 1u
RETURN R1, 1i
)");
}
TEST_CASE("VectorComponentRead")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
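// Reads of .X/.Y/.Z compile to LOAD_FLOAT at byte offsets 0/4/8 of the vector value.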
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function compsum(a: vector)
return a.X + a.Y + a.Z
end
)"),
R"(
; function compsum($arg0) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%6 = LOAD_FLOAT R0, 0i
STORE_DOUBLE R3, %6
STORE_TAG R3, tnumber
%11 = LOAD_FLOAT R0, 4i
STORE_DOUBLE R4, %11
STORE_TAG R4, tnumber
%20 = ADD_NUM %6, %11
STORE_DOUBLE R2, %20
STORE_TAG R2, tnumber
%25 = LOAD_FLOAT R0, 8i
STORE_DOUBLE R3, %25
%34 = ADD_NUM %20, %25
STORE_DOUBLE R1, %34
STORE_TAG R1, tnumber
INTERRUPT 8u
RETURN R1, 1i
)");
}
TEST_CASE("VectorAdd")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3add(a: vector, b: vector)
return a + b
end
)"),
R"(
; function vec3add($arg0, $arg1) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
CHECK_TAG R1, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%10 = LOAD_TVALUE R0
%11 = LOAD_TVALUE R1
%12 = ADD_VEC %10, %11
STORE_TVALUE R2, %12
INTERRUPT 1u
RETURN R2, 1i
)");
}
TEST_CASE("VectorMinus")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3minus(a: vector)
return -a
end
)"),
R"(
; function vec3minus($arg0) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%6 = LOAD_TVALUE R0
%7 = UNM_VEC %6
STORE_TVALUE R1, %7
INTERRUPT 1u
RETURN R1, 1i
)");
}
TEST_CASE("VectorSubMulDiv")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)
return a * b - c / d
end
)"),
R"(
; function vec3combo($arg0, $arg1, $arg2, $arg3) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
CHECK_TAG R1, tvector, exit(entry)
CHECK_TAG R2, tvector, exit(entry)
CHECK_TAG R3, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%14 = LOAD_TVALUE R0
%15 = LOAD_TVALUE R1
%16 = MUL_VEC %14, %15
STORE_TVALUE R5, %16
%22 = LOAD_TVALUE R2
%23 = LOAD_TVALUE R3
%24 = DIV_VEC %22, %23
STORE_TVALUE R6, %24
%32 = SUB_VEC %16, %24
STORE_TVALUE R4, %32
INTERRUPT 3u
RETURN R4, 1i
)");
}
TEST_CASE("VectorMulDivMixed")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true};
ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true};
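// Scalar operands are widened with NUM_TO_VECTOR immediately before each mixed vector operation.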
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)
return a * 2 + b / 4 + 0.5 * c + 40 / d
end
)"),
R"(
; function vec3combo($arg0, $arg1, $arg2, $arg3) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
CHECK_TAG R1, tvector, exit(entry)
CHECK_TAG R2, tvector, exit(entry)
CHECK_TAG R3, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%12 = LOAD_TVALUE R0
%13 = NUM_TO_VECTOR 2
%14 = MUL_VEC %12, %13
STORE_TVALUE R7, %14
%18 = LOAD_TVALUE R1
%19 = NUM_TO_VECTOR 4
%20 = DIV_VEC %18, %19
STORE_TVALUE R8, %20
%28 = ADD_VEC %14, %20
STORE_TVALUE R6, %28
STORE_DOUBLE R8, 0.5
STORE_TAG R8, tnumber
%37 = NUM_TO_VECTOR 0.5
%38 = LOAD_TVALUE R2
%39 = MUL_VEC %37, %38
STORE_TVALUE R7, %39
%47 = ADD_VEC %28, %39
STORE_TVALUE R5, %47
%51 = NUM_TO_VECTOR 40
%52 = LOAD_TVALUE R3
%53 = DIV_VEC %51, %52
STORE_TVALUE R6, %53
%61 = ADD_VEC %47, %53
STORE_TVALUE R4, %61
INTERRUPT 8u
RETURN R4, 1i
)");
}
TEST_CASE("ExtraMathMemoryOperands")
{
ScopedFastFlag luauCodegenMathMemArgs{FFlag::LuauCodegenMathMemArgs, true};
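// With memory operands enabled, math.floor/ceil/round/sqrt/abs lower to FLOOR_NUM/CEIL_NUM/ROUND_NUM/SQRT_NUM/ABS_NUM reading the VM register directly.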
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo(a: number, b: number, c: number, d: number, e: number)
return math.floor(a) + math.ceil(b) + math.round(c) + math.sqrt(d) + math.abs(e)
end
)"),
R"(
; function foo($arg0, $arg1, $arg2, $arg3, $arg4) line 2
bb_0:
CHECK_TAG R0, tnumber, exit(entry)
CHECK_TAG R1, tnumber, exit(entry)
CHECK_TAG R2, tnumber, exit(entry)
CHECK_TAG R3, tnumber, exit(entry)
CHECK_TAG R4, tnumber, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
CHECK_SAFE_ENV exit(1)
%16 = FLOOR_NUM R0
STORE_DOUBLE R9, %16
STORE_TAG R9, tnumber
%23 = CEIL_NUM R1
STORE_DOUBLE R10, %23
STORE_TAG R10, tnumber
%32 = ADD_NUM %16, %23
STORE_DOUBLE R8, %32
STORE_TAG R8, tnumber
%39 = ROUND_NUM R2
STORE_DOUBLE R9, %39
%48 = ADD_NUM %32, %39
STORE_DOUBLE R7, %48
STORE_TAG R7, tnumber
%55 = SQRT_NUM R3
STORE_DOUBLE R8, %55
%64 = ADD_NUM %48, %55
STORE_DOUBLE R6, %64
STORE_TAG R6, tnumber
%71 = ABS_NUM R4
STORE_DOUBLE R7, %71
%80 = ADD_NUM %64, %71
STORE_DOUBLE R5, %80
STORE_TAG R5, tnumber
INTERRUPT 29u
RETURN R5, 1i
)");
}
TEST_SUITE_END();

View file

@@ -579,10 +579,10 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "keyof_rfc_example")
LUAU_REQUIRE_ERROR_COUNT(1, result);
TypePackMismatch* tpm = get<TypePackMismatch>(result.errors[0]);
REQUIRE(tpm);
CHECK_EQ("\"cat\" | \"dog\" | \"monkey\" | \"fox\"", toString(tpm->wantedTp));
CHECK_EQ("\"cactus\"", toString(tpm->givenTp));
TypeMismatch* tm = get<TypeMismatch>(result.errors[0]);
REQUIRE(tm);
CHECK_EQ("\"cat\" | \"dog\" | \"fox\" | \"monkey\"", toString(tm->wantedType));
CHECK_EQ("\"cactus\"", toString(tm->givenType));
}
TEST_SUITE_END();

View file

@@ -15,6 +15,7 @@
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauOkWithIteratingOverTableProperties)
TEST_SUITE_BEGIN("TypeInferLoops");
@@ -575,6 +576,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "unreachable_code_after_infinite_loop")
TEST_CASE_FIXTURE(BuiltinsFixture, "loop_typecheck_crash_on_empty_optional")
{
ScopedFastFlag sff{FFlag::LuauOkWithIteratingOverTableProperties, true};
CheckResult result = check(R"(
local t = {}
for _ in t do
@@ -583,7 +586,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "loop_typecheck_crash_on_empty_optional")
end
)");
LUAU_REQUIRE_ERROR_COUNT(2, result);
LUAU_REQUIRE_ERROR_COUNT(1, result);
}
TEST_CASE_FIXTURE(Fixture, "fuzz_fail_missing_instantitation_follow")
@@ -624,7 +627,22 @@ TEST_CASE_FIXTURE(Fixture, "loop_iter_basic")
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(*builtinTypes->numberType, *requireType("key"));
// The old solver just infers the wrong type here.
// The right type for `key` is `number?`
if (FFlag::DebugLuauDeferredConstraintResolution)
{
TypeId keyTy = requireType("key");
const UnionType* ut = get<UnionType>(keyTy);
REQUIRE(ut);
REQUIRE(ut->options.size() == 2);
CHECK_EQ(builtinTypes->nilType, ut->options[0]);
CHECK_EQ(*builtinTypes->numberType, *ut->options[1]);
}
else
CHECK_EQ(*builtinTypes->numberType, *requireType("key"));
}
TEST_CASE_FIXTURE(Fixture, "loop_iter_trailing_nil")
@@ -643,17 +661,15 @@ TEST_CASE_FIXTURE(Fixture, "loop_iter_trailing_nil")
TEST_CASE_FIXTURE(Fixture, "loop_iter_no_indexer_strict")
{
ScopedFastFlag sff{FFlag::LuauOkWithIteratingOverTableProperties, true};
CheckResult result = check(R"(
local t = {}
for k, v in t do
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
GenericError* ge = get<GenericError>(result.errors[0]);
REQUIRE(ge);
CHECK_EQ("Cannot iterate over a table without indexer", ge->message);
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "loop_iter_no_indexer_nonstrict")
@@ -786,6 +802,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "cli_68448_iterators_need_not_accept_nil")
TEST_CASE_FIXTURE(Fixture, "iterate_over_free_table")
{
ScopedFastFlag sff{FFlag::LuauOkWithIteratingOverTableProperties, true};
CheckResult result = check(R"(
function print(x) end
@@ -798,12 +816,7 @@ TEST_CASE_FIXTURE(Fixture, "iterate_over_free_table")
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
GenericError* ge = get<GenericError>(result.errors[0]);
REQUIRE(ge);
CHECK("Cannot iterate over a table without indexer" == ge->message);
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_explore_raycast_minimization")
@@ -934,4 +947,59 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_iteration_on_never_gives_never")
CHECK(toString(requireType("ans")) == "never");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "iterate_over_properties")
{
ScopedFastFlag sff{FFlag::LuauOkWithIteratingOverTableProperties, true};
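// Iterating a table without an indexer is now allowed; the iteration variables widen to 'unknown'.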
CheckResult result = check(R"(
local function f()
local t = { p = 5, q = "hello" }
for k, v in t do
return k, v
end
error("")
end
local k, v = f()
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("unknown", toString(requireType("k")));
CHECK_EQ("unknown", toString(requireType("v")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "iterate_over_properties_nonstrict")
{
ScopedFastFlag sff{FFlag::LuauOkWithIteratingOverTableProperties, true};
CheckResult result = check(R"(
--!nonstrict
local function f()
local t = { p = 5, q = "hello" }
for k, v in t do
return k, v
end
error("")
end
local k, v = f()
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "lti_fuzzer_uninitialized_loop_crash")
{
CheckResult result = check(R"(
for l0=_,_ do
return _()
end
)");
LUAU_REQUIRE_ERROR_COUNT(3, result);
}
TEST_SUITE_END();

View file

@@ -219,7 +219,7 @@ TEST_CASE_FIXTURE(Fixture, "inferred_methods_of_free_tables_have_the_same_level_
check(R"(
function Base64FileReader(data)
local reader = {}
local index: number
local index: number = 0
function reader:PeekByte()
return data:byte(index)

View file

@@ -1933,6 +1933,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknown_to_table")
return i, v
end
end
error("")
end
)");

View file

@@ -369,8 +369,12 @@ TEST_CASE_FIXTURE(TypeStateFixture, "prototyped_recursive_functions")
CHECK("(() -> ())?" == toString(requireType("f")));
}
TEST_CASE_FIXTURE(TypeStateFixture, "prototyped_recursive_functions_but_has_future_assignments")
TEST_CASE_FIXTURE(BuiltinsFixture, "prototyped_recursive_functions_but_has_future_assignments")
{
// Early return if the flag isn't set, since this test is blocking gated commits
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local f
function f()

View file

@@ -75,4 +75,37 @@ function infloop10()
end
end
local haystack = string.rep("x", 100)
local pattern = string.rep("x?", 100) .. string.rep("x", 100)
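-- 100 optional 'x?' items followed by 100 mandatory 'x' against a 100-char string forces a combinatorial number of backtracking attempts, so these matches effectively never finish without an interrupt.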
function strhang1()
string.find(haystack, pattern)
end
function strhang2()
string.match(haystack, pattern)
end
function strhang3()
string.gsub(haystack, pattern, "%0")
end
function strhang4()
for k, v in string.gmatch(haystack, pattern) do
end
end
function strhang5()
local x = string.rep('x', 1000)
string.match(x, string.rep('x.*', 100) .. 'y')
end
function strhangpcall()
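-- pcall should surface the 'timeout' error raised by the interrupt handler as an ordinary failure, so the script can keep running.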
for i = 1,100 do
local status, msg = pcall(string.find, haystack, pattern)
assert(status == false)
assert(msg == "timeout")
end
end
return "OK"

View file

@@ -293,4 +293,28 @@ end
assert(loopIteratorProtocol(0, table.create(100, 5)) == 5058)
local function vec3compsum(a: vector)
return a.X + a.Y + a.Z
end
assert(vec3compsum(vector(1, 2, 4)) == 7.0)
local function vec3add(a: vector, b: vector) return a + b end
local function vec3sub(a: vector, b: vector) return a - b end
local function vec3mul(a: vector, b: vector) return a * b end
local function vec3div(a: vector, b: vector) return a / b end
local function vec3neg(a: vector) return -a end
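-- note: vec3div's second component divides 20 by 0, which yields math.huge under IEEE-754 semantics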
assert(vec3add(vector(10, 20, 40), vector(1, 0, 2)) == vector(11, 20, 42))
assert(vec3sub(vector(10, 20, 40), vector(1, 0, 2)) == vector(9, 20, 38))
assert(vec3mul(vector(10, 20, 40), vector(1, 0, 2)) == vector(10, 0, 80))
assert(vec3div(vector(10, 20, 40), vector(1, 0, 2)) == vector(10, math.huge, 20))
assert(vec3neg(vector(10, 20, 40)) == vector(-10, -20, -40))
local function vec3mulnum(a: vector, b: number) return a * b end
local function vec3mulconst(a: vector) return a * 4 end
assert(vec3mulnum(vector(10, 20, 40), 4) == vector(40, 80, 160))
assert(vec3mulconst(vector(10, 20, 40)) == vector(40, 80, 160))
return('OK')

View file

@@ -48,7 +48,6 @@ BuiltinTests.setmetatable_should_not_mutate_persisted_types
BuiltinTests.sort
BuiltinTests.sort_with_bad_predicate
BuiltinTests.sort_with_predicate
BuiltinTests.string_format_arg_count_mismatch
BuiltinTests.string_format_as_method
BuiltinTests.string_format_correctly_ordered_types
BuiltinTests.string_format_report_all_type_errors_at_correct_positions
@@ -59,10 +58,6 @@ BuiltinTests.table_freeze_is_generic
BuiltinTests.table_insert_correctly_infers_type_of_array_2_args_overload
BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload
BuiltinTests.tonumber_returns_optional_number_type
BuiltinTests.xpcall
ControlFlowAnalysis.for_record_do_if_not_x_break
ControlFlowAnalysis.for_record_do_if_not_x_continue
ControlFlowAnalysis.if_not_x_break
ControlFlowAnalysis.if_not_x_break_elif_not_y_break
ControlFlowAnalysis.if_not_x_break_elif_not_y_continue
ControlFlowAnalysis.if_not_x_break_elif_not_y_fallthrough_elif_not_z_break
@@ -70,7 +65,6 @@ ControlFlowAnalysis.if_not_x_break_elif_rand_break_elif_not_y_break
ControlFlowAnalysis.if_not_x_break_elif_rand_break_elif_not_y_fallthrough
ControlFlowAnalysis.if_not_x_break_if_not_y_break
ControlFlowAnalysis.if_not_x_break_if_not_y_continue
ControlFlowAnalysis.if_not_x_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_fallthrough_elif_not_z_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_throw_elif_not_z_fallthrough
@@ -80,13 +74,7 @@ ControlFlowAnalysis.if_not_x_continue_if_not_y_continue
ControlFlowAnalysis.if_not_x_continue_if_not_y_throw
ControlFlowAnalysis.if_not_x_return_elif_not_y_break
ControlFlowAnalysis.if_not_x_return_elif_not_y_fallthrough_elif_not_z_break
ControlFlowAnalysis.prototyping_and_visiting_alias_has_the_same_scope_breaking
ControlFlowAnalysis.prototyping_and_visiting_alias_has_the_same_scope_continuing
ControlFlowAnalysis.tagged_unions
ControlFlowAnalysis.tagged_unions_breaking
ControlFlowAnalysis.tagged_unions_continuing
ControlFlowAnalysis.type_alias_does_not_leak_out_breaking
ControlFlowAnalysis.type_alias_does_not_leak_out_continuing
DefinitionTests.class_definition_indexer
DefinitionTests.class_definition_overload_metamethods
DefinitionTests.class_definition_string_props
@@ -118,7 +106,6 @@ FrontendTest.it_should_be_safe_to_stringify_errors_when_full_type_graph_is_disca
FrontendTest.nocheck_cycle_used_by_checked
FrontendTest.trace_requires_in_nonstrict_mode
GenericsTests.apply_type_function_nested_generics1
GenericsTests.apply_type_function_nested_generics2
GenericsTests.better_mismatch_error_messages
GenericsTests.bound_tables_do_not_clone_original_fields
GenericsTests.check_generic_function
@@ -129,7 +116,6 @@ GenericsTests.check_nested_generic_function
GenericsTests.check_recursive_generic_function
GenericsTests.correctly_instantiate_polymorphic_member_functions
GenericsTests.do_not_always_instantiate_generic_intersection_types
GenericsTests.do_not_infer_generic_functions
GenericsTests.dont_substitute_bound_types
GenericsTests.error_detailed_function_mismatch_generic_pack
GenericsTests.error_detailed_function_mismatch_generic_types
@@ -147,7 +133,6 @@ GenericsTests.generic_type_pack_unification3
GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments
GenericsTests.hof_subtype_instantiation_regression
GenericsTests.infer_generic_function
GenericsTests.infer_generic_function_function_argument
GenericsTests.infer_generic_function_function_argument_2
GenericsTests.infer_generic_function_function_argument_3
GenericsTests.infer_generic_function_function_argument_overloaded
@@ -156,6 +141,7 @@ GenericsTests.infer_generic_local_function
GenericsTests.infer_generic_property
GenericsTests.infer_nested_generic_function
GenericsTests.inferred_local_vars_can_be_polytypes
GenericsTests.instantiate_cyclic_generic_function
GenericsTests.instantiated_function_argument_names
GenericsTests.local_vars_can_be_polytypes
GenericsTests.no_stack_overflow_from_quantifying
@@ -166,6 +152,7 @@ GenericsTests.rank_N_types_via_typeof
GenericsTests.self_recursive_instantiated_param
GenericsTests.type_parameters_can_be_polytypes
GenericsTests.typefuns_sharing_types
IntersectionTypes.CLI-44817
IntersectionTypes.error_detailed_intersection_all
IntersectionTypes.error_detailed_intersection_part
IntersectionTypes.intersect_bool_and_false
@@ -231,7 +218,6 @@ ProvisionalTests.table_insert_with_a_singleton_argument
ProvisionalTests.table_unification_infinite_recursion
ProvisionalTests.typeguard_inference_incomplete
ProvisionalTests.while_body_are_also_refined
ProvisionalTests.xpcall_returns_what_f_returns
RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined
RefinementTest.dataflow_analysis_can_tell_refinements_when_its_appropriate_to_refine_into_nil_or_never
@@ -243,6 +229,7 @@ RefinementTest.else_with_no_explicit_expression_should_also_refine_the_tagged_un
RefinementTest.fail_to_refine_a_property_of_subscript_expression
RefinementTest.falsiness_of_TruthyPredicate_narrows_into_nil
RefinementTest.function_call_with_colon_after_refining_not_to_be_nil
RefinementTest.globals_can_be_narrowed_too
RefinementTest.impossible_type_narrow_is_not_an_error
RefinementTest.index_on_a_refined_property
RefinementTest.isa_type_refinement_must_be_known_ahead_of_time
@@ -259,6 +246,7 @@ RefinementTest.refinements_should_preserve_error_suppression
RefinementTest.string_not_equal_to_string_or_nil
RefinementTest.truthy_constraint_on_properties
RefinementTest.type_annotations_arent_relevant_when_doing_dataflow_analysis
RefinementTest.type_guard_narrowed_into_nothingness
RefinementTest.type_narrow_to_vector
RefinementTest.typeguard_cast_free_table_to_vector
RefinementTest.typeguard_in_assert_position
@@ -303,6 +291,7 @@ TableTests.generic_table_instantiation_potential_regression
TableTests.indexer_mismatch
TableTests.indexers_get_quantified_too
TableTests.inequality_operators_imply_exactly_matching_types
TableTests.infer_indexer_from_array_like_table
TableTests.infer_indexer_from_its_variable_type_and_unifiable
TableTests.inferred_return_type_of_free_table
TableTests.instantiate_table_cloning_3
@@ -377,7 +366,6 @@ ToString.toStringNamedFunction_generic_pack
ToString.toStringNamedFunction_map
TryUnifyTests.members_of_failed_typepack_unification_are_unified_with_errorType
TryUnifyTests.result_of_failed_typepack_unification_is_constrained
TryUnifyTests.typepack_unification_should_trim_free_tails
TryUnifyTests.uninhabited_table_sub_anything
TryUnifyTests.uninhabited_table_sub_never
TryUnifyTests.variadics_should_use_reversed_properly
@@ -402,12 +390,10 @@ TypeFamilyTests.family_as_fn_arg
TypeFamilyTests.family_as_fn_ret
TypeFamilyTests.function_internal_families
TypeFamilyTests.internal_families_raise_errors
TypeFamilyTests.keyof_rfc_example
TypeFamilyTests.table_internal_families
TypeFamilyTests.type_families_inhabited_with_normalization
TypeFamilyTests.unsolvable_family
TypeInfer.bidirectional_checking_of_callback_property
TypeInfer.check_expr_recursion_limit
TypeInfer.check_type_infer_recursion_count
TypeInfer.checking_should_not_ice
TypeInfer.cli_39932_use_unifier_in_ensure_methods
@@ -422,7 +408,6 @@ TypeInfer.infer_assignment_value_types
TypeInfer.infer_locals_via_assignment_from_its_call_site
TypeInfer.infer_through_group_expr
TypeInfer.infer_type_assertion_value_type
TypeInfer.no_infinite_loop_when_trying_to_unify_uh_this
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.promote_tail_type_packs
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter
@@ -447,7 +432,6 @@ TypeInferAnyError.intersection_of_any_can_have_props
TypeInferAnyError.metatable_of_any_can_be_a_table
TypeInferAnyError.quantify_any_does_not_bind_to_itself
TypeInferAnyError.replace_every_free_type_when_unifying_a_complex_function_with_any
TypeInferAnyError.type_error_addition
TypeInferClasses.callable_classes
TypeInferClasses.cannot_unify_class_instance_with_primitive
TypeInferClasses.class_type_mismatch_with_name_conflict
@@ -512,7 +496,6 @@ TypeInferFunctions.param_1_and_2_both_takes_the_same_generic_but_their_arguments
TypeInferFunctions.record_matching_overload
TypeInferFunctions.regex_benchmark_string_format_minimization
TypeInferFunctions.report_exiting_without_return_nonstrict
TypeInferFunctions.report_exiting_without_return_strict
TypeInferFunctions.return_type_by_overload
TypeInferFunctions.too_few_arguments_variadic
TypeInferFunctions.too_few_arguments_variadic_generic
@@ -537,21 +520,21 @@ TypeInferLoops.for_in_loop_with_incompatible_args_to_iterator
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_an_iterator_of_type_any
TypeInferLoops.for_in_with_generic_next
TypeInferLoops.for_in_with_just_one_iterator_is_ok
TypeInferLoops.for_loop
TypeInferLoops.ipairs_produces_integral_indices
TypeInferLoops.iterate_over_free_table
TypeInferLoops.iterate_over_properties
TypeInferLoops.iteration_no_table_passed
TypeInferLoops.iteration_regression_issue_69967
TypeInferLoops.iteration_regression_issue_69967_alt
TypeInferLoops.loop_iter_basic
TypeInferLoops.loop_iter_metamethod_nil
TypeInferLoops.loop_iter_metamethod_ok
TypeInferLoops.loop_iter_metamethod_ok_with_inference
TypeInferLoops.loop_iter_no_indexer_strict
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.loop_typecheck_crash_on_empty_optional
TypeInferLoops.properly_infer_iteratee_is_a_free_table
TypeInferLoops.repeat_loop
TypeInferLoops.unreachable_code_after_infinite_loop
TypeInferLoops.varlist_declared_by_for_in_loop_should_be_free
TypeInferLoops.while_loop
TypeInferModules.bound_free_table_export_is_ok
@@ -559,12 +542,12 @@ TypeInferModules.custom_require_global
TypeInferModules.do_not_modify_imported_types
TypeInferModules.do_not_modify_imported_types_5
TypeInferModules.require
TypeInferModules.require_failed_module
TypeInferOOP.CheckMethodsOfSealed
TypeInferOOP.cycle_between_object_constructor_and_alias
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_another_overload_works
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_not_defined_with_colon
TypeInferOOP.inferred_methods_of_free_tables_have_the_same_level_as_the_enclosing_table
TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
TypeInferOOP.method_depends_on_table
TypeInferOOP.methods_are_topologically_sorted
@@ -582,7 +565,6 @@ TypeInferOperators.equality_operations_succeed_if_any_union_branch_succeeds
TypeInferOperators.error_on_invalid_operand_types_to_relational_operators2
TypeInferOperators.luau_polyfill_is_array
TypeInferOperators.mm_comparisons_must_return_a_boolean
TypeInferOperators.normalize_strings_comparison
TypeInferOperators.operator_eq_verifies_types_do_intersect
TypeInferOperators.reducing_and
TypeInferOperators.refine_and_or
@@ -615,12 +597,10 @@ TypePackTests.unify_variadic_tails_in_arguments
TypeSingletons.enums_using_singletons_mismatch
TypeSingletons.error_detailed_tagged_union_mismatch_bool
TypeSingletons.error_detailed_tagged_union_mismatch_string
TypeSingletons.function_args_infer_singletons
TypeSingletons.function_call_with_singletons
TypeSingletons.function_call_with_singletons_mismatch
TypeSingletons.return_type_of_f_is_not_widened
TypeSingletons.table_properties_type_error_escapes
TypeSingletons.widen_the_supertype_if_it_is_free_and_subtype_has_singleton
TypeStatesTest.prototyped_recursive_functions_but_has_future_assignments
UnionTypes.error_detailed_optional
UnionTypes.error_detailed_union_all
UnionTypes.generic_function_with_optional_arg