Sync to upstream/release/626 (#1258)

### New Type Solver

* Fixed crash in numeric binary operation type families
* Results of an indexing operation are now comparable to `nil` without
producing a false positive error
* Fixed a crash when a type that failed normalization was accessed
* Iterating on a free value now implies that it is iterable

---

### Internal Contributors

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Alexander McCord <amccord@roblox.com>
Co-authored-by: James McNellis <jmcnellis@roblox.com>
Co-authored-by: Vighnesh Vijay <vvijay@roblox.com>
This commit is contained in:
vegorov-rbx 2024-05-16 16:02:03 -07:00 committed by GitHub
parent 2a80f5e1d1
commit fe0a819472
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
32 changed files with 867 additions and 1125 deletions

View file

@ -373,7 +373,8 @@ private:
*/
std::vector<std::optional<TypeId>> getExpectedCallTypesForFunctionOverloads(const TypeId fnType);
TypeId createFamilyInstance(TypeFamilyInstanceType instance, const ScopePtr& scope, Location location);
TypeId createTypeFamilyInstance(
const TypeFamily& family, std::vector<TypeId> typeArguments, std::vector<TypePackId> packArguments, const ScopePtr& scope, Location location);
};
/** Borrow a vector of pointers from a vector of owning pointers to constraints.

View file

@ -0,0 +1,13 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "Luau/Scope.h"
#include "Luau/NotNull.h"
#include "Luau/TypeFwd.h"

namespace Luau
{

// Generalizes `ty` in place: free types reachable from it are replaced with
// generic types or with their bounds, using `scope` to decide which free
// types are eligible.  Types that live in a different arena, or that are
// persistent, are returned unchanged.
// NOTE(review): this declaration uses std::optional but the header does not
// include <optional> directly — presumably one of the included headers pulls
// it in transitively; confirm include hygiene.
std::optional<TypeId> generalize(NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, TypeId ty);

} // namespace Luau

View file

@ -414,7 +414,7 @@ void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location loca
discriminantTy = arena->addType(NegationType{discriminantTy});
if (eq)
discriminantTy = arena->addTypeFamily(kBuiltinTypeFamilies.singletonFamily, {discriminantTy});
discriminantTy = createTypeFamilyInstance(kBuiltinTypeFamilies.singletonFamily, {discriminantTy}, {}, scope, location);
for (const RefinementKey* key = proposition->key; key; key = key->parent)
{
@ -526,13 +526,7 @@ void ConstraintGenerator::applyRefinements(const ScopePtr& scope, Location locat
{
if (mustDeferIntersection(ty) || mustDeferIntersection(dt))
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.refineFamily},
{ty, dt},
{},
},
scope, location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.refineFamily, {ty, dt}, {}, scope, location);
ty = resultType;
}
@ -2009,35 +2003,17 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprUnary* unary)
{
case AstExprUnary::Op::Not:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.notFamily},
{operandType},
{},
},
scope, unary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.notFamily, {operandType}, {}, scope, unary->location);
return Inference{resultType, refinementArena.negation(refinement)};
}
case AstExprUnary::Op::Len:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.lenFamily},
{operandType},
{},
},
scope, unary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.lenFamily, {operandType}, {}, scope, unary->location);
return Inference{resultType, refinementArena.negation(refinement)};
}
case AstExprUnary::Op::Minus:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.unmFamily},
{operandType},
{},
},
scope, unary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.unmFamily, {operandType}, {}, scope, unary->location);
return Inference{resultType, refinementArena.negation(refinement)};
}
default: // msvc can't prove that this is exhaustive.
@ -2053,168 +2029,96 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprBinary* binar
{
case AstExprBinary::Op::Add:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.addFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.addFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Sub:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.subFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.subFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Mul:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.mulFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.mulFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Div:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.divFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.divFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::FloorDiv:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.idivFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.idivFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Pow:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.powFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.powFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Mod:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.modFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.modFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Concat:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.concatFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.concatFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::And:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.andFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.andFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Or:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.orFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.orFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::CompareLt:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.ltFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.ltFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::CompareGe:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.ltFamily},
{rightType, leftType}, // lua decided that `__ge(a, b)` is instead just `__lt(b, a)`
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.ltFamily,
{rightType, leftType}, // lua decided that `__ge(a, b)` is instead just `__lt(b, a)`
{}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::CompareLe:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.leFamily},
{leftType, rightType},
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.leFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::CompareGt:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.leFamily},
{rightType, leftType}, // lua decided that `__gt(a, b)` is instead just `__le(b, a)`
{},
},
scope, binary->location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.leFamily,
{rightType, leftType}, // lua decided that `__gt(a, b)` is instead just `__le(b, a)`
{}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::CompareEq:
case AstExprBinary::Op::CompareNe:
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.eqFamily},
{leftType, rightType},
{},
},
scope, binary->location);
DefId leftDef = dfg->getDef(binary->left);
DefId rightDef = dfg->getDef(binary->right);
bool leftSubscripted = containsSubscriptedDefinition(leftDef);
bool rightSubscripted = containsSubscriptedDefinition(rightDef);
if (leftSubscripted && rightSubscripted)
{
// we cannot add nil in this case because then we will blindly accept comparisons that we should not.
}
else if (leftSubscripted)
leftType = makeUnion(scope, binary->location, leftType, builtinTypes->nilType);
else if (rightSubscripted)
rightType = makeUnion(scope, binary->location, rightType, builtinTypes->nilType);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.eqFamily, {leftType, rightType}, {}, scope, binary->location);
return Inference{resultType, std::move(refinement)};
}
case AstExprBinary::Op::Op__Count:
@ -3290,26 +3194,14 @@ void ConstraintGenerator::reportCodeTooComplex(Location location)
TypeId ConstraintGenerator::makeUnion(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs)
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.unionFamily},
{lhs, rhs},
{},
},
scope, location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.unionFamily, {lhs, rhs}, {}, scope, location);
return resultType;
}
TypeId ConstraintGenerator::makeIntersect(const ScopePtr& scope, Location location, TypeId lhs, TypeId rhs)
{
TypeId resultType = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.intersectFamily},
{lhs, rhs},
{},
},
scope, location);
TypeId resultType = createTypeFamilyInstance(kBuiltinTypeFamilies.intersectFamily, {lhs, rhs}, {}, scope, location);
return resultType;
}
@ -3387,13 +3279,7 @@ void ConstraintGenerator::fillInInferredBindings(const ScopePtr& globalScope, As
scope->bindings[symbol] = Binding{tys.front(), location};
else
{
TypeId ty = createFamilyInstance(
TypeFamilyInstanceType{
NotNull{&kBuiltinTypeFamilies.unionFamily},
std::move(tys),
{},
},
globalScope, location);
TypeId ty = createTypeFamilyInstance(kBuiltinTypeFamilies.unionFamily, std::move(tys), {}, globalScope, location);
scope->bindings[symbol] = Binding{ty, location};
}
@ -3463,9 +3349,10 @@ std::vector<std::optional<TypeId>> ConstraintGenerator::getExpectedCallTypesForF
return expectedTypes;
}
TypeId ConstraintGenerator::createFamilyInstance(TypeFamilyInstanceType instance, const ScopePtr& scope, Location location)
TypeId ConstraintGenerator::createTypeFamilyInstance(
const TypeFamily& family, std::vector<TypeId> typeArguments, std::vector<TypePackId> packArguments, const ScopePtr& scope, Location location)
{
TypeId result = arena->addType(std::move(instance));
TypeId result = arena->addTypeFamily(family, typeArguments, packArguments);
addConstraint(scope, location, ReduceConstraint{result});
return result;
}

View file

@ -5,6 +5,7 @@
#include "Luau/Common.h"
#include "Luau/ConstraintSolver.h"
#include "Luau/DcrLogger.h"
#include "Luau/Generalization.h"
#include "Luau/Instantiation.h"
#include "Luau/Instantiation2.h"
#include "Luau/Location.h"
@ -577,9 +578,7 @@ bool ConstraintSolver::tryDispatch(const GeneralizationConstraint& c, NotNull<co
std::optional<QuantifierResult> generalized;
Unifier2 u2{NotNull{arena}, builtinTypes, constraint->scope, NotNull{&iceReporter}};
std::optional<TypeId> generalizedTy = u2.generalize(c.sourceType);
std::optional<TypeId> generalizedTy = generalize(NotNull{arena}, builtinTypes, constraint->scope, c.sourceType);
if (generalizedTy)
generalized = QuantifierResult{*generalizedTy}; // FIXME insertedGenerics and insertedGenericPacks
else
@ -609,7 +608,7 @@ bool ConstraintSolver::tryDispatch(const GeneralizationConstraint& c, NotNull<co
for (TypeId ty : c.interiorTypes)
{
u2.generalize(ty);
generalize(NotNull{arena}, builtinTypes, constraint->scope, ty);
unblock(ty, constraint->location);
}
@ -682,7 +681,16 @@ bool ConstraintSolver::tryDispatch(const IterableConstraint& c, NotNull<const Co
TypeId nextTy = follow(iterator.head[0]);
if (get<FreeType>(nextTy))
return block_(nextTy);
{
TypeId keyTy = freshType(arena, builtinTypes, constraint->scope);
TypeId valueTy = freshType(arena, builtinTypes, constraint->scope);
TypeId tableTy = arena->addType(TableType{TableState::Sealed, {}, constraint->scope});
getMutable<TableType>(tableTy)->indexer = TableIndexer{keyTy, valueTy};
pushConstraint(constraint->scope, constraint->location, SubtypeConstraint{nextTy, tableTy});
pushConstraint(constraint->scope, constraint->location, UnpackConstraint{c.variables, arena->addTypePack({keyTy, valueTy}), /*resultIsLValue=*/true});
return true;
}
if (get<FunctionType>(nextTy))
{
@ -1924,24 +1932,19 @@ bool ConstraintSolver::tryDispatch(const EqualityConstraint& c, NotNull<const Co
bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force)
{
auto block_ = [&](auto&& t) {
if (force)
{
// TODO: I believe it is the case that, if we are asked to force
// this constraint, then we can do nothing but fail. I'd like to
// find a code sample that gets here.
LUAU_ASSERT(false);
}
else
block(t, constraint);
return false;
};
// We may have to block here if we don't know what the iteratee type is,
// if it's a free table, if we don't know it has a metatable, and so on.
iteratorTy = follow(iteratorTy);
if (get<FreeType>(iteratorTy))
return block_(iteratorTy);
{
TypeId keyTy = freshType(arena, builtinTypes, constraint->scope);
TypeId valueTy = freshType(arena, builtinTypes, constraint->scope);
TypeId tableTy = arena->addType(TableType{TableState::Sealed, {}, constraint->scope});
getMutable<TableType>(tableTy)->indexer = TableIndexer{keyTy, valueTy};
pushConstraint(constraint->scope, constraint->location, SubtypeConstraint{iteratorTy, tableTy});
pushConstraint(constraint->scope, constraint->location, UnpackConstraint{c.variables, arena->addTypePack({keyTy, valueTy}), /*resultIsLValue=*/true});
return true;
}
auto unpack = [&](TypeId ty) {
TypePackId variadic = arena->addTypePack(VariadicTypePack{ty});
@ -2752,15 +2755,15 @@ void ConstraintSolver::shiftReferences(TypeId source, TypeId target)
std::optional<TypeId> ConstraintSolver::generalizeFreeType(NotNull<Scope> scope, TypeId type)
{
if (get<FreeType>(type))
TypeId t = follow(type);
if (get<FreeType>(t))
{
auto refCount = unresolvedConstraints.find(type);
auto refCount = unresolvedConstraints.find(t);
if (!refCount || *refCount > 1)
return {};
}
Unifier2 u2{NotNull{arena}, builtinTypes, scope, NotNull{&iceReporter}};
return u2.generalize(type);
return generalize(NotNull{arena}, builtinTypes, scope, type);
}
bool ConstraintSolver::hasUnresolvedConstraints(TypeId ty)

View file

@ -763,7 +763,8 @@ DataFlowResult DataFlowGraphBuilder::visitExpr(DfgScope* scope, AstExprCall* c)
for (AstExpr* arg : c->args)
visitExpr(scope, arg);
return {defArena->freshCell(), nullptr};
// calls should be treated as subscripted.
return {defArena->freshCell(/* subscripted */ true), nullptr};
}
DataFlowResult DataFlowGraphBuilder::visitExpr(DfgScope* scope, AstExprIndexName* i)

View file

@ -2,7 +2,6 @@
#include "Luau/BuiltinDefinitions.h"
LUAU_FASTFLAGVARIABLE(LuauCheckedEmbeddedDefinitions2, false);
LUAU_FASTFLAG(LuauCheckedFunctionSyntax);
namespace Luau
{
@ -452,7 +451,7 @@ std::string getBuiltinDefinitionSource()
std::string result = kBuiltinDefinitionLuaSrc;
// Annotates each non generic function as checked
if (FFlag::LuauCheckedEmbeddedDefinitions2 && FFlag::LuauCheckedFunctionSyntax)
if (FFlag::LuauCheckedEmbeddedDefinitions2)
result = kBuiltinDefinitionLuaSrcChecked;
return result;

View file

@ -0,0 +1,526 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Generalization.h"
#include "Luau/Scope.h"
#include "Luau/Type.h"
#include "Luau/TypeArena.h"
#include "Luau/TypePack.h"
#include "Luau/VisitType.h"
namespace Luau
{
struct MutatingGeneralizer : TypeOnceVisitor
{
NotNull<BuiltinTypes> builtinTypes;
NotNull<Scope> scope;
DenseHashMap<const void*, size_t> positiveTypes;
DenseHashMap<const void*, size_t> negativeTypes;
std::vector<TypeId> generics;
std::vector<TypePackId> genericPacks;
bool isWithinFunction = false;
MutatingGeneralizer(NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<const void*, size_t> positiveTypes,
DenseHashMap<const void*, size_t> negativeTypes)
: TypeOnceVisitor(/* skipBoundTypes */ true)
, builtinTypes(builtinTypes)
, scope(scope)
, positiveTypes(std::move(positiveTypes))
, negativeTypes(std::move(negativeTypes))
{
}
static void replace(DenseHashSet<TypeId>& seen, TypeId haystack, TypeId needle, TypeId replacement)
{
haystack = follow(haystack);
if (seen.find(haystack))
return;
seen.insert(haystack);
if (UnionType* ut = getMutable<UnionType>(haystack))
{
for (auto iter = ut->options.begin(); iter != ut->options.end();)
{
// FIXME: I bet this function has reentrancy problems
TypeId option = follow(*iter);
if (option == needle && get<NeverType>(replacement))
{
iter = ut->options.erase(iter);
continue;
}
if (option == needle)
{
*iter = replacement;
iter++;
continue;
}
// advance the iterator, nothing after this can use it.
iter++;
if (seen.find(option))
continue;
seen.insert(option);
if (get<UnionType>(option))
replace(seen, option, needle, haystack);
else if (get<IntersectionType>(option))
replace(seen, option, needle, haystack);
}
if (ut->options.size() == 1)
{
TypeId onlyType = ut->options[0];
LUAU_ASSERT(onlyType != haystack);
emplaceType<BoundType>(asMutable(haystack), onlyType);
}
return;
}
if (IntersectionType* it = getMutable<IntersectionType>(needle))
{
for (auto iter = it->parts.begin(); iter != it->parts.end();)
{
// FIXME: I bet this function has reentrancy problems
TypeId part = follow(*iter);
if (part == needle && get<UnknownType>(replacement))
{
iter = it->parts.erase(iter);
continue;
}
if (part == needle)
{
*iter = replacement;
iter++;
continue;
}
// advance the iterator, nothing after this can use it.
iter++;
if (seen.find(part))
continue;
seen.insert(part);
if (get<UnionType>(part))
replace(seen, part, needle, haystack);
else if (get<IntersectionType>(part))
replace(seen, part, needle, haystack);
}
if (it->parts.size() == 1)
{
TypeId onlyType = it->parts[0];
LUAU_ASSERT(onlyType != needle);
emplaceType<BoundType>(asMutable(needle), onlyType);
}
return;
}
}
bool visit(TypeId ty, const FunctionType& ft) override
{
const bool oldValue = isWithinFunction;
isWithinFunction = true;
traverse(ft.argTypes);
traverse(ft.retTypes);
isWithinFunction = oldValue;
return false;
}
bool visit(TypeId ty, const FreeType&) override
{
const FreeType* ft = get<FreeType>(ty);
LUAU_ASSERT(ft);
traverse(ft->lowerBound);
traverse(ft->upperBound);
// It is possible for the above traverse() calls to cause ty to be
// transmuted. We must reacquire ft if this happens.
ty = follow(ty);
ft = get<FreeType>(ty);
if (!ft)
return false;
const size_t positiveCount = getCount(positiveTypes, ty);
const size_t negativeCount = getCount(negativeTypes, ty);
if (!positiveCount && !negativeCount)
return false;
const bool hasLowerBound = !get<NeverType>(follow(ft->lowerBound));
const bool hasUpperBound = !get<UnknownType>(follow(ft->upperBound));
DenseHashSet<TypeId> seen{nullptr};
seen.insert(ty);
if (!hasLowerBound && !hasUpperBound)
{
if (!isWithinFunction || (positiveCount + negativeCount == 1))
emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
else
{
emplaceType<GenericType>(asMutable(ty), scope);
generics.push_back(ty);
}
}
// It is possible that this free type has other free types in its upper
// or lower bounds. If this is the case, we must replace those
// references with never (for the lower bound) or unknown (for the upper
// bound).
//
// If we do not do this, we get tautological bounds like a <: a <: unknown.
else if (positiveCount && !hasUpperBound)
{
TypeId lb = follow(ft->lowerBound);
if (FreeType* lowerFree = getMutable<FreeType>(lb); lowerFree && lowerFree->upperBound == ty)
lowerFree->upperBound = builtinTypes->unknownType;
else
{
DenseHashSet<TypeId> replaceSeen{nullptr};
replace(replaceSeen, lb, ty, builtinTypes->unknownType);
}
if (lb != ty)
emplaceType<BoundType>(asMutable(ty), lb);
else if (!isWithinFunction || (positiveCount + negativeCount == 1))
emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
else
{
// if the lower bound is the type in question, we don't actually have a lower bound.
emplaceType<GenericType>(asMutable(ty), scope);
generics.push_back(ty);
}
}
else
{
TypeId ub = follow(ft->upperBound);
if (FreeType* upperFree = getMutable<FreeType>(ub); upperFree && upperFree->lowerBound == ty)
upperFree->lowerBound = builtinTypes->neverType;
else
{
DenseHashSet<TypeId> replaceSeen{nullptr};
replace(replaceSeen, ub, ty, builtinTypes->neverType);
}
if (ub != ty)
emplaceType<BoundType>(asMutable(ty), ub);
else if (!isWithinFunction || (positiveCount + negativeCount == 1))
emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
else
{
// if the upper bound is the type in question, we don't actually have an upper bound.
emplaceType<GenericType>(asMutable(ty), scope);
generics.push_back(ty);
}
}
return false;
}
size_t getCount(const DenseHashMap<const void*, size_t>& map, const void* ty)
{
if (const size_t* count = map.find(ty))
return *count;
else
return 0;
}
bool visit(TypeId ty, const TableType&) override
{
const size_t positiveCount = getCount(positiveTypes, ty);
const size_t negativeCount = getCount(negativeTypes, ty);
// FIXME: Free tables should probably just be replaced by upper bounds on free types.
//
// eg never <: 'a <: {x: number} & {z: boolean}
if (!positiveCount && !negativeCount)
return true;
TableType* tt = getMutable<TableType>(ty);
LUAU_ASSERT(tt);
tt->state = TableState::Sealed;
return true;
}
bool visit(TypePackId tp, const FreeTypePack& ftp) override
{
if (!subsumes(scope, ftp.scope))
return true;
tp = follow(tp);
const size_t positiveCount = getCount(positiveTypes, tp);
const size_t negativeCount = getCount(negativeTypes, tp);
if (1 == positiveCount + negativeCount)
emplaceTypePack<BoundTypePack>(asMutable(tp), builtinTypes->unknownTypePack);
else
{
emplaceTypePack<GenericTypePack>(asMutable(tp), scope);
genericPacks.push_back(tp);
}
return true;
}
};
struct FreeTypeSearcher : TypeVisitor
{
NotNull<Scope> scope;
explicit FreeTypeSearcher(NotNull<Scope> scope)
: TypeVisitor(/*skipBoundTypes*/ true)
, scope(scope)
{
}
enum Polarity
{
Positive,
Negative,
Both,
};
Polarity polarity = Positive;
void flip()
{
switch (polarity)
{
case Positive:
polarity = Negative;
break;
case Negative:
polarity = Positive;
break;
case Both:
break;
}
}
DenseHashSet<const void*> seenPositive{nullptr};
DenseHashSet<const void*> seenNegative{nullptr};
bool seenWithPolarity(const void* ty)
{
switch (polarity)
{
case Positive:
{
if (seenPositive.contains(ty))
return true;
seenPositive.insert(ty);
return false;
}
case Negative:
{
if (seenNegative.contains(ty))
return true;
seenNegative.insert(ty);
return false;
}
case Both:
{
if (seenPositive.contains(ty) && seenNegative.contains(ty))
return true;
seenPositive.insert(ty);
seenNegative.insert(ty);
return false;
}
}
return false;
}
// The keys in these maps are either TypeIds or TypePackIds. It's safe to
// mix them because we only use these pointers as unique keys. We never
// indirect them.
DenseHashMap<const void*, size_t> negativeTypes{0};
DenseHashMap<const void*, size_t> positiveTypes{0};
bool visit(TypeId ty) override
{
if (seenWithPolarity(ty))
return false;
LUAU_ASSERT(ty);
return true;
}
bool visit(TypeId ty, const FreeType& ft) override
{
if (seenWithPolarity(ty))
return false;
if (!subsumes(scope, ft.scope))
return true;
switch (polarity)
{
case Positive:
positiveTypes[ty]++;
break;
case Negative:
negativeTypes[ty]++;
break;
case Both:
positiveTypes[ty]++;
negativeTypes[ty]++;
break;
}
return true;
}
bool visit(TypeId ty, const TableType& tt) override
{
if (seenWithPolarity(ty))
return false;
if ((tt.state == TableState::Free || tt.state == TableState::Unsealed) && subsumes(scope, tt.scope))
{
switch (polarity)
{
case Positive:
positiveTypes[ty]++;
break;
case Negative:
negativeTypes[ty]++;
break;
case Both:
positiveTypes[ty]++;
negativeTypes[ty]++;
break;
}
}
for (const auto& [_name, prop] : tt.props)
{
if (prop.isReadOnly())
traverse(*prop.readTy);
else
{
LUAU_ASSERT(prop.isShared());
Polarity p = polarity;
polarity = Both;
traverse(prop.type());
polarity = p;
}
}
if (tt.indexer)
{
traverse(tt.indexer->indexType);
traverse(tt.indexer->indexResultType);
}
return false;
}
bool visit(TypeId ty, const FunctionType& ft) override
{
if (seenWithPolarity(ty))
return false;
flip();
traverse(ft.argTypes);
flip();
traverse(ft.retTypes);
return false;
}
bool visit(TypeId, const ClassType&) override
{
return false;
}
bool visit(TypePackId tp, const FreeTypePack& ftp) override
{
if (seenWithPolarity(tp))
return false;
if (!subsumes(scope, ftp.scope))
return true;
switch (polarity)
{
case Positive:
positiveTypes[tp]++;
break;
case Negative:
negativeTypes[tp]++;
break;
case Both:
positiveTypes[tp]++;
negativeTypes[tp]++;
break;
}
return true;
}
};
// Generalizes `ty` in place: counts free-type occurrences with
// FreeTypeSearcher, then mutates the graph with MutatingGeneralizer, and
// finally attaches the discovered generics to `ty` when it is a function.
std::optional<TypeId> generalize(NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, TypeId ty)
{
    ty = follow(ty);

    // Types we do not own, and persistent types, must never be mutated.
    auto isFrozen = [&](TypeId t) {
        return t->owningArena != arena || t->persistent;
    };

    if (isFrozen(ty))
        return ty;

    // A function that already carries generics has been generalized before.
    if (const FunctionType* fn = get<FunctionType>(ty); fn && (!fn->generics.empty() || !fn->genericPacks.empty()))
        return ty;

    FreeTypeSearcher searcher{scope};
    searcher.traverse(ty);

    MutatingGeneralizer generalizer{builtinTypes, scope, std::move(searcher.positiveTypes), std::move(searcher.negativeTypes)};
    generalizer.traverse(ty);

    /* MutatingGeneralizer mutates types in place, so it is possible that ty has
     * been transmuted to a BoundType. We must follow it again and verify that
     * we are allowed to mutate it before we attach generics to it.
     */
    ty = follow(ty);

    if (isFrozen(ty))
        return ty;

    if (FunctionType* fnMut = getMutable<FunctionType>(ty))
    {
        fnMut->generics = std::move(generalizer.generics);
        fnMut->genericPacks = std::move(generalizer.genericPacks);
    }

    return ty;
}
} // namespace Luau

View file

@ -1591,7 +1591,6 @@ struct TypeChecker2
functionDeclStack.push_back(inferredFnTy);
std::shared_ptr<const NormalizedType> normalizedFnTy = normalizer.normalize(inferredFnTy);
const FunctionType* inferredFtv = get<FunctionType>(normalizedFnTy->functions.parts.front());
if (!normalizedFnTy)
{
reportError(CodeTooComplex{}, fn->location);
@ -1686,16 +1685,23 @@ struct TypeChecker2
if (fn->returnAnnotation)
visit(*fn->returnAnnotation);
// If the function type has a family annotation, we need to see if we can suggest an annotation
TypeFamilyReductionGuesser guesser{NotNull{&module->internalTypes}, builtinTypes, NotNull{&normalizer}};
for (TypeId retTy : inferredFtv->retTypes)
if (normalizedFnTy)
{
if (get<TypeFamilyInstanceType>(follow(retTy)))
const FunctionType* inferredFtv = get<FunctionType>(normalizedFnTy->functions.parts.front());
LUAU_ASSERT(inferredFtv);
TypeFamilyReductionGuesser guesser{NotNull{&module->internalTypes}, builtinTypes, NotNull{&normalizer}};
for (TypeId retTy : inferredFtv->retTypes)
{
TypeFamilyReductionGuessResult result = guesser.guessTypeFamilyReductionForFunction(*fn, inferredFtv, retTy);
if (result.shouldRecommendAnnotation)
reportError(
ExplicitFunctionAnnotationRecommended{std::move(result.guessedFunctionAnnotations), result.guessedReturnType}, fn->location);
if (get<TypeFamilyInstanceType>(follow(retTy)))
{
TypeFamilyReductionGuessResult result = guesser.guessTypeFamilyReductionForFunction(*fn, inferredFtv, retTy);
if (result.shouldRecommendAnnotation)
reportError(ExplicitFunctionAnnotationRecommended{std::move(result.guessedFunctionAnnotations), result.guessedReturnType},
fn->location);
}
}
}

View file

@ -1507,7 +1507,7 @@ TypeFamilyReductionResult<TypeId> singletonFamilyFn(TypeId instance, NotNull<Typ
TypeId followed = type;
// we want to follow through a negation here as well.
if (auto negation = get<NegationType>(followed))
followed = follow(negation->ty);
followed = follow(negation->ty);
// if we have a singleton type or `nil`, which is its own singleton type...
if (get<SingletonType>(followed) || isNil(followed))

View file

@ -825,315 +825,6 @@ struct FreeTypeSearcher : TypeVisitor
}
};
// Second phase of generalization: walks a type once and mutates free types
// in place, either binding them to their known bound or promoting them to
// generic types, based on how often they occur in positive/negative position
// (counts are produced by FreeTypeSearcher before this visitor runs).
struct MutatingGeneralizer : TypeOnceVisitor
{
    NotNull<BuiltinTypes> builtinTypes;
    NotNull<Scope> scope;
    // Occurrence counts per type/pack pointer, keyed by polarity of the
    // position in which the free type was seen.
    DenseHashMap<const void*, size_t> positiveTypes;
    DenseHashMap<const void*, size_t> negativeTypes;
    // Free types/packs that were promoted to generics; the caller attaches
    // these to the generalized function's generic lists.
    std::vector<TypeId> generics;
    std::vector<TypePackId> genericPacks;
    // True while traversing inside a function's argument/return packs; free
    // types are only made generic within a function signature.
    bool isWithinFunction = false;
    MutatingGeneralizer(NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<const void*, size_t> positiveTypes,
        DenseHashMap<const void*, size_t> negativeTypes)
        : TypeOnceVisitor(/* skipBoundTypes */ true)
        , builtinTypes(builtinTypes)
        , scope(scope)
        , positiveTypes(std::move(positiveTypes))
        , negativeTypes(std::move(negativeTypes))
    {
    }
    // Replaces occurrences of 'needle' inside 'haystack' (a union or
    // intersection) with 'replacement', collapsing single-element sets into a
    // BoundType. 'seen' guards against cycles in the type graph.
    static void replace(DenseHashSet<TypeId>& seen, TypeId haystack, TypeId needle, TypeId replacement)
    {
        haystack = follow(haystack);
        if (seen.find(haystack))
            return;
        seen.insert(haystack);
        if (UnionType* ut = getMutable<UnionType>(haystack))
        {
            for (auto iter = ut->options.begin(); iter != ut->options.end();)
            {
                // FIXME: I bet this function has reentrancy problems
                TypeId option = follow(*iter);
                // Replacing with never inside a union simply drops the option.
                if (option == needle && get<NeverType>(replacement))
                {
                    iter = ut->options.erase(iter);
                    continue;
                }
                if (option == needle)
                {
                    *iter = replacement;
                    iter++;
                    continue;
                }
                // advance the iterator, nothing after this can use it.
                iter++;
                if (seen.find(option))
                    continue;
                seen.insert(option);
                // NOTE(review): the recursion passes 'haystack' as the
                // replacement rather than 'replacement' — confirm intended.
                if (get<UnionType>(option))
                    replace(seen, option, needle, haystack);
                else if (get<IntersectionType>(option))
                    replace(seen, option, needle, haystack);
            }
            // A one-option union degenerates to that option.
            if (ut->options.size() == 1)
            {
                TypeId onlyType = ut->options[0];
                LUAU_ASSERT(onlyType != haystack);
                emplaceType<BoundType>(asMutable(haystack), onlyType);
            }
            return;
        }
        // NOTE(review): this branch inspects 'needle' where the union branch
        // above inspects 'haystack' — the asymmetry looks suspicious; confirm
        // it is intended before relying on this path.
        if (IntersectionType* it = getMutable<IntersectionType>(needle))
        {
            for (auto iter = it->parts.begin(); iter != it->parts.end();)
            {
                // FIXME: I bet this function has reentrancy problems
                TypeId part = follow(*iter);
                // Replacing with unknown inside an intersection drops the part.
                if (part == needle && get<UnknownType>(replacement))
                {
                    iter = it->parts.erase(iter);
                    continue;
                }
                if (part == needle)
                {
                    *iter = replacement;
                    iter++;
                    continue;
                }
                // advance the iterator, nothing after this can use it.
                iter++;
                if (seen.find(part))
                    continue;
                seen.insert(part);
                // NOTE(review): as above, 'haystack' is passed as the
                // replacement here — confirm intended.
                if (get<UnionType>(part))
                    replace(seen, part, needle, haystack);
                else if (get<IntersectionType>(part))
                    replace(seen, part, needle, haystack);
            }
            // A one-part intersection degenerates to that part.
            if (it->parts.size() == 1)
            {
                TypeId onlyType = it->parts[0];
                LUAU_ASSERT(onlyType != needle);
                emplaceType<BoundType>(asMutable(needle), onlyType);
            }
            return;
        }
    }
    // Function signatures bound the region in which free types may become
    // generics; remember that we are inside one while visiting its packs.
    bool visit(TypeId ty, const FunctionType& ft) override
    {
        const bool oldValue = isWithinFunction;
        isWithinFunction = true;
        traverse(ft.argTypes);
        traverse(ft.retTypes);
        isWithinFunction = oldValue;
        return false;
    }
    // Core of generalization: decide, for each relevant free type, whether it
    // becomes bound to its lower/upper bound, bound to unknown, or generic.
    bool visit(TypeId ty, const FreeType&) override
    {
        const FreeType* ft = get<FreeType>(ty);
        LUAU_ASSERT(ft);
        traverse(ft->lowerBound);
        traverse(ft->upperBound);
        // It is possible for the above traverse() calls to cause ty to be
        // transmuted. We must reacquire ft if this happens.
        ty = follow(ty);
        ft = get<FreeType>(ty);
        if (!ft)
            return false;
        const size_t positiveCount = getCount(positiveTypes, ty);
        const size_t negativeCount = getCount(negativeTypes, ty);
        // A free type that never occurs in a relevant position is left alone.
        if (!positiveCount && !negativeCount)
            return false;
        // never/unknown are the trivial lower/upper bounds, i.e. "no bound".
        const bool hasLowerBound = !get<NeverType>(follow(ft->lowerBound));
        const bool hasUpperBound = !get<UnknownType>(follow(ft->upperBound));
        DenseHashSet<TypeId> seen{nullptr};
        seen.insert(ty);
        if (!hasLowerBound && !hasUpperBound)
        {
            // Unconstrained: a single occurrence (or a use outside a function
            // signature) collapses to unknown; otherwise promote to a generic.
            if (!isWithinFunction || (positiveCount + negativeCount == 1))
                emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
            else
            {
                emplaceType<GenericType>(asMutable(ty), scope);
                generics.push_back(ty);
            }
        }
        // It is possible that this free type has other free types in its upper
        // or lower bounds. If this is the case, we must replace those
        // references with never (for the lower bound) or unknown (for the upper
        // bound).
        //
        // If we do not do this, we get tautological bounds like a <: a <: unknown.
        else if (positiveCount && !hasUpperBound)
        {
            TypeId lb = follow(ft->lowerBound);
            if (FreeType* lowerFree = getMutable<FreeType>(lb); lowerFree && lowerFree->upperBound == ty)
                lowerFree->upperBound = builtinTypes->unknownType;
            else
            {
                DenseHashSet<TypeId> replaceSeen{nullptr};
                replace(replaceSeen, lb, ty, builtinTypes->unknownType);
            }
            if (lb != ty)
                emplaceType<BoundType>(asMutable(ty), lb);
            else if (!isWithinFunction || (positiveCount + negativeCount == 1))
                emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
            else
            {
                // if the lower bound is the type in question, we don't actually have a lower bound.
                emplaceType<GenericType>(asMutable(ty), scope);
                generics.push_back(ty);
            }
        }
        else
        {
            TypeId ub = follow(ft->upperBound);
            if (FreeType* upperFree = getMutable<FreeType>(ub); upperFree && upperFree->lowerBound == ty)
                upperFree->lowerBound = builtinTypes->neverType;
            else
            {
                DenseHashSet<TypeId> replaceSeen{nullptr};
                replace(replaceSeen, ub, ty, builtinTypes->neverType);
            }
            if (ub != ty)
                emplaceType<BoundType>(asMutable(ty), ub);
            else if (!isWithinFunction || (positiveCount + negativeCount == 1))
                emplaceType<BoundType>(asMutable(ty), builtinTypes->unknownType);
            else
            {
                // if the upper bound is the type in question, we don't actually have an upper bound.
                emplaceType<GenericType>(asMutable(ty), scope);
                generics.push_back(ty);
            }
        }
        return false;
    }
    // Looks up an occurrence count; absent keys count as zero.
    size_t getCount(const DenseHashMap<const void*, size_t>& map, const void* ty)
    {
        if (const size_t* count = map.find(ty))
            return *count;
        else
            return 0;
    }
    // Free tables that appear in a relevant position are sealed by
    // generalization.
    bool visit(TypeId ty, const TableType&) override
    {
        const size_t positiveCount = getCount(positiveTypes, ty);
        const size_t negativeCount = getCount(negativeTypes, ty);
        // FIXME: Free tables should probably just be replaced by upper bounds on free types.
        //
        // eg never <: 'a <: {x: number} & {z: boolean}
        if (!positiveCount && !negativeCount)
            return true;
        TableType* tt = getMutable<TableType>(ty);
        LUAU_ASSERT(tt);
        tt->state = TableState::Sealed;
        return true;
    }
    // Same policy for free type packs: a single occurrence binds to the
    // unknown pack; multiple occurrences become a generic pack.
    bool visit(TypePackId tp, const FreeTypePack& ftp) override
    {
        // Packs from scopes this one does not subsume are not ours to mutate.
        if (!subsumes(scope, ftp.scope))
            return true;
        tp = follow(tp);
        const size_t positiveCount = getCount(positiveTypes, tp);
        const size_t negativeCount = getCount(negativeTypes, tp);
        if (1 == positiveCount + negativeCount)
            emplaceTypePack<BoundTypePack>(asMutable(tp), builtinTypes->unknownTypePack);
        else
        {
            emplaceTypePack<GenericTypePack>(asMutable(tp), scope);
            genericPacks.push_back(tp);
        }
        return true;
    }
};
// Generalizes 'ty' in place: free types reachable from it are bound or
// promoted to generics (via FreeTypeSearcher + MutatingGeneralizer), and any
// new generics are attached to 'ty' when it is a function type.
std::optional<TypeId> Unifier2::generalize(TypeId ty)
{
    ty = follow(ty);

    // Types we do not own (or that are persistent) must not be mutated.
    if (ty->owningArena != arena || ty->persistent)
        return ty;

    // A function that already carries generics has been generalized before.
    if (const FunctionType* fn = get<FunctionType>(ty); fn && (!fn->generics.empty() || !fn->genericPacks.empty()))
        return ty;

    // Pass 1: count positive/negative occurrences of each free type.
    FreeTypeSearcher searcher{scope};
    searcher.traverse(ty);

    // Pass 2: mutate free types into bound or generic types.
    MutatingGeneralizer generalizer{builtinTypes, scope, std::move(searcher.positiveTypes), std::move(searcher.negativeTypes)};
    generalizer.traverse(ty);

    /* MutatingGeneralizer mutates types in place, so it is possible that ty has
     * been transmuted to a BoundType. We must follow it again and verify that
     * we are allowed to mutate it before we attach generics to it.
     */
    ty = follow(ty);
    if (ty->owningArena != arena || ty->persistent)
        return ty;

    if (FunctionType* fn = getMutable<FunctionType>(ty))
    {
        fn->generics = std::move(generalizer.generics);
        fn->genericPacks = std::move(generalizer.genericPacks);
    }

    return ty;
}
TypeId Unifier2::mkUnion(TypeId left, TypeId right)
{
left = follow(left);

View file

@ -8,7 +8,6 @@
#include <limits.h>
LUAU_FASTFLAGVARIABLE(LuauLexerLookaheadRemembersBraceType, false)
LUAU_FASTFLAGVARIABLE(LuauCheckedFunctionSyntax, false)
namespace Luau
{
@ -995,17 +994,14 @@ Lexeme Lexer::readNext()
}
case '@':
{
if (FFlag::LuauCheckedFunctionSyntax)
{
// We're trying to lex the token @checked
LUAU_ASSERT(peekch() == '@');
// We're trying to lex the token @checked
LUAU_ASSERT(peekch() == '@');
std::pair<AstName, Lexeme::Type> maybeChecked = readName();
if (maybeChecked.second != Lexeme::ReservedChecked)
return Lexeme(Location(start, position()), Lexeme::Error);
std::pair<AstName, Lexeme::Type> maybeChecked = readName();
if (maybeChecked.second != Lexeme::ReservedChecked)
return Lexeme(Location(start, position()), Lexeme::Error);
return Lexeme(Location(start, position()), maybeChecked.second, maybeChecked.first.value);
}
return Lexeme(Location(start, position()), maybeChecked.second, maybeChecked.first.value);
}
default:
if (isDigit(peekch()))

View file

@ -16,7 +16,6 @@ LUAU_FASTINTVARIABLE(LuauParseErrorLimit, 100)
// Warning: If you are introducing new syntax, ensure that it is behind a separate
// flag so that we don't break production games by reverting syntax changes.
// See docs/SyntaxChanges.md for an explanation.
LUAU_FASTFLAG(LuauCheckedFunctionSyntax)
LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
namespace Luau
@ -838,7 +837,7 @@ AstStat* Parser::parseDeclaration(const Location& start)
{
nextLexeme();
bool checkedFunction = false;
if (FFlag::LuauCheckedFunctionSyntax && lexer.current().type == Lexeme::ReservedChecked)
if (lexer.current().type == Lexeme::ReservedChecked)
{
checkedFunction = true;
nextLexeme();
@ -1731,9 +1730,8 @@ AstTypeOrPack Parser::parseSimpleType(bool allowPack, bool inDeclarationContext)
{
return {parseTableType(/* inDeclarationContext */ inDeclarationContext), {}};
}
else if (FFlag::LuauCheckedFunctionSyntax && inDeclarationContext && lexer.current().type == Lexeme::ReservedChecked)
else if (inDeclarationContext && lexer.current().type == Lexeme::ReservedChecked)
{
LUAU_ASSERT(FFlag::LuauCheckedFunctionSyntax);
nextLexeme();
return parseFunctionType(allowPack, /* isCheckedFunction */ true);
}

View file

@ -12,6 +12,12 @@
struct lua_State;
#if defined(__x86_64__) || defined(_M_X64)
#define CODEGEN_TARGET_X64
#elif defined(__aarch64__) || defined(_M_ARM64)
#define CODEGEN_TARGET_A64
#endif
namespace Luau
{
namespace CodeGen

View file

@ -11,8 +11,6 @@
#include <algorithm>
#include <algorithm>
LUAU_FASTFLAG(LuauCodegenDirectUserdataFlow)
LUAU_FASTFLAG(LuauLoadTypeInfo) // Because new VM typeinfo load changes the format used by Codegen, same flag is used
LUAU_FASTFLAGVARIABLE(LuauCodegenTypeInfo, false) // New analysis is flagged separately

View file

@ -7,7 +7,7 @@
#include <string.h>
#include <stdlib.h>
#if defined(_WIN32) && defined(_M_X64)
#if defined(_WIN32) && defined(CODEGEN_TARGET_X64)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
@ -26,7 +26,7 @@ extern "C" void __deregister_frame(const void*) __attribute__((weak));
extern "C" void __unw_add_dynamic_fde() __attribute__((weak));
#endif
#if defined(__APPLE__) && defined(__aarch64__)
#if defined(__APPLE__) && defined(CODEGEN_TARGET_A64)
#include <sys/sysctl.h>
#include <mach-o/loader.h>
#include <dlfcn.h>
@ -48,7 +48,7 @@ namespace Luau
namespace CodeGen
{
#if defined(__APPLE__) && defined(__aarch64__)
#if defined(__APPLE__) && defined(CODEGEN_TARGET_A64)
static int findDynamicUnwindSections(uintptr_t addr, unw_dynamic_unwind_sections_t* info)
{
// Define a minimal mach header for JIT'd code.
@ -109,7 +109,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
char* unwindData = (char*)block;
unwind->finalize(unwindData, unwindSize, block, blockSize);
#if defined(_WIN32) && defined(_M_X64)
#if defined(_WIN32) && defined(CODEGEN_TARGET_X64)
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block)))
@ -126,7 +126,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
visitFdeEntries(unwindData, __register_frame);
#endif
#if defined(__APPLE__) && defined(__aarch64__)
#if defined(__APPLE__) && defined(CODEGEN_TARGET_A64)
// Starting from macOS 14, we need to register unwind section callback to state that our ABI doesn't require pointer authentication
// This might conflict with other JITs that do the same; unfortunately this is the best we can do for now.
static unw_add_find_dynamic_unwind_sections_t unw_add_find_dynamic_unwind_sections =
@ -141,7 +141,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
void destroyBlockUnwindInfo(void* context, void* unwindData)
{
#if defined(_WIN32) && defined(_M_X64)
#if defined(_WIN32) && defined(CODEGEN_TARGET_X64)
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
@ -161,12 +161,12 @@ void destroyBlockUnwindInfo(void* context, void* unwindData)
bool isUnwindSupported()
{
#if defined(_WIN32) && defined(_M_X64)
#if defined(_WIN32) && defined(CODEGEN_TARGET_X64)
return true;
#elif defined(__ANDROID__)
// Current unwind information is not compatible with Android
return false;
#elif defined(__APPLE__) && defined(__aarch64__)
#elif defined(__APPLE__) && defined(CODEGEN_TARGET_A64)
char ver[256];
size_t verLength = sizeof(ver);
// libunwind on macOS 12 and earlier (which maps to osrelease 21) assumes JIT frames use pointer authentication without a way to override that

View file

@ -27,7 +27,7 @@
#include <memory>
#include <optional>
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
#ifdef _MSC_VER
#include <intrin.h> // __cpuid
#else
@ -35,7 +35,7 @@
#endif
#endif
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
@ -58,8 +58,6 @@ LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K
// Current value is based on some member variables being limited to 16 bits
LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockInstructionLimit, 65'536) // 64 K
LUAU_FASTFLAG(LuauCodegenContext)
namespace Luau
{
namespace CodeGen
@ -97,180 +95,9 @@ std::string toString(const CodeGenCompilationResult& result)
return "";
}
// Bytecode instruction installed as Proto::codeentry to transfer execution
// into natively compiled code.
static const Instruction kCodeEntryInsn = LOP_NATIVECALL;
// Process-wide perf logging hook; when gPerfLogFn is set, compiled regions are
// reported to it together with gPerfLogContext.
void* gPerfLogContext = nullptr;
PerfLogFn gPerfLogFn = nullptr;
// Result of compiling one Proto under the legacy (pre-CodeGenContext) path:
// the Proto, its freshly allocated execdata, and the (pre-relocation) offset
// of its entry point within the assembled code.
struct OldNativeProto
{
    Proto* p;
    void* execdata;
    uintptr_t exectarget;
};
// Additional data attached to Proto::execdata
// Guaranteed to be aligned to 16 bytes
struct ExtraExecData
{
    size_t execDataSize; // exact size of the execdata allocation
    size_t codeSize;     // approximate native code size attributed to this Proto
};
// Rounds 'value' up to the nearest multiple of 'align'.
// 'align' must be a positive power of two.
static int alignTo(int value, int align)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    CODEGEN_ASSERT(align > 0 && (align & (align - 1)) == 0);
    const int mask = align - 1;
    return (value + mask) & ~mask;
}
// Returns the size of execdata required to store all code offsets and ExtraExecData structure at proper alignment
// Always a multiple of 4 bytes
static int calculateExecDataSize(Proto* proto)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    // One 32-bit native-code offset per bytecode instruction.
    int size = proto->sizecode * sizeof(uint32_t);
    // ExtraExecData is documented to live at a 16-byte-aligned offset.
    size = alignTo(size, 16);
    size += sizeof(ExtraExecData);
    return size;
}
// Returns pointer to the ExtraExecData inside the Proto::execdata
// Even though 'execdata' is a field in Proto, we require it to support cases where it's not attached to Proto during construction
ExtraExecData* getExtraExecData(Proto* proto, void* execdata)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    // Mirrors the layout computed by calculateExecDataSize: the offsets array
    // comes first, then ExtraExecData at the next 16-byte boundary.
    int size = proto->sizecode * sizeof(uint32_t);
    size = alignTo(size, 16);
    return reinterpret_cast<ExtraExecData*>(reinterpret_cast<char*>(execdata) + size);
}
// Allocates and fills the legacy execdata for 'proto' from the lowered IR:
// per-instruction native-code offsets followed by a zero-initialized
// ExtraExecData. Ownership of the allocation passes to the returned
// OldNativeProto (later attached to the Proto or freed via destroyExecData).
static OldNativeProto createOldNativeProto(Proto* proto, const IrBuilder& ir)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    int execDataSize = calculateExecDataSize(proto);
    CODEGEN_ASSERT(execDataSize % 4 == 0);
    uint32_t* execData = new uint32_t[execDataSize / 4];
    uint32_t instTarget = ir.function.entryLocation;
    // Offsets are stored relative to the function's entry location.
    for (int i = 0; i < proto->sizecode; i++)
    {
        CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
        execData[i] = ir.function.bcMapping[i].asmLocation - instTarget;
    }
    // Set first instruction offset to 0 so that entering this function still executes any generated entry code.
    execData[0] = 0;
    ExtraExecData* extra = getExtraExecData(proto, execData);
    memset(extra, 0, sizeof(ExtraExecData));
    extra->execDataSize = execDataSize;
    // entry target will be relocated when assembly is finalized
    return {proto, execData, instTarget};
}
// Frees execdata previously allocated by createOldNativeProto.
static void destroyExecData(void* execdata)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    delete[] static_cast<uint32_t*>(execdata);
}
// Reports a compiled function's address range to the installed perf logging
// hook (no-op when gPerfLogFn is unset), naming it from the Proto's source,
// line, and debug name.
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    CODEGEN_ASSERT(p->source);
    const char* source = getstr(p->source);
    // '=' / '@' prefixes mark real chunk names; anything else is an anonymous string chunk.
    source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";
    char name[256];
    snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");
    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, addr, size, name);
}
// Builds IR for 'proto', lowers it into 'build', and on success returns the
// resulting OldNativeProto. Returns nullopt (and sets 'result') when the
// cumulative IR instruction budget is exceeded or lowering fails.
// 'totalIrInstCount' accumulates across all functions of the module.
template<typename AssemblyBuilder>
static std::optional<OldNativeProto> createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount,
    const HostIrHooks& hooks, CodeGenCompilationResult& result)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    IrBuilder ir(hooks);
    ir.buildFunctionIr(proto);
    unsigned instCount = unsigned(ir.function.instructions.size());
    if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value))
    {
        result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit;
        return std::nullopt;
    }
    totalIrInstCount += instCount;
    if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr, result))
        return std::nullopt;
    return createOldNativeProto(proto, ir);
}
// Retrieves the per-VM NativeState stored in the execution callbacks' context
// by create_OLD; null when codegen was never initialized for this VM.
static NativeState* getNativeState(lua_State* L)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    return static_cast<NativeState*>(L->global->ecb.context);
}
// VM-close callback: destroys the per-VM NativeState and clears the execution
// callbacks so no further codegen hooks fire.
static void onCloseState(lua_State* L)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    delete getNativeState(L);
    L->global->ecb = lua_ExecutionCallbacks();
}
// Proto-destruction callback: releases the function's execdata and resets its
// native-execution fields so the VM falls back to bytecode entry.
static void onDestroyFunction(lua_State* L, Proto* proto)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    destroyExecData(proto->execdata);
    proto->execdata = nullptr;
    proto->exectarget = 0;
    proto->codeentry = proto->code;
}
// VM-enter callback: computes the native-code address for the current
// bytecode position from the Proto's execdata offsets and jumps into native
// code through the gate.
static int onEnter(lua_State* L, Proto* proto)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    NativeState* data = getNativeState(L);
    CODEGEN_ASSERT(proto->execdata);
    CODEGEN_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);
    // execdata[i] is the native offset for bytecode instruction i, relative to exectarget.
    uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];
    // Returns 1 to finish the function in the VM
    return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
// used to disable native execution, unconditionally
// (returning 1 tells the VM to interpret the function as bytecode)
static int onEnterDisabled(lua_State* L, Proto* proto)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    return 1;
}
void onDisable(lua_State* L, Proto* proto)
{
@ -311,18 +138,7 @@ void onDisable(lua_State* L, Proto* proto)
});
}
static size_t getMemorySize(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
ExtraExecData* extra = getExtraExecData(proto, proto->execdata);
// While execDataSize is exactly the size of the allocation we made and hold for 'execdata' field, the code size is approximate
// This is because code+data page is shared and owned by all Proto from a single module and each one can keep the whole region alive
// So individual Proto being freed by GC will not reflect memory use by native code correctly
return extra->execDataSize + extra->codeSize;
}
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
unsigned int getCpuFeaturesA64()
{
unsigned int result = 0;
@ -358,7 +174,7 @@ bool isSupported()
return false;
#endif
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
int cpuinfo[4] = {};
#ifdef _MSC_VER
__cpuid(cpuinfo, 1);
@ -373,287 +189,58 @@ bool isSupported()
return false;
return true;
#elif defined(__aarch64__)
#elif defined(CODEGEN_TARGET_A64)
return true;
#else
return false;
#endif
}
// Legacy per-VM codegen initialization: builds a NativeState (code allocator,
// unwind info builder, helper functions) and installs the codegen execution
// callbacks on the VM. On header-function initialization failure the VM is
// left without codegen (the NativeState is discarded).
static void create_OLD(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenContext);
    CODEGEN_ASSERT(isSupported());
    std::unique_ptr<NativeState> data = std::make_unique<NativeState>(allocationCallback, allocationCallbackContext);
#if defined(_WIN32)
    data->unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
    data->unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif
    data->codeAllocator.context = data->unwindBuilder.get();
    data->codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    data->codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
    initFunctions(*data);
    // NOTE(review): this legacy path still tests raw compiler macros; the rest
    // of the file uses CODEGEN_TARGET_X64/CODEGEN_TARGET_A64 — confirm whether
    // this should be made consistent.
#if defined(__x86_64__) || defined(_M_X64)
    if (!X64::initHeaderFunctions(*data))
        return;
#elif defined(__aarch64__)
    if (!A64::initHeaderFunctions(*data))
        return;
#endif
    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, uintptr_t(data->context.gateEntry), 4096, "<luau gate>");
    // Hand ownership of the NativeState to the VM via its execution callbacks.
    lua_ExecutionCallbacks* ecb = &L->global->ecb;
    ecb->context = data.release();
    ecb->close = onCloseState;
    ecb->destroy = onDestroyFunction;
    ecb->enter = onEnter;
    ecb->disable = onDisable;
    ecb->getmemorysize = getMemorySize;
}
void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
if (FFlag::LuauCodegenContext)
{
create_NEW(L, allocationCallback, allocationCallbackContext);
}
else
{
create_OLD(L, allocationCallback, allocationCallbackContext);
}
create_NEW(L, allocationCallback, allocationCallbackContext);
}
void create(lua_State* L)
{
if (FFlag::LuauCodegenContext)
{
create_NEW(L);
}
else
{
create(L, nullptr, nullptr);
}
create_NEW(L);
}
void create(lua_State* L, SharedCodeGenContext* codeGenContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
create_NEW(L, codeGenContext);
}
[[nodiscard]] bool isNativeExecutionEnabled(lua_State* L)
{
if (FFlag::LuauCodegenContext)
{
return isNativeExecutionEnabled_NEW(L);
}
else
{
return getNativeState(L) ? (L->global->ecb.enter == onEnter) : false;
}
return isNativeExecutionEnabled_NEW(L);
}
void setNativeExecutionEnabled(lua_State* L, bool enabled)
{
if (FFlag::LuauCodegenContext)
{
setNativeExecutionEnabled_NEW(L, enabled);
}
else
{
if (getNativeState(L))
L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}
}
static CompilationResult compile_OLD(lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
CompilationResult compilationResult;
CODEGEN_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
Proto* root = clvalue(func)->l.p;
if ((options.flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0)
{
compilationResult.result = CodeGenCompilationResult::NotNativeModule;
return compilationResult;
}
// If initialization has failed, do not compile any functions
NativeState* data = getNativeState(L);
if (!data)
{
compilationResult.result = CodeGenCompilationResult::CodeGenNotInitialized;
return compilationResult;
}
std::vector<Proto*> protos;
gatherFunctions(protos, root, options.flags);
// Skip protos that have been compiled during previous invocations of CodeGen::compile
protos.erase(std::remove_if(protos.begin(), protos.end(),
[](Proto* p) {
return p == nullptr || p->execdata != nullptr;
}),
protos.end());
if (protos.empty())
{
compilationResult.result = CodeGenCompilationResult::NothingToCompile;
return compilationResult;
}
if (stats != nullptr)
stats->functionsTotal = uint32_t(protos.size());
#if defined(__aarch64__)
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif
ModuleHelpers helpers;
#if defined(__aarch64__)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
#endif
std::vector<OldNativeProto> results;
results.reserve(protos.size());
uint32_t totalIrInstCount = 0;
for (Proto* p : protos)
{
CodeGenCompilationResult protoResult = CodeGenCompilationResult::Success;
if (std::optional<OldNativeProto> np = createNativeFunction(build, helpers, p, totalIrInstCount, options.hooks, protoResult))
results.push_back(*np);
else
compilationResult.protoFailures.push_back({protoResult, p->debugname ? getstr(p->debugname) : "", p->linedefined});
}
// Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
if (!build.finalize())
{
for (OldNativeProto result : results)
destroyExecData(result.execdata);
compilationResult.result = CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure;
return compilationResult;
}
// If no functions were assembled, we don't need to allocate/copy executable pages for helpers
if (results.empty())
return compilationResult;
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
if (!data->codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
int(build.code.size() * sizeof(build.code[0])), nativeData, sizeNativeData, codeStart))
{
for (OldNativeProto result : results)
destroyExecData(result.execdata);
compilationResult.result = CodeGenCompilationResult::AllocationFailed;
return compilationResult;
}
if (gPerfLogFn && results.size() > 0)
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");
for (size_t i = 0; i < results.size(); ++i)
{
uint32_t begin = uint32_t(results[i].exectarget);
uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
CODEGEN_ASSERT(begin < end);
if (gPerfLogFn)
logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
ExtraExecData* extra = getExtraExecData(results[i].p, results[i].execdata);
extra->codeSize = end - begin;
}
for (const OldNativeProto& result : results)
{
// the memory is now managed by VM and will be freed via onDestroyFunction
result.p->execdata = result.execdata;
result.p->exectarget = uintptr_t(codeStart) + result.exectarget;
result.p->codeentry = &kCodeEntryInsn;
}
if (stats != nullptr)
{
for (const OldNativeProto& result : results)
{
stats->bytecodeSizeBytes += result.p->sizecode * sizeof(Instruction);
// Account for the native -> bytecode instruction offsets mapping:
stats->nativeMetadataSizeBytes += result.p->sizecode * sizeof(uint32_t);
}
stats->functionsCompiled += uint32_t(results.size());
stats->nativeCodeSizeBytes += build.code.size();
stats->nativeDataSizeBytes += build.data.size();
}
return compilationResult;
setNativeExecutionEnabled_NEW(L, enabled);
}
CompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
Luau::CodeGen::CompilationOptions options{flags};
if (FFlag::LuauCodegenContext)
{
return compile_NEW(L, idx, options, stats);
}
else
{
return compile_OLD(L, idx, options, stats);
}
return compile_NEW(L, idx, options, stats);
}
CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
Luau::CodeGen::CompilationOptions options{flags};
return compile_NEW(moduleId, L, idx, options, stats);
}
CompilationResult compile(lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
if (FFlag::LuauCodegenContext)
{
return compile_NEW(L, idx, options, stats);
}
else
{
return compile_OLD(L, idx, options, stats);
}
return compile_NEW(L, idx, options, stats);
}
CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return compile_NEW(moduleId, L, idx, options, stats);
}

View file

@ -279,7 +279,7 @@ static std::string getAssemblyImpl(AssemblyBuilder& build, const TValue* func, A
return build.text;
}
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
unsigned int getCpuFeaturesA64();
#endif
@ -292,7 +292,7 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options, Lowering
{
case AssemblyOptions::Host:
{
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, cpuFeatures);
#else

View file

@ -12,8 +12,6 @@
#include "lapi.h"
LUAU_FASTFLAGVARIABLE(LuauCodegenContext, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenCheckNullContext, false)
LUAU_FASTINT(LuauCodeGenBlockSize)
@ -34,7 +32,6 @@ unsigned int getCpuFeaturesA64();
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
CODEGEN_ASSERT(p->source);
const char* source = getstr(p->source);
@ -50,8 +47,6 @@ static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
static void logPerfFunctions(
const std::vector<Proto*>& moduleProtos, const uint8_t* nativeModuleBaseAddress, const std::vector<NativeProtoExecDataPtr>& nativeProtos)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
if (gPerfLogFn == nullptr)
return;
@ -83,8 +78,6 @@ static void logPerfFunctions(
template<bool Release, typename NativeProtosVector>
[[nodiscard]] static uint32_t bindNativeProtos(const std::vector<Proto*>& moduleProtos, NativeProtosVector& nativeProtos)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
uint32_t protosBound = 0;
auto protoIt = moduleProtos.begin();
@ -125,7 +118,6 @@ template<bool Release, typename NativeProtosVector>
BaseCodeGenContext::BaseCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
: codeAllocator{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
CODEGEN_ASSERT(isSupported());
#if defined(_WIN32)
@ -143,12 +135,10 @@ BaseCodeGenContext::BaseCodeGenContext(size_t blockSize, size_t maxTotalSize, Al
[[nodiscard]] bool BaseCodeGenContext::initHeaderFunctions()
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
if (!X64::initHeaderFunctions(*this))
return false;
#elif defined(__aarch64__)
#elif defined(CODEGEN_TARGET_A64)
if (!A64::initHeaderFunctions(*this))
return false;
#endif
@ -164,13 +154,10 @@ StandaloneCodeGenContext::StandaloneCodeGenContext(
size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
: BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
}
[[nodiscard]] std::optional<ModuleBindResult> StandaloneCodeGenContext::tryBindExistingModule(const ModuleId&, const std::vector<Proto*>&)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
// The StandaloneCodeGenContext does not support sharing of native code
return {};
}
@ -178,8 +165,6 @@ StandaloneCodeGenContext::StandaloneCodeGenContext(
[[nodiscard]] ModuleBindResult StandaloneCodeGenContext::bindModule(const std::optional<ModuleId>&, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
@ -205,8 +190,6 @@ StandaloneCodeGenContext::StandaloneCodeGenContext(
void StandaloneCodeGenContext::onCloseState() noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
// The StandaloneCodeGenContext is owned by the one VM that owns it, so when
// that VM is destroyed, we destroy *this as well:
delete this;
@ -214,8 +197,6 @@ void StandaloneCodeGenContext::onCloseState() noexcept
void StandaloneCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
destroyNativeProtoExecData(static_cast<uint32_t*>(execdata));
}
@ -225,14 +206,11 @@ SharedCodeGenContext::SharedCodeGenContext(
: BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
, sharedAllocator{&codeAllocator}
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
}
[[nodiscard]] std::optional<ModuleBindResult> SharedCodeGenContext::tryBindExistingModule(
const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
NativeModuleRef nativeModule = sharedAllocator.tryGetNativeModule(moduleId);
if (nativeModule.empty())
{
@ -249,8 +227,6 @@ SharedCodeGenContext::SharedCodeGenContext(
[[nodiscard]] ModuleBindResult SharedCodeGenContext::bindModule(const std::optional<ModuleId>& moduleId, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
const std::pair<NativeModuleRef, bool> insertionResult = [&]() -> std::pair<NativeModuleRef, bool> {
if (moduleId.has_value())
{
@ -279,8 +255,6 @@ SharedCodeGenContext::SharedCodeGenContext(
void SharedCodeGenContext::onCloseState() noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
// The lifetime of the SharedCodeGenContext is managed separately from the
// VMs that use it. When a VM is destroyed, we don't need to do anything
// here.
@ -288,23 +262,17 @@ void SharedCodeGenContext::onCloseState() noexcept
void SharedCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
getNativeProtoExecDataHeader(static_cast<const uint32_t*>(execdata)).nativeModule->release();
}
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext()
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return createSharedCodeGenContext(size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return createSharedCodeGenContext(
size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext);
}
@ -312,8 +280,6 @@ void SharedCodeGenContext::onDestroyFunction(void* execdata) noexcept
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(
size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
UniqueSharedCodeGenContext codeGenContext{new SharedCodeGenContext{blockSize, maxTotalSize, nullptr, nullptr}};
if (!codeGenContext->initHeaderFunctions())
@ -324,38 +290,28 @@ void SharedCodeGenContext::onDestroyFunction(void* execdata) noexcept
// Destroys a context previously obtained from createSharedCodeGenContext().
// Callers must ensure no VM is still using the context when this runs.
void destroySharedCodeGenContext(const SharedCodeGenContext* codeGenContext) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
delete codeGenContext;
}
// Custom deleter backing UniqueSharedCodeGenContext; forwards to
// destroySharedCodeGenContext so smart-pointer and manual lifetimes behave
// identically.
void SharedCodeGenContextDeleter::operator()(const SharedCodeGenContext* codeGenContext) const noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
destroySharedCodeGenContext(codeGenContext);
}
// Retrieves the code generation context stashed in the VM's execution
// callback context pointer. May return nullptr when codegen was never
// initialized for this state (callers such as isNativeExecutionEnabled_NEW
// check for that).
[[nodiscard]] static BaseCodeGenContext* getCodeGenContext(lua_State* L) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return static_cast<BaseCodeGenContext*>(L->global->ecb.context);
}
// VM shutdown hook: notifies the attached codegen context, then resets the
// state's execution callbacks to a default-constructed (empty) set so no
// further codegen callbacks fire during teardown.
static void onCloseState(lua_State* L) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
getCodeGenContext(L)->onCloseState();
L->global->ecb = lua_ExecutionCallbacks{};
}
static void onDestroyFunction(lua_State* L, Proto* proto) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
getCodeGenContext(L)->onDestroyFunction(proto->execdata);
proto->execdata = nullptr;
proto->exectarget = 0;
@ -364,8 +320,6 @@ static void onDestroyFunction(lua_State* L, Proto* proto) noexcept
static int onEnter(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
BaseCodeGenContext* codeGenContext = getCodeGenContext(L);
CODEGEN_ASSERT(proto->execdata);
@ -379,8 +333,6 @@ static int onEnter(lua_State* L, Proto* proto)
// Enter callback installed while native execution is disabled for this VM;
// performs no native dispatch. NOTE(review): the meaning of the constant
// return value 1 is defined by the lua_ExecutionCallbacks contract (compare
// with onEnter) — confirm against that declaration.
static int onEnterDisabled(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return 1;
}
@ -389,8 +341,6 @@ void onDisable(lua_State* L, Proto* proto);
static size_t getMemorySize(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
const NativeProtoExecDataHeader& execDataHeader = getNativeProtoExecDataHeader(static_cast<const uint32_t*>(proto->execdata));
const size_t execDataSize = sizeof(NativeProtoExecDataHeader) + execDataHeader.bytecodeInstructionCount * sizeof(Instruction);
@ -403,7 +353,6 @@ static size_t getMemorySize(lua_State* L, Proto* proto)
static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeGenContext) noexcept
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
CODEGEN_ASSERT(!FFlag::LuauCodegenCheckNullContext || codeGenContext != nullptr);
lua_ExecutionCallbacks* ecb = &L->global->ecb;
@ -418,22 +367,16 @@ static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeG
// Initializes codegen for this lua_State using the default size limits from
// the LuauCodeGenBlockSize/LuauCodeGenMaxTotalSize fast-ints and no
// allocation callback; delegates to the full create_NEW overload.
void create_NEW(lua_State* L)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return create_NEW(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}
// Initializes codegen for this lua_State with default size limits but a
// caller-supplied allocation callback; delegates to the full create_NEW
// overload.
void create_NEW(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return create_NEW(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext);
}
void create_NEW(lua_State* L, size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
std::unique_ptr<StandaloneCodeGenContext> codeGenContext =
std::make_unique<StandaloneCodeGenContext>(blockSize, maxTotalSize, allocationCallback, allocationCallbackContext);
@ -445,15 +388,11 @@ void create_NEW(lua_State* L, size_t blockSize, size_t maxTotalSize, AllocationC
// Attaches an externally owned SharedCodeGenContext to this lua_State by
// installing the execution callbacks. The context's lifetime is managed by
// the caller, separately from the VMs that use it.
void create_NEW(lua_State* L, SharedCodeGenContext* codeGenContext)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
initializeExecutionCallbacks(L, codeGenContext);
}
[[nodiscard]] static NativeProtoExecDataPtr createNativeProtoExecData(Proto* proto, const IrBuilder& ir)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(proto->sizecode);
uint32_t instTarget = ir.function.entryLocation;
@ -481,8 +420,6 @@ template<typename AssemblyBuilder>
[[nodiscard]] static NativeProtoExecDataPtr createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto,
uint32_t& totalIrInstCount, const HostIrHooks& hooks, CodeGenCompilationResult& result)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
IrBuilder ir(hooks);
ir.buildFunctionIr(proto);
@ -507,7 +444,6 @@ template<typename AssemblyBuilder>
[[nodiscard]] static CompilationResult compileInternal(
const std::optional<ModuleId>& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
CODEGEN_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
@ -547,7 +483,7 @@ template<typename AssemblyBuilder>
}
}
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
@ -555,7 +491,7 @@ template<typename AssemblyBuilder>
#endif
ModuleHelpers helpers;
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
@ -641,29 +577,21 @@ template<typename AssemblyBuilder>
// Compiles the Luau function at stack index idx, tagging the result with the
// given moduleId; thin wrapper over compileInternal.
CompilationResult compile_NEW(const ModuleId& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return compileInternal(moduleId, L, idx, options, stats);
}
// Compiles the Luau function at stack index idx without an associated module
// id (anonymous module); thin wrapper over compileInternal.
CompilationResult compile_NEW(lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return compileInternal({}, L, idx, options, stats);
}
// Reports whether native execution is active for this VM: a codegen context
// must be attached and the enter callback must be the native-dispatch
// onEnter (as opposed to onEnterDisabled).
[[nodiscard]] bool isNativeExecutionEnabled_NEW(lua_State* L)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
return getCodeGenContext(L) != nullptr && L->global->ecb.enter == onEnter;
}
// Toggles native execution for this VM by swapping the enter callback
// between onEnter and onEnterDisabled. Silently does nothing when no codegen
// context is attached to the state.
void setNativeExecutionEnabled_NEW(lua_State* L, bool enabled)
{
CODEGEN_ASSERT(FFlag::LuauCodegenContext);
if (getCodeGenContext(L) != nullptr)
L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}

View file

@ -181,6 +181,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Error.h
Analysis/include/Luau/FileResolver.h
Analysis/include/Luau/Frontend.h
Analysis/include/Luau/Generalization.h
Analysis/include/Luau/GlobalTypes.h
Analysis/include/Luau/InsertionOrderedMap.h
Analysis/include/Luau/Instantiation.h
@ -251,6 +252,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/EmbeddedBuiltinDefinitions.cpp
Analysis/src/Error.cpp
Analysis/src/Frontend.cpp
Analysis/src/Generalization.cpp
Analysis/src/GlobalTypes.cpp
Analysis/src/Instantiation.cpp
Analysis/src/Instantiation2.cpp
@ -420,6 +422,7 @@ if(TARGET Luau.UnitTest)
tests/Fixture.cpp
tests/Fixture.h
tests/Frontend.test.cpp
tests/Generalization.test.cpp
tests/InsertionOrderedMap.test.cpp
tests/Instantiation2.test.cpp
tests/IostreamOptional.h

View file

@ -379,7 +379,7 @@ DEFINE_PROTO_FUZZER(const luau::ModuleSet& message)
if (luau_load(globalState, "=fuzz", bytecode.data(), bytecode.size(), 0) == 0)
{
Luau::CodeGen::AssemblyOptions options;
options.flags = Luau::CodeGen::CodeGen_ColdFunctions;
options.compilationOptions.flags = Luau::CodeGen::CodeGen_ColdFunctions;
options.outputBinary = true;
options.target = kFuzzCodegenTarget;
Luau::CodeGen::getAssembly(globalState, -1, options);

View file

@ -253,7 +253,7 @@ TEST_CASE("Dwarf2UnwindCodesA64")
CHECK(memcmp(data.data(), expected.data(), expected.size()) == 0);
}
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
#if defined(_WIN32)
// Windows x64 ABI
@ -774,7 +774,7 @@ TEST_CASE("GeneratedCodeExecutionWithThrowOutsideTheGateX64")
#endif
#if defined(__aarch64__)
#if defined(CODEGEN_TARGET_A64)
TEST_CASE("GeneratedCodeExecutionA64")
{

View file

@ -0,0 +1,119 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Generalization.h"
#include "Luau/Scope.h"
#include "Luau/ToString.h"
#include "Luau/Type.h"
#include "Luau/TypeArena.h"
#include "Luau/Error.h"
#include "ScopedFlags.h"
#include "doctest.h"
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
TEST_SUITE_BEGIN("Generalization");
// Minimal standalone environment for exercising Luau::generalize outside the
// full Frontend: a private arena, builtin types, and a single root scope.
struct GeneralizationFixture
{
TypeArena arena;
BuiltinTypes builtinTypes;
Scope scope{builtinTypes.anyTypePack};
ToStringOptions opts;
// These tests target the new solver, so force the DCR flag on for the
// duration of each test case.
ScopedFastFlag sff{FFlag::DebugLuauDeferredConstraintResolution, true};
// Allocates a fresh free type with bounds never <: t <: unknown and returns
// both the TypeId and a mutable pointer so tests can adjust the bounds.
std::pair<TypeId, FreeType*> freshType()
{
FreeType ft{&scope, builtinTypes.neverType, builtinTypes.unknownType};
TypeId ty = arena.addType(ft);
FreeType* ftv = getMutable<FreeType>(ty);
REQUIRE(ftv != nullptr);
return {ty, ftv};
}
// Stringifies a type with the fixture's shared ToStringOptions.
std::string toString(TypeId ty)
{
return ::Luau::toString(ty, opts);
}
// Stringifies a type pack with the fixture's shared ToStringOptions.
std::string toString(TypePackId ty)
{
return ::Luau::toString(ty, opts);
}
// Convenience wrapper around the free function under test, binding the
// fixture's arena, builtins, and scope.
std::optional<TypeId> generalize(TypeId ty)
{
return ::Luau::generalize(NotNull{&arena}, NotNull{&builtinTypes}, NotNull{&scope}, ty);
}
};
// Generalizing a free type whose bound chain involves another free type must
// resolve both to the same type, and ultimately to unknown.
TEST_CASE_FIXTURE(GeneralizationFixture, "generalize_a_type_that_is_bounded_by_another_generalizable_type")
{
auto [t1, ft1] = freshType();
auto [t2, ft2] = freshType();
// Bounds set up below: t2 <: t1 (t1's lower bound is t2, t2's upper bound
// is t1) and unknown <: t2 (t2's lower bound is unknown).
ft1->lowerBound = t2;
ft2->upperBound = t1;
ft2->lowerBound = builtinTypes.unknownType;
auto t2generalized = generalize(t2);
REQUIRE(t2generalized);
// After generalizing t2, t1 and t2 should have collapsed to the same type.
CHECK(follow(t1) == follow(t2));
auto t1generalized = generalize(t1);
REQUIRE(t1generalized);
// Both ends of the bound chain settle on unknown.
CHECK(builtinTypes.unknownType == follow(t1));
CHECK(builtinTypes.unknownType == follow(t2));
}
// Same as generalize_a_type_that_is_bounded_by_another_generalizable_type
// except that we generalize the types in the opposite order; the outcome
// must be order-independent.
TEST_CASE_FIXTURE(GeneralizationFixture, "generalize_a_type_that_is_bounded_by_another_generalizable_type_in_reverse_order")
{
auto [t1, ft1] = freshType();
auto [t2, ft2] = freshType();
// Bounds set up below: t2 <: t1 (t1's lower bound is t2, t2's upper bound
// is t1) and unknown <: t2 (t2's lower bound is unknown).
ft1->lowerBound = t2;
ft2->upperBound = t1;
ft2->lowerBound = builtinTypes.unknownType;
auto t1generalized = generalize(t1);
REQUIRE(t1generalized);
// After generalizing t1, t1 and t2 should have collapsed to the same type.
CHECK(follow(t1) == follow(t2));
auto t2generalized = generalize(t2);
REQUIRE(t2generalized);
// Both ends of the bound chain settle on unknown.
CHECK(builtinTypes.unknownType == follow(t1));
CHECK(builtinTypes.unknownType == follow(t2));
}
// Generalization must not descend into ClassType properties: the free type
// stored in the class's read-only property should remain a FreeType after
// the class itself is generalized.
TEST_CASE_FIXTURE(GeneralizationFixture, "dont_traverse_into_class_types_when_generalizing")
{
auto [propTy, _] = freshType();
TypeId cursedClass = arena.addType(ClassType{"Cursed", {{"oh_no", Property::readonly(propTy)}}, std::nullopt, std::nullopt, {}, {}, ""});
auto genClass = generalize(cursedClass);
REQUIRE(genClass);
auto genPropTy = get<ClassType>(*genClass)->props.at("oh_no").readTy;
CHECK(is<FreeType>(*genPropTy));
}
TEST_SUITE_END();

View file

@ -15,8 +15,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauCheckedFunctionSyntax);
#define NONSTRICT_REQUIRE_ERR_AT_POS(pos, result, idx) \
do \
{ \
@ -69,7 +67,6 @@ struct NonStrictTypeCheckerFixture : Fixture
CheckResult checkNonStrict(const std::string& code)
{
ScopedFastFlag flags[] = {
{FFlag::LuauCheckedFunctionSyntax, true},
{FFlag::DebugLuauDeferredConstraintResolution, true},
};
LoadDefinitionFileResult res = loadDefinition(definitions);
@ -80,7 +77,6 @@ struct NonStrictTypeCheckerFixture : Fixture
CheckResult checkNonStrictModule(const std::string& moduleName)
{
ScopedFastFlag flags[] = {
{FFlag::LuauCheckedFunctionSyntax, true},
{FFlag::DebugLuauDeferredConstraintResolution, true},
};
LoadDefinitionFileResult res = loadDefinition(definitions);

View file

@ -11,7 +11,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauCheckedFunctionSyntax);
LUAU_FASTFLAG(LuauLexerLookaheadRemembersBraceType);
LUAU_FASTINT(LuauRecursionLimit);
LUAU_FASTINT(LuauTypeLengthLimit);
@ -3051,7 +3050,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_top_level_checked_fn")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
std::string src = R"BUILTIN_SRC(
declare function @checked abs(n: number): number
@ -3071,7 +3069,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_declared_table_checked_member")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
const std::string src = R"BUILTIN_SRC(
declare math : {
@ -3099,7 +3096,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_checked_outside_decl_fails")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
ParseResult pr = tryParse(R"(
local @checked = 3
@ -3113,7 +3109,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_checked_in_and_out_of_decl_fails")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
auto pr = tryParse(R"(
local @checked = 3
@ -3129,7 +3124,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_checked_as_function_name_fails")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
auto pr = tryParse(R"(
function @checked(x: number) : number
@ -3143,7 +3137,6 @@ TEST_CASE_FIXTURE(Fixture, "cannot_use_@_as_variable_name")
{
ParseOptions opts;
opts.allowDeclarationSyntax = true;
ScopedFastFlag sff{FFlag::LuauCheckedFunctionSyntax, true};
auto pr = tryParse(R"(
local @blah = 3

View file

@ -15,8 +15,6 @@
#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
#endif
LUAU_FASTFLAG(LuauCodegenContext)
using namespace Luau::CodeGen;
@ -32,8 +30,6 @@ TEST_CASE("NativeModuleRefRefcounting")
if (!luau_codegen_supported())
return;
ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true};
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
@ -250,8 +246,6 @@ TEST_CASE("NativeProtoRefcounting")
if (!luau_codegen_supported())
return;
ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true};
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
@ -303,8 +297,6 @@ TEST_CASE("NativeProtoState")
if (!luau_codegen_supported())
return;
ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true};
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
@ -364,8 +356,6 @@ TEST_CASE("AnonymousModuleLifetime")
if (!luau_codegen_supported())
return;
ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true};
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
@ -413,8 +403,6 @@ TEST_CASE("SharedAllocation")
if (!luau_codegen_supported())
return;
ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true};
UniqueSharedCodeGenContext sharedCodeGenContext = createSharedCodeGenContext();
std::unique_ptr<lua_State, void (*)(lua_State*)> L1{luaL_newstate(), lua_close};

View file

@ -12,7 +12,6 @@ using namespace Luau;
LUAU_FASTFLAG(LuauRecursiveTypeParameterRestriction);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauCheckedFunctionSyntax);
LUAU_FASTFLAG(DebugLuauSharedSelf);
TEST_SUITE_BEGIN("ToString");
@ -1007,7 +1006,6 @@ Type 'string' could not be converted into 'number' in an invariant context)";
TEST_CASE_FIXTURE(Fixture, "checked_fn_toString")
{
ScopedFastFlag flags[] = {
{FFlag::LuauCheckedFunctionSyntax, true},
{FFlag::DebugLuauDeferredConstraintResolution, true},
};

View file

@ -701,4 +701,19 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "keyof_oss_crash_gh1161")
CHECK(get<FunctionExitsWithoutReturning>(result.errors[0]));
}
TEST_CASE_FIXTURE(FamilyFixture, "fuzzer_numeric_binop_doesnt_assert_on_generalizeFreeType")
{
CheckResult result = check(R"(
Module 'l0':
local _ = (67108864)(_ >= _).insert
do end
do end
_(...,_(_,_(_()),_()))
(67108864)()()
_(_ ~= _ // _,l0)(_(_({n0,})),_(_),_)
_(setmetatable(_,{[...]=_,}))
)");
}
TEST_SUITE_END();

View file

@ -2687,4 +2687,40 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "error_suppression_propagates_through_functio
CHECK("(any) -> (any?, any)" == toString(requireType("first")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "fuzzer_normalizer_out_of_resources")
{
// This luau code should finish typechecking, not segfault upon dereferencing
// the normalized type
CheckResult result = check(R"(
Module 'l0':
local _ = true,...,_
if ... then
while _:_(_._G) do
do end
_ = _ and _
_ = 0 and {# _,}
local _ = "CCCCCCCCCCCCCCCCCCCCCCCCCCC"
local l0 = require(module0)
end
local function l0()
end
elseif _ then
l0 = _
end
do end
while _ do
_ = if _ then _ elseif _ then _,if _ then _ else _
_ = _()
do end
do end
if _ then
end
end
_ = _,{}
)");
}
TEST_SUITE_END();

View file

@ -4460,4 +4460,23 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "ipairs_adds_an_unbounded_indexer")
CHECK("{a}" == toString(requireType("a"), {true}));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "index_results_compare_to_nil")
{
CheckResult result = check(R"(
--!strict
function foo(tbl: {number})
if tbl[2] == nil then
print("foo")
end
if tbl[3] ~= nil then
print("bar")
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View file

@ -132,67 +132,4 @@ TEST_CASE_FIXTURE(Unifier2Fixture, "unify_binds_free_supertype_tail_pack")
CHECK("(number <: 'a)" == toString(freeAndFree));
}
TEST_CASE_FIXTURE(Unifier2Fixture, "generalize_a_type_that_is_bounded_by_another_generalizable_type")
{
auto [t1, ft1] = freshType();
auto [t2, ft2] = freshType();
// t2 <: t1 <: unknown
// unknown <: t2 <: t1
ft1->lowerBound = t2;
ft2->upperBound = t1;
ft2->lowerBound = builtinTypes.unknownType;
auto t2generalized = u2.generalize(t2);
REQUIRE(t2generalized);
CHECK(follow(t1) == follow(t2));
auto t1generalized = u2.generalize(t1);
REQUIRE(t1generalized);
CHECK(builtinTypes.unknownType == follow(t1));
CHECK(builtinTypes.unknownType == follow(t2));
}
// Same as generalize_a_type_that_is_bounded_by_another_generalizable_type
// except that we generalize the types in the opposite order
TEST_CASE_FIXTURE(Unifier2Fixture, "generalize_a_type_that_is_bounded_by_another_generalizable_type_in_reverse_order")
{
auto [t1, ft1] = freshType();
auto [t2, ft2] = freshType();
// t2 <: t1 <: unknown
// unknown <: t2 <: t1
ft1->lowerBound = t2;
ft2->upperBound = t1;
ft2->lowerBound = builtinTypes.unknownType;
auto t1generalized = u2.generalize(t1);
REQUIRE(t1generalized);
CHECK(follow(t1) == follow(t2));
auto t2generalized = u2.generalize(t2);
REQUIRE(t2generalized);
CHECK(builtinTypes.unknownType == follow(t1));
CHECK(builtinTypes.unknownType == follow(t2));
}
TEST_CASE_FIXTURE(Unifier2Fixture, "dont_traverse_into_class_types_when_generalizing")
{
auto [propTy, _] = freshType();
TypeId cursedClass = arena.addType(ClassType{"Cursed", {{"oh_no", Property::readonly(propTy)}}, std::nullopt, std::nullopt, {}, {}, ""});
auto genClass = u2.generalize(cursedClass);
REQUIRE(genClass);
auto genPropTy = get<ClassType>(*genClass)->props.at("oh_no").readTy;
CHECK(is<FreeType>(*genPropTy));
}
TEST_SUITE_END();

View file

@ -18,7 +18,7 @@
#include <windows.h> // IsDebuggerPresent
#endif
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
#include <immintrin.h>
#endif
@ -330,7 +330,7 @@ static void setFastFlags(const std::vector<doctest::String>& flags)
// This function performs system/architecture specific initialization prior to running tests.
static void initSystem()
{
#if defined(__x86_64__) || defined(_M_X64)
#if defined(CODEGEN_TARGET_X64)
// Some unit tests make use of denormalized numbers. So flags to flush to zero or treat denormals as zero
// must be disabled for expected behavior.
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);

View file

@ -186,7 +186,6 @@ TableTests.infer_array
TableTests.infer_indexer_from_array_like_table
TableTests.infer_indexer_from_its_variable_type_and_unifiable
TableTests.inferred_return_type_of_free_table
TableTests.insert_a_and_f_of_a_into_table_res_in_a_loop
TableTests.invariant_table_properties_means_instantiating_tables_in_assignment_is_unsound
TableTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
TableTests.length_operator_union
@ -363,7 +362,6 @@ TypeInferLoops.for_in_loop_on_non_function
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_loop
TypeInferLoops.ipairs_produces_integral_indices
TypeInferLoops.iterate_over_free_table
TypeInferLoops.iterate_over_properties
TypeInferLoops.iteration_regression_issue_69967_alt
TypeInferLoops.loop_iter_metamethod_nil