diff --git a/Analysis/include/Luau/ConstraintGenerator.h b/Analysis/include/Luau/ConstraintGenerator.h index 4f217142..6cb4b6d6 100644 --- a/Analysis/include/Luau/ConstraintGenerator.h +++ b/Analysis/include/Luau/ConstraintGenerator.h @@ -165,7 +165,7 @@ private: */ ScopePtr childScope(AstNode* node, const ScopePtr& parent); - std::optional lookup(const ScopePtr& scope, DefId def, bool prototype = true); + std::optional lookup(const ScopePtr& scope, Location location, DefId def, bool prototype = true); /** * Adds a new constraint with no dependencies to a given scope. @@ -242,7 +242,7 @@ private: Inference check(const ScopePtr& scope, AstExprConstantBool* bool_, std::optional expectedType, bool forceSingleton); Inference check(const ScopePtr& scope, AstExprLocal* local); Inference check(const ScopePtr& scope, AstExprGlobal* global); - Inference checkIndexName(const ScopePtr& scope, const RefinementKey* key, AstExpr* indexee, std::string index); + Inference checkIndexName(const ScopePtr& scope, const RefinementKey* key, AstExpr* indexee, const std::string& index, Location indexLocation); Inference check(const ScopePtr& scope, AstExprIndexName* indexName); Inference check(const ScopePtr& scope, AstExprIndexExpr* indexExpr); Inference check(const ScopePtr& scope, AstExprFunction* func, std::optional expectedType, bool generalize); diff --git a/Analysis/include/Luau/ToString.h b/Analysis/include/Luau/ToString.h index 091d8dd1..f8001e08 100644 --- a/Analysis/include/Luau/ToString.h +++ b/Analysis/include/Luau/ToString.h @@ -12,7 +12,6 @@ LUAU_FASTINT(LuauTableTypeMaximumStringifierLength) LUAU_FASTINT(LuauTypeMaximumStringifierLength) -LUAU_FASTFLAG(LuauToStringSimpleCompositeTypesSingleLine) namespace Luau { @@ -36,7 +35,6 @@ struct ToStringOptions { ToStringOptions(bool exhaustive = false) : exhaustive(exhaustive) - , compositeTypesSingleLineLimit(FFlag::LuauToStringSimpleCompositeTypesSingleLine ? 5 : 0) { } diff --git a/Analysis/include/Luau/Type.h b/Analysis/include/Luau/Type.h index 25d63e1b..47161886 100644 --- a/Analysis/include/Luau/Type.h +++ b/Analysis/include/Luau/Type.h @@ -1103,4 +1103,7 @@ LUAU_NOINLINE T* emplaceType(Type* ty, Args&&... args) return &ty->ty.emplace(std::forward(args)...); } +template<> +LUAU_NOINLINE Unifiable::Bound* emplaceType(Type* ty, TypeId& tyArg); + } // namespace Luau diff --git a/Analysis/include/Luau/TypePack.h b/Analysis/include/Luau/TypePack.h index 8ce8d7f5..691a8ed9 100644 --- a/Analysis/include/Luau/TypePack.h +++ b/Analysis/include/Luau/TypePack.h @@ -233,4 +233,18 @@ bool isVariadicTail(TypePackId tp, const TxnLog& log, bool includeHiddenVariadic bool containsNever(TypePackId tp); +/* + * Use this to change the kind of a particular type pack. + * + * LUAU_NOINLINE so that the calling frame doesn't have to pay the stack storage for the new variant. + */ +template +LUAU_NOINLINE T* emplaceTypePack(TypePackVar* ty, Args&&... 
args) +{ + return &ty->ty.emplace(std::forward(args)...); +} + +template<> +LUAU_NOINLINE Unifiable::Bound* emplaceTypePack(TypePackVar* ty, TypePackId& tyArg); + } // namespace Luau diff --git a/Analysis/include/Luau/TypeUtils.h b/Analysis/include/Luau/TypeUtils.h index b02319ce..81c8a5ca 100644 --- a/Analysis/include/Luau/TypeUtils.h +++ b/Analysis/include/Luau/TypeUtils.h @@ -201,6 +201,15 @@ const T* get(std::optional ty) return nullptr; } +template +T* getMutable(std::optional ty) +{ + if (ty) + return getMutable(*ty); + else + return nullptr; +} + template std::optional follow(std::optional ty) { diff --git a/Analysis/src/Constraint.cpp b/Analysis/src/Constraint.cpp index 7d3f9e31..4f35b58f 100644 --- a/Analysis/src/Constraint.cpp +++ b/Analysis/src/Constraint.cpp @@ -28,6 +28,12 @@ struct FreeTypeCollector : TypeOnceVisitor result->insert(ty); return false; } + + bool visit(TypeId ty, const ClassType&) override + { + // ClassTypes never contain free types. + return false; + } }; DenseHashSet Constraint::getFreeTypes() const diff --git a/Analysis/src/ConstraintGenerator.cpp b/Analysis/src/ConstraintGenerator.cpp index 0fa8b9c4..16e2014a 100644 --- a/Analysis/src/ConstraintGenerator.cpp +++ b/Analysis/src/ConstraintGenerator.cpp @@ -287,7 +287,7 @@ ScopePtr ConstraintGenerator::childScope(AstNode* node, const ScopePtr& parent) return scope; } -std::optional ConstraintGenerator::lookup(const ScopePtr& scope, DefId def, bool prototype) +std::optional ConstraintGenerator::lookup(const ScopePtr& scope, Location location, DefId def, bool prototype) { if (get(def)) return scope->lookup(def); @@ -296,7 +296,7 @@ std::optional ConstraintGenerator::lookup(const ScopePtr& scope, DefId d if (auto found = scope->lookup(def)) return *found; else if (!prototype && phi->operands.size() == 1) - return lookup(scope, phi->operands.at(0), prototype); + return lookup(scope, location, phi->operands.at(0), prototype); else if (!prototype) return std::nullopt; @@ -307,14 +307,14 @@ std::optional ConstraintGenerator::lookup(const ScopePtr& scope, DefId d // `scope->lookup(operand)` may return nothing because we only bind a type to that operand // once we've seen that particular `DefId`. In this case, we need to prototype those types // and use those at a later time. - std::optional ty = lookup(scope, operand, /*prototype*/ false); + std::optional ty = lookup(scope, location, operand, /*prototype*/ false); if (!ty) { ty = arena->addType(BlockedType{}); rootScope->lvalueTypes[operand] = *ty; } - res = makeUnion(scope, Location{} /* TODO: can we provide a real location here? 
*/, res, *ty); + res = makeUnion(scope, location, res, *ty); } scope->lvalueTypes[def] = res; @@ -512,7 +512,7 @@ void ConstraintGenerator::applyRefinements(const ScopePtr& scope, Location locat for (auto& [def, partition] : refinements) { - if (std::optional defTy = lookup(scope, def)) + if (std::optional defTy = lookup(scope, location, def)) { TypeId ty = *defTy; if (partition.shouldAppendNilType) @@ -918,7 +918,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatLocalFuncti bool sigFullyDefined = !hasFreeType(sig.signature); if (sigFullyDefined) - asMutable(functionType)->ty.emplace(sig.signature); + emplaceType(asMutable(functionType), sig.signature); DefId def = dfg->getDef(function->name); scope->lvalueTypes[def] = functionType; @@ -969,15 +969,15 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatFunction* f bool sigFullyDefined = !hasFreeType(sig.signature); if (sigFullyDefined) - asMutable(generalizedType)->ty.emplace(sig.signature); + emplaceType(asMutable(generalizedType), sig.signature); DenseHashSet excludeList{nullptr}; DefId def = dfg->getDef(function->name); - std::optional existingFunctionTy = lookup(scope, def); + std::optional existingFunctionTy = follow(lookup(scope, function->name->location, def)); - if (existingFunctionTy && (sigFullyDefined || function->name->is()) && get(*existingFunctionTy)) - asMutable(*existingFunctionTy)->ty.emplace(sig.signature); + if (get(existingFunctionTy) && sigFullyDefined) + emplaceType(asMutable(*existingFunctionTy), sig.signature); if (AstExprLocal* localName = function->name->as()) { @@ -1071,6 +1071,12 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatFunction* f blocked->setOwner(addConstraint(scope, std::move(c))); } + if (BlockedType* bt = getMutable(follow(existingFunctionTy)); bt && !bt->getOwner()) + { + auto uc = addConstraint(scope, function->name->location, Unpack1Constraint{*existingFunctionTy, generalizedType}); + bt->setOwner(uc); + } + return ControlFlow::None; } @@ -1113,13 +1119,13 @@ static void bindFreeType(TypeId a, TypeId b) LUAU_ASSERT(af || bf); if (!bf) - asMutable(a)->ty.emplace(b); + emplaceType(asMutable(a), b); else if (!af) - asMutable(b)->ty.emplace(a); + emplaceType(asMutable(b), a); else if (subsumes(bf->scope, af->scope)) - asMutable(a)->ty.emplace(b); + emplaceType(asMutable(a), b); else if (subsumes(af->scope, bf->scope)) - asMutable(b)->ty.emplace(a); + emplaceType(asMutable(b), a); } ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatAssign* assign) @@ -1153,6 +1159,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatAssign* ass for (TypeId assignee : typeStates) { auto blocked = getMutable(assignee); + if (blocked && !blocked->getOwner()) blocked->setOwner(uc); } @@ -1254,11 +1261,11 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatTypeAlias* LUAU_ASSERT(get(aliasTy)); if (occursCheck(aliasTy, ty)) { - asMutable(aliasTy)->ty.emplace(builtinTypes->anyType); + emplaceType(asMutable(aliasTy), builtinTypes->anyType); reportError(alias->nameLocation, OccursCheckFailed{}); } else - asMutable(aliasTy)->ty.emplace(ty); + emplaceType(asMutable(aliasTy), ty); std::vector typeParams; for (auto tyParam : createGenerics(*defnScope, alias->generics, /* useCache */ true, /* addTypes */ false)) @@ -1856,12 +1863,12 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprLocal* local) // if we have a refinement key, we can look up its type. 
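
// Aside (illustrative sketch, not part of the diff): the lookup() overload above now threads
// the use-site Location down to makeUnion, so unions built while resolving a phi node are
// attributed to a real source position instead of the old Location{} sentinel. The toy below
// only mirrors the shape of that change; Loc, unionAt and resolvePhi are invented names.
#include <optional>
#include <string>
#include <vector>

struct Loc
{
    int line = 0;
    int column = 0;
};

using Ty = std::string;

Ty unionAt(const Ty& a, const Ty& b, Loc where)
{
    // A real implementation would record `where` on the resulting type or constraint,
    // which is what passing local->location / indexLocation into lookup() enables.
    (void)where;
    return a == b ? a : "(" + a + " | " + b + ")";
}

std::optional<Ty> resolvePhi(const std::vector<Ty>& operands, Loc useSite)
{
    if (operands.empty())
        return std::nullopt;

    Ty result = operands.front();
    for (size_t i = 1; i < operands.size(); ++i)
        result = unionAt(result, operands[i], useSite); // the use site, not a dummy location
    return result;
}
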
if (key) - maybeTy = lookup(scope, key->def); + maybeTy = lookup(scope, local->location, key->def); // if the current def doesn't have a type, we might be doing a compound assignment // and therefore might need to look at the rvalue def instead. if (!maybeTy && rvalueDef) - maybeTy = lookup(scope, *rvalueDef); + maybeTy = lookup(scope, local->location, *rvalueDef); if (maybeTy) { @@ -1887,7 +1894,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprGlobal* globa /* prepopulateGlobalScope() has already added all global functions to the environment by this point, so any * global that is not already in-scope is definitely an unknown symbol. */ - if (auto ty = lookup(scope, def, /*prototype=*/false)) + if (auto ty = lookup(scope, global->location, def, /*prototype=*/false)) { rootScope->lvalueTypes[def] = *ty; return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)}; @@ -1899,14 +1906,14 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprGlobal* globa } } -Inference ConstraintGenerator::checkIndexName(const ScopePtr& scope, const RefinementKey* key, AstExpr* indexee, std::string index) +Inference ConstraintGenerator::checkIndexName(const ScopePtr& scope, const RefinementKey* key, AstExpr* indexee, const std::string& index, Location indexLocation) { TypeId obj = check(scope, indexee).ty; TypeId result = arena->addType(BlockedType{}); if (key) { - if (auto ty = lookup(scope, key->def)) + if (auto ty = lookup(scope, indexLocation, key->def)) return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)}; scope->rvalueRefinements[key->def] = result; @@ -1925,7 +1932,7 @@ Inference ConstraintGenerator::checkIndexName(const ScopePtr& scope, const Refin Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexName* indexName) { const RefinementKey* key = dfg->getRefinementKey(indexName); - return checkIndexName(scope, key, indexName->expr, indexName->index.value); + return checkIndexName(scope, key, indexName->expr, indexName->index.value, indexName->indexLocation); } Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* indexExpr) @@ -1933,7 +1940,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* in if (auto constantString = indexExpr->index->as()) { const RefinementKey* key = dfg->getRefinementKey(indexExpr); - return checkIndexName(scope, key, indexExpr->expr, constantString->value.data); + return checkIndexName(scope, key, indexExpr->expr, constantString->value.data, indexExpr->location); } TypeId obj = check(scope, indexExpr->expr).ty; @@ -1944,7 +1951,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* in const RefinementKey* key = dfg->getRefinementKey(indexExpr); if (key) { - if (auto ty = lookup(scope, key->def)) + if (auto ty = lookup(scope, indexExpr->location, key->def)) return Inference{*ty, refinementArena.proposition(key, builtinTypes->truthyType)}; scope->rvalueRefinements[key->def] = result; @@ -2326,7 +2333,7 @@ std::tuple ConstraintGenerator::checkBinary( TypeId ty = follow(typeFun->type); // We're only interested in the root class of any classes. 
- if (auto ctv = get(ty); !ctv || ctv->parent == builtinTypes->classType) + if (auto ctv = get(ty); ctv && ctv->parent == builtinTypes->classType) discriminantTy = ty; } @@ -2427,9 +2434,7 @@ ConstraintGenerator::LValueBounds ConstraintGenerator::checkLValue(const ScopePt // TODO: Need to clip this, but this requires more code to be reworked first before we can clip this. std::optional assignedTy = arena->addType(BlockedType{}); - auto unpackC = addConstraint(scope, local->location, - UnpackConstraint{arena->addTypePack({*ty}), arena->addTypePack({*assignedTy}), - /*resultIsLValue*/ true}); + auto unpackC = addConstraint(scope, local->location, Unpack1Constraint{*ty, *assignedTy, /*resultIsLValue*/ true}); if (auto blocked = get(*ty)) { @@ -2834,11 +2839,11 @@ ConstraintGenerator::FunctionSignature ConstraintGenerator::checkFunctionSignatu // generate constraints for return types, we have a guarantee that we // know the annotated return type already, if one was provided. LUAU_ASSERT(get(returnType)); - asMutable(returnType)->ty.emplace(annotatedRetType); + emplaceTypePack(asMutable(returnType), annotatedRetType); } else if (expectedFunction) { - asMutable(returnType)->ty.emplace(expectedFunction->retTypes); + emplaceTypePack(asMutable(returnType), expectedFunction->retTypes); } // TODO: Preserve argument names in the function's type. @@ -3386,7 +3391,7 @@ void ConstraintGenerator::fillInInferredBindings(const ScopePtr& globalScope, As std::move(tys), {}, }, - globalScope, Location{}); + globalScope, location); scope->bindings[symbol] = Binding{ty, location}; } diff --git a/Analysis/src/ConstraintSolver.cpp b/Analysis/src/ConstraintSolver.cpp index e274047e..6a9dd031 100644 --- a/Analysis/src/ConstraintSolver.cpp +++ b/Analysis/src/ConstraintSolver.cpp @@ -591,7 +591,7 @@ bool ConstraintSolver::tryDispatch(const GeneralizationConstraint& c, NotNulllocation); - asMutable(c.generalizedType)->ty.emplace(builtinTypes->errorRecoveryType()); + emplaceType(asMutable(c.generalizedType), builtinTypes->errorType); } unblock(c.generalizedType, constraint->location); @@ -771,7 +771,7 @@ bool ConstraintSolver::tryDispatch(const TypeAliasExpansionConstraint& c, NotNul auto bindResult = [this, &c, constraint](TypeId result) { LUAU_ASSERT(get(c.target)); - asMutable(c.target)->ty.emplace(result); + emplaceType(asMutable(c.target), result); unblock(c.target, constraint->location); }; @@ -953,7 +953,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull(fn)) { - asMutable(c.result)->ty.emplace(builtinTypes->errorTypePack); + emplaceTypePack(asMutable(c.result), builtinTypes->errorTypePack); unblock(c.result, constraint->location); return true; @@ -961,7 +961,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull(fn)) { - asMutable(c.result)->ty.emplace(builtinTypes->neverTypePack); + emplaceTypePack(asMutable(c.result), builtinTypes->neverTypePack); unblock(c.result, constraint->location); return true; } @@ -1019,7 +1019,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNulladdTypePack(TypePack{std::move(argsHead), argsTail}); fn = follow(*callMm); - asMutable(c.result)->ty.emplace(constraint->scope); + emplaceTypePack(asMutable(c.result), constraint->scope); } else { @@ -1036,7 +1036,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNullty.emplace(constraint->scope); + emplaceTypePack(asMutable(c.result), constraint->scope); } for (std::optional ty : c.discriminantTypes) @@ -1079,7 +1079,7 @@ bool 
ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNullty.emplace(result); + emplaceTypePack(asMutable(c.result), result); } for (const auto& [expanded, additions] : u2.expandedFreeTypes) @@ -1190,7 +1190,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCheckConstraint& c, NotNullargs.data[j]->annotation && get(follow(lambdaArgTys[j]))) { - asMutable(lambdaArgTys[j])->ty.emplace(expectedLambdaArgTys[j]); + emplaceType(asMutable(lambdaArgTys[j]), expectedLambdaArgTys[j]); } } } @@ -1242,7 +1242,7 @@ bool ConstraintSolver::tryDispatch(const PrimitiveTypeConstraint& c, NotNulllowerBound; - asMutable(c.freeType)->ty.emplace(bindTo); + emplaceType(asMutable(c.freeType), bindTo); return true; } @@ -1442,7 +1442,7 @@ bool ConstraintSolver::tryDispatchHasIndexer( if (auto ft = get(subjectType)) { FreeType freeResult{ft->scope, builtinTypes->neverType, builtinTypes->unknownType}; - asMutable(resultType)->ty.emplace(freeResult); + emplaceType(asMutable(resultType), freeResult); TypeId upperBound = arena->addType(TableType{/* props */ {}, TableIndexer{indexType, resultType}, TypeLevel{}, TableState::Unsealed}); @@ -1464,7 +1464,7 @@ bool ConstraintSolver::tryDispatchHasIndexer( // FIXME this is greedy. FreeType freeResult{tt->scope, builtinTypes->neverType, builtinTypes->unknownType}; - asMutable(resultType)->ty.emplace(freeResult); + emplaceType(asMutable(resultType), freeResult); tt->indexer = TableIndexer{indexType, resultType}; return true; @@ -1521,7 +1521,7 @@ bool ConstraintSolver::tryDispatchHasIndexer( else if (1 == results.size()) bindBlockedType(resultType, *results.begin(), subjectType, constraint); else - asMutable(resultType)->ty.emplace(std::vector(results.begin(), results.end())); + emplaceType(asMutable(resultType), std::vector(results.begin(), results.end())); return true; } @@ -1549,11 +1549,11 @@ bool ConstraintSolver::tryDispatchHasIndexer( } if (0 == results.size()) - asMutable(resultType)->ty.emplace(builtinTypes->errorType); + emplaceType(asMutable(resultType), builtinTypes->errorType); else if (1 == results.size()) - asMutable(resultType)->ty.emplace(*results.begin()); + emplaceType(asMutable(resultType), *results.begin()); else - asMutable(resultType)->ty.emplace(std::vector(results.begin(), results.end())); + emplaceType(asMutable(resultType), std::vector(results.begin(), results.end())); return true; } @@ -1619,6 +1619,11 @@ std::pair> ConstraintSolver::tryDispatchSetIndexer( { if (tt->indexer) { + if (isBlocked(tt->indexer->indexType)) + return {block(tt->indexer->indexType, constraint), std::nullopt}; + else if (isBlocked(tt->indexer->indexResultType)) + return {block(tt->indexer->indexResultType, constraint), std::nullopt}; + unify(constraint, indexType, tt->indexer->indexType); return {true, tt->indexer->indexResultType}; } @@ -1713,7 +1718,7 @@ bool ConstraintSolver::tryDispatchUnpack1(NotNull constraint, --lt->blockCount; if (0 == lt->blockCount) - asMutable(ty)->ty.emplace(lt->domain); + emplaceType(asMutable(ty), lt->domain); }; if (auto ut = get(resultTy)) @@ -1729,7 +1734,7 @@ bool ConstraintSolver::tryDispatchUnpack1(NotNull constraint, // constitute any meaningful constraint, so we replace it // with a free type. 
TypeId f = freshType(arena, builtinTypes, constraint->scope); - asMutable(resultTy)->ty.emplace(f); + emplaceType(asMutable(resultTy), f); } else bindBlockedType(resultTy, srcTy, srcTy, constraint); @@ -1756,7 +1761,7 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNullty.emplace(sourcePack); + emplaceTypePack(asMutable(resultPack), sourcePack); unblock(resultPack, constraint->location); return true; } @@ -1795,11 +1800,11 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNullblockCount; if (0 == lt->blockCount) - asMutable(resultTy)->ty.emplace(lt->domain); + emplaceType(asMutable(resultTy), lt->domain); } else if (get(resultTy) || get(resultTy)) { - asMutable(resultTy)->ty.emplace(builtinTypes->nilType); + emplaceType(asMutable(resultTy), builtinTypes->nilType); unblock(resultTy, constraint->location); } @@ -1974,7 +1979,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl LUAU_ASSERT(0 <= lt->blockCount); if (0 == lt->blockCount) - asMutable(ty)->ty.emplace(lt->domain); + emplaceType(asMutable(ty), lt->domain); } } } @@ -2032,11 +2037,8 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl } else if (auto iteratorMetatable = get(iteratorTy)) { - TypeId metaTy = follow(iteratorMetatable->metatable); - if (get(metaTy)) - return block_(metaTy); - - LUAU_ASSERT(false); + // If the metatable does not contain a `__iter` metamethod, then we iterate over the table part of the metatable. + return tryDispatchIterableTable(iteratorMetatable->table, c, constraint, force); } else if (auto primitiveTy = get(iteratorTy); primitiveTy && primitiveTy->type == PrimitiveType::Type::Table) unpack(builtinTypes->unknownType); @@ -2391,10 +2393,10 @@ void ConstraintSolver::bindBlockedType(TypeId blockedTy, TypeId resultTy, TypeId LUAU_ASSERT(freeScope); - asMutable(blockedTy)->ty.emplace(arena->freshType(freeScope)); + emplaceType(asMutable(blockedTy), arena->freshType(freeScope)); } else - asMutable(blockedTy)->ty.emplace(resultTy); + emplaceType(asMutable(blockedTy), resultTy); } bool ConstraintSolver::block_(BlockedConstraintId target, NotNull constraint) diff --git a/Analysis/src/Instantiation2.cpp b/Analysis/src/Instantiation2.cpp index d951a5b0..36e5bde6 100644 --- a/Analysis/src/Instantiation2.cpp +++ b/Analysis/src/Instantiation2.cpp @@ -23,7 +23,7 @@ bool Instantiation2::isDirty(TypePackId tp) TypeId Instantiation2::clean(TypeId ty) { - TypeId substTy = genericSubstitutions[ty]; + TypeId substTy = follow(genericSubstitutions[ty]); const FreeType* ft = get(substTy); // violation of the substitution invariant if this is not a free type. 
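
// Aside (illustrative sketch, not part of the diff): why Instantiation2::clean above now calls
// follow() on the recorded substitution. After unification, a map entry may have been turned
// into a Bound link that points at another type; reading the raw entry would inspect the Bound
// wrapper instead of the FreeType that the substitution invariant expects. Standalone toy with
// invented names (Node, followNode); Luau::follow does the equivalent walk over TypeIds.
#include <cassert>

struct Node
{
    Node* boundTo = nullptr; // non-null once this node has been bound to another
};

Node* followNode(Node* n)
{
    while (n->boundTo)
        n = n->boundTo;
    return n;
}

void substitutionExample()
{
    Node target;                // the type the substitution really refers to now
    Node recorded;              // what the substitution map captured earlier
    recorded.boundTo = &target; // ...and which later got bound to `target`

    assert(followNode(&recorded) == &target); // inspect the real target, not the stale wrapper
}
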
diff --git a/Analysis/src/NonStrictTypeChecker.cpp b/Analysis/src/NonStrictTypeChecker.cpp index 18a0d6f7..2a1b3cc4 100644 --- a/Analysis/src/NonStrictTypeChecker.cpp +++ b/Analysis/src/NonStrictTypeChecker.cpp @@ -191,9 +191,9 @@ struct NonStrictTypeChecker TypeId result = arena->addType(FreeType{ftp->scope}); TypePackId freeTail = arena->addTypePack(FreeTypePack{ftp->scope}); - TypePack& resultPack = asMutable(pack)->ty.emplace(); - resultPack.head.assign(1, result); - resultPack.tail = freeTail; + TypePack* resultPack = emplaceTypePack(asMutable(pack)); + resultPack->head.assign(1, result); + resultPack->tail = freeTail; return result; } @@ -560,8 +560,19 @@ struct NonStrictTypeChecker // We will compare arg and ~number AstExpr* arg = call->args.data[i]; TypeId expectedArgType = argTypes[i]; + std::shared_ptr norm = normalizer.normalize(expectedArgType); DefId def = dfg->getDef(arg); - TypeId runTimeErrorTy = getOrCreateNegation(expectedArgType); + TypeId runTimeErrorTy; + // If we're dealing with any, negating any will cause all subtype tests to fail, since ~any is any + // However, when someone calls this function, they're going to want to be able to pass it anything, + // for that reason, we manually inject never into the context so that the runtime test will always pass. + if (!norm) + reportError(NormalizationTooComplex{}, arg->location); + + if (norm && get(norm->tops)) + runTimeErrorTy = builtinTypes->neverType; + else + runTimeErrorTy = getOrCreateNegation(expectedArgType); fresh.addContext(def, runTimeErrorTy); } diff --git a/Analysis/src/TableLiteralInference.cpp b/Analysis/src/TableLiteralInference.cpp index 9a7dce3d..414544b6 100644 --- a/Analysis/src/TableLiteralInference.cpp +++ b/Analysis/src/TableLiteralInference.cpp @@ -148,7 +148,7 @@ TypeId matchLiteralType(NotNull> astTypes, Relation upperBoundRelation = relate(ft->upperBound, expectedType); if (upperBoundRelation == Relation::Subset || upperBoundRelation == Relation::Coincident) { - asMutable(exprType)->ty.emplace(expectedType); + emplaceType(asMutable(exprType), expectedType); return exprType; } @@ -158,7 +158,7 @@ TypeId matchLiteralType(NotNull> astTypes, Relation lowerBoundRelation = relate(ft->lowerBound, expectedType); if (lowerBoundRelation == Relation::Subset || lowerBoundRelation == Relation::Coincident) { - asMutable(exprType)->ty.emplace(expectedType); + emplaceType(asMutable(exprType), expectedType); return exprType; } } @@ -173,7 +173,7 @@ TypeId matchLiteralType(NotNull> astTypes, Relation upperBoundRelation = relate(ft->upperBound, expectedType); if (upperBoundRelation == Relation::Subset || upperBoundRelation == Relation::Coincident) { - asMutable(exprType)->ty.emplace(expectedType); + emplaceType(asMutable(exprType), expectedType); return exprType; } @@ -183,7 +183,7 @@ TypeId matchLiteralType(NotNull> astTypes, Relation lowerBoundRelation = relate(ft->lowerBound, expectedType); if (lowerBoundRelation == Relation::Subset || lowerBoundRelation == Relation::Coincident) { - asMutable(exprType)->ty.emplace(expectedType); + emplaceType(asMutable(exprType), expectedType); return exprType; } } @@ -193,7 +193,7 @@ TypeId matchLiteralType(NotNull> astTypes, { if (auto ft = get(exprType); ft && fastIsSubtype(ft->upperBound, expectedType)) { - asMutable(exprType)->ty.emplace(expectedType); + emplaceType(asMutable(exprType), expectedType); return exprType; } diff --git a/Analysis/src/ToString.cpp b/Analysis/src/ToString.cpp index 62d2a6f3..cb6b2f4a 100644 --- a/Analysis/src/ToString.cpp +++ 
b/Analysis/src/ToString.cpp @@ -20,8 +20,7 @@ #include LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) -LUAU_FASTFLAGVARIABLE(LuauToStringSimpleCompositeTypesSingleLine, false) -LUAU_FASTFLAGVARIABLE(LuauStringifyCyclesRootedAtPacks, false) +LUAU_FASTFLAGVARIABLE(LuauToStringiteTypesSingleLine, false) /* * Enables increasing levels of verbosity for Luau type names when stringifying. @@ -1492,21 +1491,10 @@ ToStringResult toStringDetailed(TypePackId tp, ToStringOptions& opts) else tvs.stringify(tp); - if (FFlag::LuauStringifyCyclesRootedAtPacks) + if (!cycles.empty() || !cycleTPs.empty()) { - if (!cycles.empty() || !cycleTPs.empty()) - { - result.cycle = true; - state.emit(" where "); - } - } - else - { - if (!cycles.empty()) - { - result.cycle = true; - state.emit(" where "); - } + result.cycle = true; + state.emit(" where "); } state.exhaustive = true; @@ -1533,30 +1521,27 @@ ToStringResult toStringDetailed(TypePackId tp, ToStringOptions& opts) semi = true; } - if (FFlag::LuauStringifyCyclesRootedAtPacks) + std::vector> sortedCycleTpNames{state.cycleTpNames.begin(), state.cycleTpNames.end()}; + std::sort(sortedCycleTpNames.begin(), sortedCycleTpNames.end(), [](const auto& a, const auto& b) { + return a.second < b.second; + }); + + TypePackStringifier tps{tvs.state}; + + for (const auto& [cycleTp, name] : sortedCycleTpNames) { - std::vector> sortedCycleTpNames{state.cycleTpNames.begin(), state.cycleTpNames.end()}; - std::sort(sortedCycleTpNames.begin(), sortedCycleTpNames.end(), [](const auto& a, const auto& b) { - return a.second < b.second; - }); + if (semi) + state.emit(" ; "); - TypePackStringifier tps{tvs.state}; + state.emit(name); + state.emit(" = "); + Luau::visit( + [&tps, cycleTp = cycleTp](auto t) { + return tps(cycleTp, t); + }, + cycleTp->ty); - for (const auto& [cycleTp, name] : sortedCycleTpNames) - { - if (semi) - state.emit(" ; "); - - state.emit(name); - state.emit(" = "); - Luau::visit( - [&tps, cycleTp = cycleTp](auto t) { - return tps(cycleTp, t); - }, - cycleTp->ty); - - semi = true; - } + semi = true; } if (opts.maxTypeLength > 0 && result.name.length() > opts.maxTypeLength) diff --git a/Analysis/src/Type.cpp b/Analysis/src/Type.cpp index e82b3d5c..b7a54e3d 100644 --- a/Analysis/src/Type.cpp +++ b/Analysis/src/Type.cpp @@ -1324,4 +1324,11 @@ bool GenericTypePackDefinition::operator==(const GenericTypePackDefinition& rhs) return tp == rhs.tp && defaultValue == rhs.defaultValue; } +template<> +LUAU_NOINLINE Unifiable::Bound* emplaceType(Type* ty, TypeId& tyArg) +{ + LUAU_ASSERT(ty != follow(tyArg)); + return &ty->ty.emplace(tyArg); +} + } // namespace Luau diff --git a/Analysis/src/TypeChecker2.cpp b/Analysis/src/TypeChecker2.cpp index 0442836f..a888564e 100644 --- a/Analysis/src/TypeChecker2.cpp +++ b/Analysis/src/TypeChecker2.cpp @@ -1016,9 +1016,9 @@ struct TypeChecker2 reportError(UnificationTooComplex{}, forInStatement->values.data[0]->location); } } - else if (iteratorNorm && iteratorNorm->hasTopTable()) + else if (iteratorNorm && iteratorNorm->hasTables()) { - // nothing + // Ok. All tables can be iterated. 
} else if (!iteratorNorm || !iteratorNorm->shouldSuppressErrors()) { @@ -2045,25 +2045,28 @@ struct TypeChecker2 case AstExprBinary::Op::CompareLt: { if (normLeft && normLeft->shouldSuppressErrors()) - return builtinTypes->numberType; + return builtinTypes->booleanType; + + // if we're comparing against an uninhabited type, it's unobservable that the comparison did not run + if (normLeft && normalizer.isInhabited(normLeft.get()) == NormalizationResult::False) + return builtinTypes->booleanType; if (normLeft && normLeft->isExactlyNumber()) { testIsSubtype(rightType, builtinTypes->numberType, expr->right->location); - return builtinTypes->numberType; + return builtinTypes->booleanType; } - else if (normLeft && normLeft->isSubtypeOfString()) + + if (normLeft && normLeft->isSubtypeOfString()) { testIsSubtype(rightType, builtinTypes->stringType, expr->right->location); - return builtinTypes->stringType; - } - else - { - reportError(GenericError{format("Types '%s' and '%s' cannot be compared with relational operator %s", toString(leftType).c_str(), - toString(rightType).c_str(), toString(expr->op).c_str())}, - expr->location); - return builtinTypes->errorRecoveryType(); + return builtinTypes->booleanType; } + + reportError(GenericError{format("Types '%s' and '%s' cannot be compared with relational operator %s", toString(leftType).c_str(), + toString(rightType).c_str(), toString(expr->op).c_str())}, + expr->location); + return builtinTypes->errorRecoveryType(); } case AstExprBinary::Op::And: @@ -2144,9 +2147,9 @@ struct TypeChecker2 TypeId result = module->internalTypes.addType(FreeType{ftp->scope}); TypePackId freeTail = module->internalTypes.addTypePack(FreeTypePack{ftp->scope}); - TypePack& resultPack = asMutable(pack)->ty.emplace(); - resultPack.head.assign(1, result); - resultPack.tail = freeTail; + TypePack* resultPack = emplaceTypePack(asMutable(pack)); + resultPack->head.assign(1, result); + resultPack->tail = freeTail; return result; } diff --git a/Analysis/src/TypeFamily.cpp b/Analysis/src/TypeFamily.cpp index 2dc0ff67..a685c216 100644 --- a/Analysis/src/TypeFamily.cpp +++ b/Analysis/src/TypeFamily.cpp @@ -1069,15 +1069,15 @@ static TypeFamilyReductionResult comparisonFamilyFn(TypeId instance, Not // lt implies t is number // lt implies t is number if (lhsFree && isNumber(rhsTy)) - asMutable(lhsTy)->ty.emplace(ctx->builtins->numberType); + emplaceType(asMutable(lhsTy), ctx->builtins->numberType); else if (rhsFree && isNumber(lhsTy)) - asMutable(rhsTy)->ty.emplace(ctx->builtins->numberType); - else if (lhsFree && get(rhsTy) == nullptr) + emplaceType(asMutable(rhsTy), ctx->builtins->numberType); + else if (lhsFree && ctx->normalizer->isInhabited(rhsTy) != NormalizationResult::False) { auto c1 = ctx->pushConstraint(EqualityConstraint{lhsTy, rhsTy}); const_cast(ctx->constraint)->dependencies.emplace_back(c1); } - else if (rhsFree && get(lhsTy) == nullptr) + else if (rhsFree && ctx->normalizer->isInhabited(lhsTy) != NormalizationResult::False) { auto c1 = ctx->pushConstraint(EqualityConstraint{rhsTy, lhsTy}); const_cast(ctx->constraint)->dependencies.emplace_back(c1); diff --git a/Analysis/src/TypePack.cpp b/Analysis/src/TypePack.cpp index 94d8783b..d94031ba 100644 --- a/Analysis/src/TypePack.cpp +++ b/Analysis/src/TypePack.cpp @@ -465,4 +465,11 @@ bool containsNever(TypePackId tp) return false; } +template<> +LUAU_NOINLINE Unifiable::Bound* emplaceTypePack(TypePackVar* ty, TypePackId& tyArg) +{ + LUAU_ASSERT(ty != follow(tyArg)); + return &ty->ty.emplace(tyArg); +} + } // namespace Luau 
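
// Aside (illustrative sketch, not part of the diff): the emplaceType/emplaceTypePack
// specializations for Bound added above assert that a type or type pack is never bound to
// itself, directly or through an existing chain of Bound links; such a cycle would make
// follow() loop forever. Standalone toy below, with invented names (Cell, bindCell).
#include <cassert>

struct Cell
{
    Cell* boundTo = nullptr;
};

void bindCell(Cell* cell, Cell* target)
{
    Cell* end = target;
    while (end->boundTo) // walk existing Bound links, as Luau::follow would
        end = end->boundTo;

    assert(cell != end); // mirrors LUAU_ASSERT(ty != follow(tyArg)): refuse to form a cycle
    cell->boundTo = target;
}

void cycleExample()
{
    Cell a;
    Cell b;
    bindCell(&b, &a); // fine: b --> a
    // bindCell(&a, &b) would trip the assert, because following b already ends at a.
}
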
diff --git a/Analysis/src/Unifier2.cpp b/Analysis/src/Unifier2.cpp index 62d051d6..34fc6ee9 100644 --- a/Analysis/src/Unifier2.cpp +++ b/Analysis/src/Unifier2.cpp @@ -145,33 +145,16 @@ bool Unifier2::unify(TypeId subTy, TypeId superTy) FreeType* subFree = getMutable(subTy); FreeType* superFree = getMutable(superTy); - if (subFree && superFree) - { - DenseHashSet seen{nullptr}; - if (OccursCheckResult::Fail == occursCheck(seen, subTy, superTy)) - { - asMutable(subTy)->ty.emplace(builtinTypes->errorRecoveryType()); - return false; - } - else if (OccursCheckResult::Fail == occursCheck(seen, superTy, subTy)) - { - asMutable(subTy)->ty.emplace(builtinTypes->errorRecoveryType()); - return false; - } - - superFree->lowerBound = mkUnion(subFree->lowerBound, superFree->lowerBound); - superFree->upperBound = mkIntersection(subFree->upperBound, superFree->upperBound); - asMutable(subTy)->ty.emplace(superTy); - } - else if (subFree) - { - return unifyFreeWithType(subTy, superTy); - } - else if (superFree) + if (superFree) { superFree->lowerBound = mkUnion(superFree->lowerBound, subTy); } + if (subFree) + { + return unifyFreeWithType(subTy, superTy); + } + if (subFree || superFree) return true; @@ -516,11 +499,11 @@ bool Unifier2::unify(TypePackId subTp, TypePackId superTp) DenseHashSet seen{nullptr}; if (OccursCheckResult::Fail == occursCheck(seen, subTp, superTp)) { - asMutable(subTp)->ty.emplace(builtinTypes->errorRecoveryTypePack()); + emplaceTypePack(asMutable(subTp), builtinTypes->errorTypePack); return false; } - asMutable(subTp)->ty.emplace(superTp); + emplaceTypePack(asMutable(subTp), superTp); return true; } @@ -529,11 +512,11 @@ bool Unifier2::unify(TypePackId subTp, TypePackId superTp) DenseHashSet seen{nullptr}; if (OccursCheckResult::Fail == occursCheck(seen, superTp, subTp)) { - asMutable(superTp)->ty.emplace(builtinTypes->errorRecoveryTypePack()); + emplaceTypePack(asMutable(superTp), builtinTypes->errorTypePack); return false; } - asMutable(superTp)->ty.emplace(subTp); + emplaceTypePack(asMutable(superTp), subTp); return true; } @@ -567,13 +550,13 @@ bool Unifier2::unify(TypePackId subTp, TypePackId superTp) { TypePackId followedSubTail = follow(*subTail); if (get(followedSubTail)) - asMutable(followedSubTail)->ty.emplace(builtinTypes->emptyTypePack); + emplaceTypePack(asMutable(followedSubTail), builtinTypes->emptyTypePack); } else if (superTail) { TypePackId followedSuperTail = follow(*superTail); if (get(followedSuperTail)) - asMutable(followedSuperTail)->ty.emplace(builtinTypes->emptyTypePack); + emplaceTypePack(asMutable(followedSuperTail), builtinTypes->emptyTypePack); } return true; @@ -759,28 +742,90 @@ struct MutatingGeneralizer : TypeOnceVisitor return; seen.insert(haystack); - std::vector* parts = nullptr; if (UnionType* ut = getMutable(haystack)) - parts = &ut->options; - else if (IntersectionType* it = getMutable(needle)) - parts = &it->parts; - else - return; - - LUAU_ASSERT(parts); - - for (TypeId& option : *parts) { - // FIXME: I bet this function has reentrancy problems - option = follow(option); - if (option == needle) - option = replacement; + for (auto iter = ut->options.begin(); iter != ut->options.end();) + { + // FIXME: I bet this function has reentrancy problems + TypeId option = follow(*iter); - // TODO seen set - else if (get(option)) - replace(seen, option, needle, haystack); - else if (get(option)) - replace(seen, option, needle, haystack); + if (option == needle && get(replacement)) + { + iter = ut->options.erase(iter); + continue; + } + + if (option == 
needle) + { + *iter = replacement; + iter++; + continue; + } + + // advance the iterator, nothing after this can use it. + iter++; + + if (seen.find(option)) + continue; + seen.insert(option); + + if (get(option)) + replace(seen, option, needle, haystack); + else if (get(option)) + replace(seen, option, needle, haystack); + } + + if (ut->options.size() == 1) + { + TypeId onlyType = ut->options[0]; + LUAU_ASSERT(onlyType != haystack); + emplaceType(asMutable(haystack), onlyType); + } + + return; + } + + if (IntersectionType* it = getMutable(needle)) + { + for (auto iter = it->parts.begin(); iter != it->parts.end();) + { + // FIXME: I bet this function has reentrancy problems + TypeId part = follow(*iter); + + if (part == needle && get(replacement)) + { + iter = it->parts.erase(iter); + continue; + } + + if (part == needle) + { + *iter = replacement; + iter++; + continue; + } + + // advance the iterator, nothing after this can use it. + iter++; + + if (seen.find(part)) + continue; + seen.insert(part); + + if (get(part)) + replace(seen, part, needle, haystack); + else if (get(part)) + replace(seen, part, needle, haystack); + } + + if (it->parts.size() == 1) + { + TypeId onlyType = it->parts[0]; + LUAU_ASSERT(onlyType != needle); + emplaceType(asMutable(needle), onlyType); + } + + return; } } @@ -807,7 +852,7 @@ struct MutatingGeneralizer : TypeOnceVisitor traverse(ft->upperBound); // It is possible for the above traverse() calls to cause ty to be - // transmuted. We must reaquire ft if this happens. + // transmuted. We must reacquire ft if this happens. ty = follow(ty); ft = get(ty); if (!ft) @@ -852,7 +897,17 @@ struct MutatingGeneralizer : TypeOnceVisitor DenseHashSet replaceSeen{nullptr}; replace(replaceSeen, lb, ty, builtinTypes->unknownType); } - emplaceType(asMutable(ty), lb); + + if (lb != ty) + emplaceType(asMutable(ty), lb); + else if (!isWithinFunction || (positiveCount + negativeCount == 1)) + emplaceType(asMutable(ty), builtinTypes->unknownType); + else + { + // if the lower bound is the type in question, we don't actually have a lower bound. + emplaceType(asMutable(ty), scope); + generics.push_back(ty); + } } else { @@ -864,7 +919,17 @@ struct MutatingGeneralizer : TypeOnceVisitor DenseHashSet replaceSeen{nullptr}; replace(replaceSeen, ub, ty, builtinTypes->neverType); } - emplaceType(asMutable(ty), ub); + + if (ub != ty) + emplaceType(asMutable(ty), ub); + else if (!isWithinFunction || (positiveCount + negativeCount == 1)) + emplaceType(asMutable(ty), builtinTypes->unknownType); + else + { + // if the upper bound is the type in question, we don't actually have an upper bound. 
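
// Aside (illustrative sketch, not part of the diff): the generalization fallbacks added above
// handle a free type whose recorded lower/upper bound is the type itself, i.e. no real bound.
// Outside a function signature, or when the type occurs only once, it carries no information
// and is replaced with `unknown`; otherwise it becomes a fresh generic so its occurrences stay
// linked. The helper below only restates that decision; Occurrences/Generalized are invented.
struct Occurrences
{
    int positive = 0; // uses in output/positive positions
    int negative = 0; // uses in input/negative positions
};

enum class Generalized
{
    Unknown,
    Generic
};

Generalized decideGeneralization(const Occurrences& o, bool isWithinFunction)
{
    if (!isWithinFunction || (o.positive + o.negative == 1))
        return Generalized::Unknown; // a single unconstrained use: nothing worth naming
    return Generalized::Generic;     // multiple uses must keep referring to one type
}
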
+ emplaceType(asMutable(ty), scope); + generics.push_back(ty); + } } return false; @@ -909,10 +974,10 @@ struct MutatingGeneralizer : TypeOnceVisitor const size_t negativeCount = getCount(negativeTypes, tp); if (1 == positiveCount + negativeCount) - asMutable(tp)->ty.emplace(builtinTypes->unknownTypePack); + emplaceTypePack(asMutable(tp), builtinTypes->unknownTypePack); else { - asMutable(tp)->ty.emplace(scope); + emplaceTypePack(asMutable(tp), scope); genericPacks.push_back(tp); } diff --git a/CLI/Bytecode.cpp b/CLI/Bytecode.cpp index 76faa6fe..02ef3237 100644 --- a/CLI/Bytecode.cpp +++ b/CLI/Bytecode.cpp @@ -25,6 +25,7 @@ static Luau::CompileOptions copts() Luau::CompileOptions result = {}; result.optimizationLevel = globalOptions.optimizationLevel; result.debugLevel = globalOptions.debugLevel; + result.typeInfoLevel = 1; return result; } diff --git a/CLI/Compile.cpp b/CLI/Compile.cpp index ee253d39..44a6ef77 100644 --- a/CLI/Compile.cpp +++ b/CLI/Compile.cpp @@ -45,6 +45,7 @@ struct GlobalOptions { int optimizationLevel = 1; int debugLevel = 1; + int typeInfoLevel = 0; const char* vectorLib = nullptr; const char* vectorCtor = nullptr; @@ -56,6 +57,7 @@ static Luau::CompileOptions copts() Luau::CompileOptions result = {}; result.optimizationLevel = globalOptions.optimizationLevel; result.debugLevel = globalOptions.debugLevel; + result.typeInfoLevel = globalOptions.typeInfoLevel; result.vectorLib = globalOptions.vectorLib; result.vectorCtor = globalOptions.vectorCtor; @@ -324,7 +326,7 @@ static bool compileFile(const char* name, CompileFormat format, Luau::CodeGen::A if (format == CompileFormat::Text) { bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Code | Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Locals | - Luau::BytecodeBuilder::Dump_Remarks); + Luau::BytecodeBuilder::Dump_Remarks | Luau::BytecodeBuilder::Dump_Types); bcb.setDumpSource(*source); } else if (format == CompileFormat::Remarks) @@ -487,6 +489,16 @@ int main(int argc, char** argv) } globalOptions.debugLevel = level; } + else if (strncmp(argv[i], "-t", 2) == 0) + { + int level = atoi(argv[i] + 2); + if (level < 0 || level > 1) + { + fprintf(stderr, "Error: Type info level must be between 0 and 1 inclusive.\n"); + return 1; + } + globalOptions.typeInfoLevel = level; + } else if (strncmp(argv[i], "--target=", 9) == 0) { const char* value = argv[i] + 9; diff --git a/CLI/Repl.cpp b/CLI/Repl.cpp index 7eb7e181..d92dcd3e 100644 --- a/CLI/Repl.cpp +++ b/CLI/Repl.cpp @@ -88,6 +88,7 @@ static Luau::CompileOptions copts() Luau::CompileOptions result = {}; result.optimizationLevel = globalOptions.optimizationLevel; result.debugLevel = globalOptions.debugLevel; + result.typeInfoLevel = 1; result.coverageLevel = coverageActive() ? 
2 : 0; return result; diff --git a/CodeGen/include/Luau/BytecodeAnalysis.h b/CodeGen/include/Luau/BytecodeAnalysis.h index c75dc935..11af90da 100644 --- a/CodeGen/include/Luau/BytecodeAnalysis.h +++ b/CodeGen/include/Luau/BytecodeAnalysis.h @@ -14,6 +14,7 @@ namespace CodeGen struct IrFunction; +void loadBytecodeTypeInfo(IrFunction& function); void buildBytecodeBlocks(IrFunction& function, const std::vector& jumpTargets); void analyzeBytecodeTypes(IrFunction& function); diff --git a/CodeGen/include/Luau/CodeGen.h b/CodeGen/include/Luau/CodeGen.h index 22dd000c..9765035b 100644 --- a/CodeGen/include/Luau/CodeGen.h +++ b/CodeGen/include/Luau/CodeGen.h @@ -114,7 +114,6 @@ void setNativeExecutionEnabled(lua_State* L, bool enabled); using ModuleId = std::array; // Builds target function and all inner functions -CodeGenCompilationResult compile_DEPRECATED(lua_State* L, int idx, unsigned int flags = 0, CompilationStats* stats = nullptr); CompilationResult compile(lua_State* L, int idx, unsigned int flags = 0, CompilationStats* stats = nullptr); CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags = 0, CompilationStats* stats = nullptr); @@ -168,6 +167,7 @@ struct AssemblyOptions bool includeAssembly = false; bool includeIr = false; bool includeOutlinedCode = false; + bool includeIrTypes = false; IncludeIrPrefix includeIrPrefix = IncludeIrPrefix::Yes; IncludeUseInfo includeUseInfo = IncludeUseInfo::Yes; diff --git a/CodeGen/include/Luau/IrData.h b/CodeGen/include/Luau/IrData.h index 7cefec7b..b00fffab 100644 --- a/CodeGen/include/Luau/IrData.h +++ b/CodeGen/include/Luau/IrData.h @@ -977,6 +977,25 @@ struct BytecodeTypes uint8_t c = LBC_TYPE_ANY; }; +struct BytecodeRegTypeInfo +{ + uint8_t type = LBC_TYPE_ANY; + uint8_t reg = 0; // Register slot where variable is stored + int startpc = 0; // First point where variable is alive (could be before variable has been assigned a value) + int endpc = 0; // First point where variable is dead +}; + +struct BytecodeTypeInfo +{ + std::vector argumentTypes; + std::vector regTypes; + std::vector upvalueTypes; + + // Offsets into regTypes for each individual register + // One extra element at the end contains the vector size for easier arr[Rn], arr[Rn + 1] range access + std::vector regTypeOffsets; +}; + struct IrFunction { std::vector blocks; @@ -994,6 +1013,8 @@ struct IrFunction std::vector valueRestoreOps; std::vector validRestoreOpBlocks; + BytecodeTypeInfo bcTypeInfo; + Proto* proto = nullptr; bool variadic = false; diff --git a/CodeGen/include/Luau/IrDump.h b/CodeGen/include/Luau/IrDump.h index ab7f154d..dcca3c7b 100644 --- a/CodeGen/include/Luau/IrDump.h +++ b/CodeGen/include/Luau/IrDump.h @@ -30,6 +30,9 @@ void toString(IrToStringContext& ctx, const IrBlock& block, uint32_t index); // void toString(IrToStringContext& ctx, IrOp op); void toString(std::string& result, IrConst constant); + +const char* getBytecodeTypeName(uint8_t type); + void toString(std::string& result, const BytecodeTypes& bcTypes); void toStringDetailed( diff --git a/CodeGen/src/BytecodeAnalysis.cpp b/CodeGen/src/BytecodeAnalysis.cpp index bc9bd23a..e327d94e 100644 --- a/CodeGen/src/BytecodeAnalysis.cpp +++ b/CodeGen/src/BytecodeAnalysis.cpp @@ -8,6 +8,9 @@ #include "lobject.h" LUAU_FASTFLAG(LuauCodegenDirectUserdataFlow) +LUAU_FASTFLAG(LuauLoadTypeInfo) // Because new VM typeinfo load changes the format used by Codegen, same flag is used +LUAU_FASTFLAGVARIABLE(LuauCodegenTypeInfo, false) // New analysis is flagged separately 
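
// Aside (illustrative sketch, not part of the diff): how an embedder might opt into the type
// information that the CLI changes above expose through -t / typeInfoLevel. This assumes the
// CompileOptions::typeInfoLevel field used above and the string-based Luau::compile() overload
// declared in Luau/Compiler.h; treat it as a sketch of the wiring, not a definitive API guide.
#include "Luau/Compiler.h"

#include <string>

std::string compileWithTypeInfo(const std::string& source)
{
    Luau::CompileOptions options;
    options.optimizationLevel = 2; // equivalent to -O2
    options.debugLevel = 1;        // equivalent to -g1
    options.typeInfoLevel = 1;     // emit per-function type info consumed by native codegen (-t1)

    return Luau::compile(source, options); // compiled bytecode blob
}
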
+LUAU_FASTFLAG(LuauTypeInfoLookupImprovement) namespace Luau { @@ -16,9 +19,227 @@ namespace CodeGen static bool hasTypedParameters(Proto* proto) { + CODEGEN_ASSERT(!FFlag::LuauLoadTypeInfo); + return proto->typeinfo && proto->numparams != 0; } +template +static T read(uint8_t* data, size_t& offset) +{ + CODEGEN_ASSERT(FFlag::LuauLoadTypeInfo); + + T result; + memcpy(&result, data + offset, sizeof(T)); + offset += sizeof(T); + + return result; +} + +static uint32_t readVarInt(uint8_t* data, size_t& offset) +{ + CODEGEN_ASSERT(FFlag::LuauLoadTypeInfo); + + uint32_t result = 0; + uint32_t shift = 0; + + uint8_t byte; + + do + { + byte = read(data, offset); + result |= (byte & 127) << shift; + shift += 7; + } while (byte & 128); + + return result; +} + +void loadBytecodeTypeInfo(IrFunction& function) +{ + CODEGEN_ASSERT(FFlag::LuauLoadTypeInfo); + + Proto* proto = function.proto; + + if (FFlag::LuauTypeInfoLookupImprovement) + { + if (!proto) + return; + } + else + { + if (!proto || !proto->typeinfo) + return; + } + + BytecodeTypeInfo& typeInfo = function.bcTypeInfo; + + // If there is no typeinfo, we generate default values for arguments and upvalues + if (FFlag::LuauTypeInfoLookupImprovement && !proto->typeinfo) + { + typeInfo.argumentTypes.resize(proto->numparams, LBC_TYPE_ANY); + typeInfo.upvalueTypes.resize(proto->nups, LBC_TYPE_ANY); + return; + } + + uint8_t* data = proto->typeinfo; + size_t offset = 0; + + uint32_t typeSize = readVarInt(data, offset); + uint32_t upvalCount = readVarInt(data, offset); + uint32_t localCount = readVarInt(data, offset); + + CODEGEN_ASSERT(upvalCount == unsigned(proto->nups)); + + if (typeSize != 0) + { + uint8_t* types = (uint8_t*)data + offset; + + CODEGEN_ASSERT(typeSize == uint32_t(2 + proto->numparams)); + CODEGEN_ASSERT(types[0] == LBC_TYPE_FUNCTION); + CODEGEN_ASSERT(types[1] == proto->numparams); + + typeInfo.argumentTypes.resize(proto->numparams); + + // Skip two bytes of function type introduction + memcpy(typeInfo.argumentTypes.data(), types + 2, proto->numparams); + offset += typeSize; + } + + if (upvalCount != 0) + { + typeInfo.upvalueTypes.resize(upvalCount); + + uint8_t* types = (uint8_t*)data + offset; + memcpy(typeInfo.upvalueTypes.data(), types, upvalCount); + offset += upvalCount; + } + + if (localCount != 0) + { + typeInfo.regTypes.resize(localCount); + + for (uint32_t i = 0; i < localCount; i++) + { + BytecodeRegTypeInfo& info = typeInfo.regTypes[i]; + + info.type = read(data, offset); + info.reg = read(data, offset); + info.startpc = readVarInt(data, offset); + info.endpc = info.startpc + readVarInt(data, offset); + } + } + + CODEGEN_ASSERT(offset == size_t(proto->sizetypeinfo)); +} + +static void prepareRegTypeInfoLookups(BytecodeTypeInfo& typeInfo) +{ + CODEGEN_ASSERT(FFlag::LuauTypeInfoLookupImprovement); + + // Sort by register first, then by end PC + std::sort(typeInfo.regTypes.begin(), typeInfo.regTypes.end(), [](const BytecodeRegTypeInfo& a, const BytecodeRegTypeInfo& b) { + if (a.reg != b.reg) + return a.reg < b.reg; + + return a.endpc < b.endpc; + }); + + // Prepare data for all registers as 'regTypes' might be missing temporaries + typeInfo.regTypeOffsets.resize(256 + 1); + + for (size_t i = 0; i < typeInfo.regTypes.size(); i++) + { + const BytecodeRegTypeInfo& el = typeInfo.regTypes[i]; + + // Data is sorted by register order, so when we visit register Rn last time + // If means that register Rn+1 starts one after the slot where Rn ends + typeInfo.regTypeOffsets[el.reg + 1] = uint32_t(i + 1); + } + + // Fill in holes with 
the offset of the previous register + for (size_t i = 1; i < typeInfo.regTypeOffsets.size(); i++) + { + uint32_t& el = typeInfo.regTypeOffsets[i]; + + if (el == 0) + el = typeInfo.regTypeOffsets[i - 1]; + } +} + +static BytecodeRegTypeInfo* findRegType(BytecodeTypeInfo& info, uint8_t reg, int pc) +{ + CODEGEN_ASSERT(FFlag::LuauCodegenTypeInfo); + + if (FFlag::LuauTypeInfoLookupImprovement) + { + auto b = info.regTypes.begin() + info.regTypeOffsets[reg]; + auto e = info.regTypes.begin() + info.regTypeOffsets[reg + 1]; + + // Doen't have info + if (b == e) + return nullptr; + + // No info after the last live range + if (pc >= (e - 1)->endpc) + return nullptr; + + for (auto it = b; it != e; ++it) + { + CODEGEN_ASSERT(it->reg == reg); + + if (pc >= it->startpc && pc < it->endpc) + return &*it; + } + + return nullptr; + } + else + { + for (BytecodeRegTypeInfo& el : info.regTypes) + { + if (reg == el.reg && pc >= el.startpc && pc < el.endpc) + return ⪙ + } + + return nullptr; + } +} + +static void refineRegType(BytecodeTypeInfo& info, uint8_t reg, int pc, uint8_t ty) +{ + CODEGEN_ASSERT(FFlag::LuauCodegenTypeInfo); + + if (ty != LBC_TYPE_ANY) + { + if (BytecodeRegTypeInfo* regType = findRegType(info, reg, pc)) + { + // Right now, we only refine register types that were unknown + if (regType->type == LBC_TYPE_ANY) + regType->type = ty; + } + else if (FFlag::LuauTypeInfoLookupImprovement && reg < info.argumentTypes.size()) + { + if (info.argumentTypes[reg] == LBC_TYPE_ANY) + info.argumentTypes[reg] = ty; + } + } +} + +static void refineUpvalueType(BytecodeTypeInfo& info, int up, uint8_t ty) +{ + CODEGEN_ASSERT(FFlag::LuauCodegenTypeInfo); + + if (ty != LBC_TYPE_ANY) + { + if (size_t(up) < info.upvalueTypes.size()) + { + if (info.upvalueTypes[up] == LBC_TYPE_ANY) + info.upvalueTypes[up] = ty; + } + } +} + static uint8_t getBytecodeConstantTag(Proto* proto, unsigned ki) { TValue protok = proto->k[ki]; @@ -389,6 +610,11 @@ void analyzeBytecodeTypes(IrFunction& function) Proto* proto = function.proto; CODEGEN_ASSERT(proto); + BytecodeTypeInfo& bcTypeInfo = function.bcTypeInfo; + + if (FFlag::LuauTypeInfoLookupImprovement) + prepareRegTypeInfoLookups(bcTypeInfo); + // Setup our current knowledge of type tags based on arguments uint8_t regTags[256]; memset(regTags, LBC_TYPE_ANY, 256); @@ -403,16 +629,29 @@ void analyzeBytecodeTypes(IrFunction& function) // At the block start, reset or knowledge to the starting state // In the future we might be able to propagate some info between the blocks as well - if (hasTypedParameters(proto)) + if (FFlag::LuauLoadTypeInfo) { - for (int i = 0; i < proto->numparams; ++i) + for (size_t i = 0; i < bcTypeInfo.argumentTypes.size(); i++) { - uint8_t et = proto->typeinfo[2 + i]; + uint8_t et = bcTypeInfo.argumentTypes[i]; // TODO: if argument is optional, this might force a VM exit unnecessarily regTags[i] = et & ~LBC_TYPE_OPTIONAL_BIT; } } + else + { + if (hasTypedParameters(proto)) + { + for (int i = 0; i < proto->numparams; ++i) + { + uint8_t et = proto->typeinfo[2 + i]; + + // TODO: if argument is optional, this might force a VM exit unnecessarily + regTags[i] = et & ~LBC_TYPE_OPTIONAL_BIT; + } + } + } for (int i = proto->numparams; i < proto->maxstacksize; ++i) regTags[i] = LBC_TYPE_ANY; @@ -422,6 +661,18 @@ void analyzeBytecodeTypes(IrFunction& function) const Instruction* pc = &proto->code[i]; LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc)); + if (FFlag::LuauCodegenTypeInfo) + { + // Assign known register types from local type information + // TODO: this is an 
expensive walk for each instruction + // TODO: it's best to lookup when register is actually used in the instruction + for (BytecodeRegTypeInfo& el : bcTypeInfo.regTypes) + { + if (el.type != LBC_TYPE_ANY && i >= el.startpc && i < el.endpc) + regTags[el.reg] = el.type; + } + } + BytecodeTypes& bcType = function.bcTypes[i]; switch (op) @@ -440,6 +691,9 @@ void analyzeBytecodeTypes(IrFunction& function) int ra = LUAU_INSN_A(*pc); regTags[ra] = LBC_TYPE_BOOLEAN; bcType.result = regTags[ra]; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_LOADN: @@ -447,6 +701,9 @@ void analyzeBytecodeTypes(IrFunction& function) int ra = LUAU_INSN_A(*pc); regTags[ra] = LBC_TYPE_NUMBER; bcType.result = regTags[ra]; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_LOADK: @@ -456,6 +713,9 @@ void analyzeBytecodeTypes(IrFunction& function) bcType.a = getBytecodeConstantTag(proto, kb); regTags[ra] = bcType.a; bcType.result = regTags[ra]; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_LOADKX: @@ -465,6 +725,9 @@ void analyzeBytecodeTypes(IrFunction& function) bcType.a = getBytecodeConstantTag(proto, kb); regTags[ra] = bcType.a; bcType.result = regTags[ra]; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_MOVE: @@ -474,6 +737,9 @@ void analyzeBytecodeTypes(IrFunction& function) bcType.a = regTags[rb]; regTags[ra] = regTags[rb]; bcType.result = regTags[ra]; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_GETTABLE: @@ -767,6 +1033,9 @@ void analyzeBytecodeTypes(IrFunction& function) regTags[ra + 2] = bcType.b; regTags[ra + 3] = bcType.c; regTags[ra] = bcType.result; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_FASTCALL1: @@ -783,6 +1052,9 @@ void analyzeBytecodeTypes(IrFunction& function) regTags[LUAU_INSN_B(*pc)] = bcType.a; regTags[ra] = bcType.result; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_FASTCALL2: @@ -799,6 +1071,9 @@ void analyzeBytecodeTypes(IrFunction& function) regTags[LUAU_INSN_B(*pc)] = bcType.a; regTags[int(pc[1])] = bcType.b; regTags[ra] = bcType.result; + + if (FFlag::LuauCodegenTypeInfo) + refineRegType(bcTypeInfo, ra, i, bcType.result); break; } case LOP_FORNPREP: @@ -808,6 +1083,13 @@ void analyzeBytecodeTypes(IrFunction& function) regTags[ra] = LBC_TYPE_NUMBER; regTags[ra + 1] = LBC_TYPE_NUMBER; regTags[ra + 2] = LBC_TYPE_NUMBER; + + if (FFlag::LuauCodegenTypeInfo) + { + refineRegType(bcTypeInfo, ra, i, regTags[ra]); + refineRegType(bcTypeInfo, ra + 1, i, regTags[ra + 1]); + refineRegType(bcTypeInfo, ra + 2, i, regTags[ra + 2]); + } break; } case LOP_FORNLOOP: @@ -856,6 +1138,39 @@ void analyzeBytecodeTypes(IrFunction& function) } break; } + case LOP_GETUPVAL: + { + if (FFlag::LuauCodegenTypeInfo) + { + int ra = LUAU_INSN_A(*pc); + int up = LUAU_INSN_B(*pc); + + bcType.a = LBC_TYPE_ANY; + + if (size_t(up) < bcTypeInfo.upvalueTypes.size()) + { + uint8_t et = bcTypeInfo.upvalueTypes[up]; + + // TODO: if argument is optional, this might force a VM exit unnecessarily + bcType.a = et & ~LBC_TYPE_OPTIONAL_BIT; + } + + regTags[ra] = bcType.a; + bcType.result = regTags[ra]; + } + break; + } + case LOP_SETUPVAL: + { + if (FFlag::LuauCodegenTypeInfo) + { + int ra = LUAU_INSN_A(*pc); + int up = 
LUAU_INSN_B(*pc); + + refineUpvalueType(bcTypeInfo, up, regTags[ra]); + } + break; + } case LOP_GETGLOBAL: case LOP_SETGLOBAL: case LOP_CALL: @@ -876,8 +1191,6 @@ void analyzeBytecodeTypes(IrFunction& function) case LOP_JUMPXEQKN: case LOP_JUMPXEQKS: case LOP_SETLIST: - case LOP_GETUPVAL: - case LOP_SETUPVAL: case LOP_CLOSEUPVALS: case LOP_FORGLOOP: case LOP_FORGPREP_NEXT: diff --git a/CodeGen/src/CodeGen.cpp b/CodeGen/src/CodeGen.cpp index bdabfd1d..9ef9980a 100644 --- a/CodeGen/src/CodeGen.cpp +++ b/CodeGen/src/CodeGen.cpp @@ -44,7 +44,6 @@ LUAU_FASTFLAGVARIABLE(DebugCodegenNoOpt, false) LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false) LUAU_FASTFLAGVARIABLE(DebugCodegenSkipNumbering, false) -LUAU_FASTFLAGVARIABLE(LuauCodegenDetailedCompilationResult, false) // Per-module IR instruction count limit LUAU_FASTINTVARIABLE(CodegenHeuristicsInstructionLimit, 1'048'576) // 1 M @@ -445,148 +444,8 @@ void setNativeExecutionEnabled(lua_State* L, bool enabled) } } -CodeGenCompilationResult compile_DEPRECATED(lua_State* L, int idx, unsigned int flags, CompilationStats* stats) -{ - CODEGEN_ASSERT(!FFlag::LuauCodegenContext); - CODEGEN_ASSERT(!FFlag::LuauCodegenDetailedCompilationResult); - - CODEGEN_ASSERT(lua_isLfunction(L, idx)); - const TValue* func = luaA_toobject(L, idx); - - Proto* root = clvalue(func)->l.p; - - if ((flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0) - return CodeGenCompilationResult::NotNativeModule; - - // If initialization has failed, do not compile any functions - NativeState* data = getNativeState(L); - if (!data) - return CodeGenCompilationResult::CodeGenNotInitialized; - - std::vector protos; - gatherFunctions(protos, root, flags); - - // Skip protos that have been compiled during previous invocations of CodeGen::compile - protos.erase(std::remove_if(protos.begin(), protos.end(), - [](Proto* p) { - return p == nullptr || p->execdata != nullptr; - }), - protos.end()); - - if (protos.empty()) - return CodeGenCompilationResult::NothingToCompile; - - if (stats != nullptr) - stats->functionsTotal = uint32_t(protos.size()); - -#if defined(__aarch64__) - static unsigned int cpuFeatures = getCpuFeaturesA64(); - A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures); -#else - X64::AssemblyBuilderX64 build(/* logText= */ false); -#endif - - ModuleHelpers helpers; -#if defined(__aarch64__) - A64::assembleHelpers(build, helpers); -#else - X64::assembleHelpers(build, helpers); -#endif - - std::vector results; - results.reserve(protos.size()); - - uint32_t totalIrInstCount = 0; - - CodeGenCompilationResult codeGenCompilationResult = CodeGenCompilationResult::Success; - - for (Proto* p : protos) - { - // If compiling a proto fails, we want to propagate the failure via codeGenCompilationResult - // If multiple compilations fail, we only use the failure from the first unsuccessful compilation. - CodeGenCompilationResult temp = CodeGenCompilationResult::Success; - - if (std::optional np = createNativeFunction(build, helpers, p, totalIrInstCount, temp)) - results.push_back(*np); - // second compilation failure onwards, this condition fails and codeGenCompilationResult is not assigned. 
- else if (codeGenCompilationResult == CodeGenCompilationResult::Success) - codeGenCompilationResult = temp; - } - - // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module - if (!build.finalize()) - { - for (OldNativeProto result : results) - destroyExecData(result.execdata); - - return CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure; - } - - // If no functions were assembled, we don't need to allocate/copy executable pages for helpers - if (results.empty()) - { - LUAU_ASSERT(codeGenCompilationResult != CodeGenCompilationResult::Success); - return codeGenCompilationResult; - } - - uint8_t* nativeData = nullptr; - size_t sizeNativeData = 0; - uint8_t* codeStart = nullptr; - if (!data->codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast(build.code.data()), - int(build.code.size() * sizeof(build.code[0])), nativeData, sizeNativeData, codeStart)) - { - for (OldNativeProto result : results) - destroyExecData(result.execdata); - - return CodeGenCompilationResult::AllocationFailed; - } - - if (gPerfLogFn && results.size() > 0) - gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), ""); - - for (size_t i = 0; i < results.size(); ++i) - { - uint32_t begin = uint32_t(results[i].exectarget); - uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0])); - CODEGEN_ASSERT(begin < end); - - if (gPerfLogFn) - logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin); - - ExtraExecData* extra = getExtraExecData(results[i].p, results[i].execdata); - extra->codeSize = end - begin; - } - - for (const OldNativeProto& result : results) - { - // the memory is now managed by VM and will be freed via onDestroyFunction - result.p->execdata = result.execdata; - result.p->exectarget = uintptr_t(codeStart) + result.exectarget; - result.p->codeentry = &kCodeEntryInsn; - } - - if (stats != nullptr) - { - for (const OldNativeProto& result : results) - { - stats->bytecodeSizeBytes += result.p->sizecode * sizeof(Instruction); - - // Account for the native -> bytecode instruction offsets mapping: - stats->nativeMetadataSizeBytes += result.p->sizecode * sizeof(uint32_t); - } - - stats->functionsCompiled += uint32_t(results.size()); - stats->nativeCodeSizeBytes += build.code.size(); - stats->nativeDataSizeBytes += build.data.size(); - } - - return codeGenCompilationResult; -} - static CompilationResult compile_OLD(lua_State* L, int idx, unsigned int flags, CompilationStats* stats) { - CODEGEN_ASSERT(FFlag::LuauCodegenDetailedCompilationResult); - CompilationResult compilationResult; CODEGEN_ASSERT(lua_isLfunction(L, idx)); diff --git a/CodeGen/src/CodeGenAssembly.cpp b/CodeGen/src/CodeGenAssembly.cpp index 5485a22c..96c73ce2 100644 --- a/CodeGen/src/CodeGenAssembly.cpp +++ b/CodeGen/src/CodeGenAssembly.cpp @@ -1,7 +1,9 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #include "Luau/CodeGen.h" +#include "Luau/BytecodeAnalysis.h" #include "Luau/BytecodeUtils.h" #include "Luau/BytecodeSummary.h" +#include "Luau/IrDump.h" #include "CodeGenLower.h" @@ -10,6 +12,8 @@ #include "lapi.h" +LUAU_FASTFLAG(LuauCodegenTypeInfo) + namespace Luau { namespace CodeGen @@ -44,6 +48,35 @@ static void logFunctionHeader(AssemblyBuilder& build, Proto* proto) build.logAppend("\n"); } +template +static void logFunctionTypes(AssemblyBuilder& build, const 
IrFunction& function) +{ + CODEGEN_ASSERT(FFlag::LuauCodegenTypeInfo); + + const BytecodeTypeInfo& typeInfo = function.bcTypeInfo; + + for (size_t i = 0; i < typeInfo.argumentTypes.size(); i++) + { + uint8_t ty = typeInfo.argumentTypes[i]; + + if (ty != LBC_TYPE_ANY) + build.logAppend("; R%d: %s [argument]\n", int(i), getBytecodeTypeName(ty)); + } + + for (size_t i = 0; i < typeInfo.upvalueTypes.size(); i++) + { + uint8_t ty = typeInfo.upvalueTypes[i]; + + if (ty != LBC_TYPE_ANY) + build.logAppend("; U%d: %s\n", int(i), getBytecodeTypeName(ty)); + } + + for (const BytecodeRegTypeInfo& el : typeInfo.regTypes) + { + build.logAppend("; R%d: %s from %d to %d\n", el.reg, getBytecodeTypeName(el.type), el.startpc, el.endpc); + } +} + unsigned getInstructionCount(const Instruction* insns, const unsigned size) { unsigned count = 0; @@ -100,6 +133,9 @@ static std::string getAssemblyImpl(AssemblyBuilder& build, const TValue* func, A if (options.includeAssembly || options.includeIr) logFunctionHeader(build, p); + if (FFlag::LuauCodegenTypeInfo && options.includeIrTypes) + logFunctionTypes(build, ir.function); + CodeGenCompilationResult result = CodeGenCompilationResult::Success; if (!lowerFunction(ir, build, helpers, p, options, stats, result)) diff --git a/CodeGen/src/CodeGenContext.cpp b/CodeGen/src/CodeGenContext.cpp index f8adf3b7..d9e3c4b3 100644 --- a/CodeGen/src/CodeGenContext.cpp +++ b/CodeGen/src/CodeGenContext.cpp @@ -14,6 +14,7 @@ LUAU_FASTFLAGVARIABLE(LuauCodegenContext, false) +LUAU_FASTFLAGVARIABLE(LuauCodegenCheckNullContext, false) LUAU_FASTINT(LuauCodeGenBlockSize) LUAU_FASTINT(LuauCodeGenMaxTotalSize) @@ -403,6 +404,7 @@ static size_t getMemorySize(lua_State* L, Proto* proto) static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeGenContext) noexcept { CODEGEN_ASSERT(FFlag::LuauCodegenContext); + CODEGEN_ASSERT(!FFlag::LuauCodegenCheckNullContext || codeGenContext != nullptr); lua_ExecutionCallbacks* ecb = &L->global->ecb; diff --git a/CodeGen/src/IrBuilder.cpp b/CodeGen/src/IrBuilder.cpp index 4647b902..7d285aaf 100644 --- a/CodeGen/src/IrBuilder.cpp +++ b/CodeGen/src/IrBuilder.cpp @@ -13,6 +13,9 @@ #include +LUAU_FASTFLAG(LuauLoadTypeInfo) // Because new VM typeinfo load changes the format used by Codegen, same flag is used +LUAU_FASTFLAG(LuauTypeInfoLookupImprovement) + namespace Luau { namespace CodeGen @@ -24,15 +27,17 @@ IrBuilder::IrBuilder() : constantMap({IrConstKind::Tag, ~0ull}) { } - -static bool hasTypedParameters(Proto* proto) +static bool hasTypedParameters_DEPRECATED(Proto* proto) { + CODEGEN_ASSERT(!FFlag::LuauLoadTypeInfo); + return proto->typeinfo && proto->numparams != 0; } -static void buildArgumentTypeChecks(IrBuilder& build, Proto* proto) +static void buildArgumentTypeChecks_DEPRECATED(IrBuilder& build, Proto* proto) { - CODEGEN_ASSERT(hasTypedParameters(proto)); + CODEGEN_ASSERT(!FFlag::LuauLoadTypeInfo); + CODEGEN_ASSERT(hasTypedParameters_DEPRECATED(proto)); for (int i = 0; i < proto->numparams; ++i) { @@ -108,13 +113,117 @@ static void buildArgumentTypeChecks(IrBuilder& build, Proto* proto) } } +static bool hasTypedParameters(const BytecodeTypeInfo& typeInfo) +{ + CODEGEN_ASSERT(FFlag::LuauLoadTypeInfo); + + if (FFlag::LuauTypeInfoLookupImprovement) + { + for (auto el : typeInfo.argumentTypes) + { + if (el != LBC_TYPE_ANY) + return true; + } + + return false; + } + else + { + return !typeInfo.argumentTypes.empty(); + } +} + +static void buildArgumentTypeChecks(IrBuilder& build) +{ + CODEGEN_ASSERT(FFlag::LuauLoadTypeInfo); + + const 
BytecodeTypeInfo& typeInfo = build.function.bcTypeInfo; + CODEGEN_ASSERT(hasTypedParameters(typeInfo)); + + for (size_t i = 0; i < typeInfo.argumentTypes.size(); i++) + { + uint8_t et = typeInfo.argumentTypes[i]; + + uint8_t tag = et & ~LBC_TYPE_OPTIONAL_BIT; + uint8_t optional = et & LBC_TYPE_OPTIONAL_BIT; + + if (tag == LBC_TYPE_ANY) + continue; + + IrOp load = build.inst(IrCmd::LOAD_TAG, build.vmReg(uint8_t(i))); + + IrOp nextCheck; + if (optional) + { + nextCheck = build.block(IrBlockKind::Internal); + IrOp fallbackCheck = build.block(IrBlockKind::Internal); + + build.inst(IrCmd::JUMP_EQ_TAG, load, build.constTag(LUA_TNIL), nextCheck, fallbackCheck); + + build.beginBlock(fallbackCheck); + } + + switch (tag) + { + case LBC_TYPE_NIL: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TNIL), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_BOOLEAN: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TBOOLEAN), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_NUMBER: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TNUMBER), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_STRING: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TSTRING), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_TABLE: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TTABLE), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_FUNCTION: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TFUNCTION), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_THREAD: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TTHREAD), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_USERDATA: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TUSERDATA), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_VECTOR: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TVECTOR), build.vmExit(kVmExitEntryGuardPc)); + break; + case LBC_TYPE_BUFFER: + build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TBUFFER), build.vmExit(kVmExitEntryGuardPc)); + break; + } + + if (optional) + { + build.inst(IrCmd::JUMP, nextCheck); + build.beginBlock(nextCheck); + } + } + + // If the last argument is optional, we can skip creating a new internal block since one will already have been created. + if (!(typeInfo.argumentTypes.back() & LBC_TYPE_OPTIONAL_BIT)) + { + IrOp next = build.block(IrBlockKind::Internal); + build.inst(IrCmd::JUMP, next); + + build.beginBlock(next); + } +} + void IrBuilder::buildFunctionIr(Proto* proto) { function.proto = proto; function.variadic = proto->is_vararg != 0; + if (FFlag::LuauLoadTypeInfo) + loadBytecodeTypeInfo(function); + // Reserve entry block - bool generateTypeChecks = hasTypedParameters(proto); + bool generateTypeChecks = FFlag::LuauLoadTypeInfo ? hasTypedParameters(function.bcTypeInfo) : hasTypedParameters_DEPRECATED(proto); IrOp entry = generateTypeChecks ? 
block(IrBlockKind::Internal) : IrOp{}; // Rebuild original control flow blocks @@ -128,7 +237,12 @@ void IrBuilder::buildFunctionIr(Proto* proto) if (generateTypeChecks) { beginBlock(entry); - buildArgumentTypeChecks(*this, proto); + + if (FFlag::LuauLoadTypeInfo) + buildArgumentTypeChecks(*this); + else + buildArgumentTypeChecks_DEPRECATED(*this, proto); + inst(IrCmd::JUMP, blockAtInst(0)); } else diff --git a/CodeGen/src/IrDump.cpp b/CodeGen/src/IrDump.cpp index 9a115953..48a50ecb 100644 --- a/CodeGen/src/IrDump.cpp +++ b/CodeGen/src/IrDump.cpp @@ -482,30 +482,30 @@ void toString(std::string& result, IrConst constant) const char* getBytecodeTypeName(uint8_t type) { - switch (type) + switch (type & ~LBC_TYPE_OPTIONAL_BIT) { case LBC_TYPE_NIL: - return "nil"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "nil?" : "nil"; case LBC_TYPE_BOOLEAN: - return "boolean"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "boolean?" : "boolean"; case LBC_TYPE_NUMBER: - return "number"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "number?" : "number"; case LBC_TYPE_STRING: - return "string"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "string?" : "string"; case LBC_TYPE_TABLE: - return "table"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "table?" : "table"; case LBC_TYPE_FUNCTION: - return "function"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "function?" : "function"; case LBC_TYPE_THREAD: - return "thread"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "thread?" : "thread"; case LBC_TYPE_USERDATA: - return "userdata"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "userdata?" : "userdata"; case LBC_TYPE_VECTOR: - return "vector"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "vector?" : "vector"; case LBC_TYPE_BUFFER: - return "buffer"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "buffer?" : "buffer"; case LBC_TYPE_ANY: - return "any"; + return (type & LBC_TYPE_OPTIONAL_BIT) != 0 ? "any?" 
: "any"; } CODEGEN_ASSERT(!"Unhandled type in getBytecodeTypeName"); diff --git a/CodeGen/src/IrLoweringA64.cpp b/CodeGen/src/IrLoweringA64.cpp index 65a8544d..f35a15fa 100644 --- a/CodeGen/src/IrLoweringA64.cpp +++ b/CodeGen/src/IrLoweringA64.cpp @@ -12,7 +12,6 @@ #include "lgc.h" LUAU_FASTFLAG(LuauCodegenRemoveDeadStores5) -LUAU_FASTFLAG(LuauCodegenCheckTruthyFormB) namespace Luau { @@ -1445,7 +1444,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } // fail to fallback on 'false' boolean value (falsy) - if (!FFlag::LuauCodegenCheckTruthyFormB || inst.b.kind != IrOpKind::Constant) + if (inst.b.kind != IrOpKind::Constant) { build.cbz(regOp(inst.b), target); } diff --git a/CodeGen/src/IrLoweringX64.cpp b/CodeGen/src/IrLoweringX64.cpp index bd41e6fa..66609cb7 100644 --- a/CodeGen/src/IrLoweringX64.cpp +++ b/CodeGen/src/IrLoweringX64.cpp @@ -15,8 +15,6 @@ #include "lstate.h" #include "lgc.h" -LUAU_FASTFLAGVARIABLE(LuauCodegenCheckTruthyFormB, false) - namespace Luau { namespace CodeGen @@ -1197,7 +1195,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } // fail to fallback on 'false' boolean value (falsy) - if (!FFlag::LuauCodegenCheckTruthyFormB || inst.b.kind != IrOpKind::Constant) + if (inst.b.kind != IrOpKind::Constant) { build.cmp(memRegUintOp(inst.b), 0); jumpOrAbortOnUndef(ConditionX64::Equal, inst.c, next); diff --git a/CodeGen/src/IrTranslation.cpp b/CodeGen/src/IrTranslation.cpp index 92092307..20150f9a 100644 --- a/CodeGen/src/IrTranslation.cpp +++ b/CodeGen/src/IrTranslation.cpp @@ -12,7 +12,6 @@ #include "lstate.h" #include "ltm.h" -LUAU_FASTFLAGVARIABLE(LuauCodegenLoadTVTag, false) LUAU_FASTFLAGVARIABLE(LuauCodegenDirectUserdataFlow, false) namespace Luau @@ -111,20 +110,13 @@ static void translateInstLoadConstant(IrBuilder& build, int ra, int k) build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.constDouble(protok.value.n)); build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER)); } - else if (FFlag::LuauCodegenLoadTVTag) + else { // Tag could be LUA_TSTRING or LUA_TVECTOR; for TSTRING we could generate LOAD_POINTER/STORE_POINTER/STORE_TAG, but it's not profitable; // however, it's still valuable to preserve the tag throughout the optimization pipeline to eliminate tag checks. 
IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(k), build.constInt(0), build.constTag(protok.tt)); build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load); } - else - { - // Remaining tag here right now is LUA_TSTRING, while it can be transformed to LOAD_POINTER/STORE_POINTER/STORE_TAG, it's not profitable right - // now - IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(k)); - build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load); - } } void translateInstLoadK(IrBuilder& build, const Instruction* pc) diff --git a/CodeGen/src/OptimizeConstProp.cpp b/CodeGen/src/OptimizeConstProp.cpp index f910a342..eae0baa3 100644 --- a/CodeGen/src/OptimizeConstProp.cpp +++ b/CodeGen/src/OptimizeConstProp.cpp @@ -18,8 +18,6 @@ LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3) LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64) LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false) LUAU_FASTFLAG(LuauCodegenRemoveDeadStores5) -LUAU_FASTFLAG(LuauCodegenLoadTVTag) -LUAU_FASTFLAGVARIABLE(LuauCodegenInferNumTag, false) LUAU_FASTFLAGVARIABLE(LuauCodegenLoadPropCheckRegLinkInTv, false) namespace Luau @@ -720,7 +718,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction& if (arg->cmd == IrCmd::TAG_VECTOR) tag = LUA_TVECTOR; - if (FFlag::LuauCodegenLoadTVTag && arg->cmd == IrCmd::LOAD_TVALUE && arg->c.kind != IrOpKind::None) + if (arg->cmd == IrCmd::LOAD_TVALUE && arg->c.kind != IrOpKind::None) tag = function.tagOp(arg->c); } } @@ -897,59 +895,34 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction& case IrCmd::CHECK_TAG: { uint8_t b = function.tagOp(inst.b); + uint8_t tag = state.tryGetTag(inst.a); - if (FFlag::LuauCodegenInferNumTag) + if (tag == 0xff) { - uint8_t tag = state.tryGetTag(inst.a); - - if (tag == 0xff) + if (IrOp value = state.tryGetValue(inst.a); value.kind == IrOpKind::Constant) { - if (IrOp value = state.tryGetValue(inst.a); value.kind == IrOpKind::Constant) - { - if (function.constOp(value).kind == IrConstKind::Double) - tag = LUA_TNUMBER; - } + if (function.constOp(value).kind == IrConstKind::Double) + tag = LUA_TNUMBER; } + } - if (tag != 0xff) + if (tag != 0xff) + { + if (tag == b) { - if (tag == b) - { - if (FFlag::DebugLuauAbortingChecks) - replace(function, inst.c, build.undef()); - else - kill(function, inst); - } + if (FFlag::DebugLuauAbortingChecks) + replace(function, inst.c, build.undef()); else - { - replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path - } + kill(function, inst); } else { - state.updateTag(inst.a, b); // We can assume the tag value going forward + replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path } } else { - if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff) - { - if (tag == b) - { - if (FFlag::DebugLuauAbortingChecks) - replace(function, inst.c, build.undef()); - else - kill(function, inst); - } - else - { - replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path - } - } - else - { - state.updateTag(inst.a, b); // We can assume the tag value going forward - } + state.updateTag(inst.a, b); // We can assume the tag value going forward } break; } diff --git a/CodeGen/src/OptimizeDeadStore.cpp b/CodeGen/src/OptimizeDeadStore.cpp index dc187f4a..6c1d6aff 100644 --- a/CodeGen/src/OptimizeDeadStore.cpp +++ b/CodeGen/src/OptimizeDeadStore.cpp @@ -10,7 +10,6 @@ #include "lobject.h" LUAU_FASTFLAGVARIABLE(LuauCodegenRemoveDeadStores5, false) 
-LUAU_FASTFLAG(LuauCodegenLoadTVTag) // TODO: optimization can be improved by knowing which registers are live in at each VM exit @@ -519,7 +518,7 @@ static void markDeadStoresInInst(RemoveDeadStoreState& state, IrBuilder& build, if (arg->cmd == IrCmd::TAG_VECTOR) regInfo.maybeGco = false; - if (FFlag::LuauCodegenLoadTVTag && arg->cmd == IrCmd::LOAD_TVALUE && arg->c.kind != IrOpKind::None) + if (arg->cmd == IrCmd::LOAD_TVALUE && arg->c.kind != IrOpKind::None) regInfo.maybeGco = isGCO(function.tagOp(arg->c)); } diff --git a/CodeGen/src/lcodegen.cpp b/CodeGen/src/lcodegen.cpp index de84f527..0795cd48 100644 --- a/CodeGen/src/lcodegen.cpp +++ b/CodeGen/src/lcodegen.cpp @@ -5,8 +5,6 @@ #include "lapi.h" -LUAU_FASTFLAG(LuauCodegenDetailedCompilationResult) - int luau_codegen_supported() { return Luau::CodeGen::isSupported(); @@ -19,8 +17,5 @@ void luau_codegen_create(lua_State* L) void luau_codegen_compile(lua_State* L, int idx) { - if (FFlag::LuauCodegenDetailedCompilationResult) - Luau::CodeGen::compile(L, idx); - else - Luau::CodeGen::compile_DEPRECATED(L, idx); + Luau::CodeGen::compile(L, idx); } diff --git a/Common/include/Luau/Bytecode.h b/Common/include/Luau/Bytecode.h index 5781c2b0..7012d820 100644 --- a/Common/include/Luau/Bytecode.h +++ b/Common/include/Luau/Bytecode.h @@ -47,6 +47,10 @@ // Version 4: Adds Proto::flags, typeinfo, and floor division opcodes IDIV/IDIVK. Currently supported. // Version 5: Adds SUBRK/DIVRK and vector constants. Currently supported. +// # Bytecode type information history +// Version 1: (from bytecode version 4) Type information for function signature. Currently supported. +// Version 2: (from bytecode version 4) Type information for arguments, upvalues, locals and some temporaries. Currently supported. + // Bytecode opcode, part of the instruction header enum LuauOpcode { @@ -433,7 +437,8 @@ enum LuauBytecodeTag LBC_VERSION_MAX = 5, LBC_VERSION_TARGET = 5, // Type encoding version - LBC_TYPE_VERSION = 1, + LBC_TYPE_VERSION_DEPRECATED = 1, + LBC_TYPE_VERSION = 2, // Types of constant table entries LBC_CONSTANT_NIL = 0, LBC_CONSTANT_BOOLEAN, diff --git a/Compiler/include/Luau/BytecodeBuilder.h b/Compiler/include/Luau/BytecodeBuilder.h index 99bac8ad..7f0115bb 100644 --- a/Compiler/include/Luau/BytecodeBuilder.h +++ b/Compiler/include/Luau/BytecodeBuilder.h @@ -76,6 +76,8 @@ public: void expandJumps(); void setFunctionTypeInfo(std::string value); + void pushLocalTypeInfo(LuauBytecodeType type, uint8_t reg, uint32_t startpc, uint32_t endpc); + void pushUpvalTypeInfo(LuauBytecodeType type); void setDebugFunctionName(StringRef name); void setDebugFunctionLineDefined(int line); @@ -98,6 +100,7 @@ public: Dump_Source = 1 << 2, Dump_Locals = 1 << 3, Dump_Remarks = 1 << 4, + Dump_Types = 1 << 5, }; void setDumpFlags(uint32_t flags) @@ -213,6 +216,19 @@ private: unsigned int name; }; + struct TypedLocal + { + LuauBytecodeType type; + uint8_t reg; + uint32_t startpc; + uint32_t endpc; + }; + + struct TypedUpval + { + LuauBytecodeType type; + }; + struct Jump { uint32_t source; @@ -258,6 +274,9 @@ private: std::vector debugLocals; std::vector debugUpvals; + std::vector typedLocals; + std::vector typedUpvals; + DenseHashMap stringTable; std::vector debugStrings; @@ -271,6 +290,8 @@ private: std::vector dumpSource; std::vector> dumpRemarks; + std::string tempTypeInfo; + std::string (BytecodeBuilder::*dumpFunctionPtr)(std::vector&) const = nullptr; void validate() const; @@ -281,7 +302,7 @@ private: void dumpConstant(std::string& result, int k) const; void 
dumpInstruction(const uint32_t* opcode, std::string& output, int targetLabel) const; - void writeFunction(std::string& ss, uint32_t id, uint8_t flags) const; + void writeFunction(std::string& ss, uint32_t id, uint8_t flags); void writeLineInfo(std::string& ss) const; void writeStringTable(std::string& ss) const; diff --git a/Compiler/include/Luau/Compiler.h b/Compiler/include/Luau/Compiler.h index 9a661db2..698a50c4 100644 --- a/Compiler/include/Luau/Compiler.h +++ b/Compiler/include/Luau/Compiler.h @@ -26,6 +26,12 @@ struct CompileOptions // 2 - full debug info with local & upvalue names; necessary for debugger int debugLevel = 1; + // type information is used to guide native code generation decisions + // information includes testable types for function arguments, locals, upvalues and some temporaries + // 0 - generate for native modules + // 1 - generate for all modules + int typeInfoLevel = 0; + // 0 - no code coverage support // 1 - statement coverage // 2 - statement and expression coverage (verbose) diff --git a/Compiler/include/luacode.h b/Compiler/include/luacode.h index 60cdeee7..a470319d 100644 --- a/Compiler/include/luacode.h +++ b/Compiler/include/luacode.h @@ -22,6 +22,12 @@ struct lua_CompileOptions // 2 - full debug info with local & upvalue names; necessary for debugger int debugLevel; // default=1 + // type information is used to guide native code generation decisions + // information includes testable types for function arguments, locals, upvalues and some temporaries + // 0 - generate for native modules + // 1 - generate for all modules + int typeInfoLevel; // default=0 + // 0 - no code coverage support // 1 - statement coverage // 2 - statement and expression coverage (verbose) diff --git a/Compiler/src/BytecodeBuilder.cpp b/Compiler/src/BytecodeBuilder.cpp index 3c9bc074..5386a528 100644 --- a/Compiler/src/BytecodeBuilder.cpp +++ b/Compiler/src/BytecodeBuilder.cpp @@ -9,6 +9,7 @@ LUAU_FASTFLAGVARIABLE(LuauCompileNoJumpLineRetarget, false) LUAU_FASTFLAG(LuauCompileRepeatUntilSkippedLocals) +LUAU_FASTFLAGVARIABLE(LuauCompileTypeInfo, false) namespace Luau { @@ -281,6 +282,12 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues, uin debugLocals.clear(); debugUpvals.clear(); + if (FFlag::LuauCompileTypeInfo) + { + typedLocals.clear(); + typedUpvals.clear(); + } + constantMap.clear(); tableShapeMap.clear(); protoMap.clear(); @@ -537,6 +544,29 @@ void BytecodeBuilder::setFunctionTypeInfo(std::string value) functions[currentFunction].typeinfo = std::move(value); } +void BytecodeBuilder::pushLocalTypeInfo(LuauBytecodeType type, uint8_t reg, uint32_t startpc, uint32_t endpc) +{ + LUAU_ASSERT(FFlag::LuauCompileTypeInfo); + + TypedLocal local; + local.type = type; + local.reg = reg; + local.startpc = startpc; + local.endpc = endpc; + + typedLocals.push_back(local); +} + +void BytecodeBuilder::pushUpvalTypeInfo(LuauBytecodeType type) +{ + LUAU_ASSERT(FFlag::LuauCompileTypeInfo); + + TypedUpval upval; + upval.type = type; + + typedUpvals.push_back(upval); +} + void BytecodeBuilder::setDebugFunctionName(StringRef name) { unsigned int index = addStringTableEntry(name); @@ -636,7 +666,6 @@ void BytecodeBuilder::finalize() bytecode = char(version); uint8_t typesversion = getTypeEncodingVersion(); - LUAU_ASSERT(typesversion == 1); writeByte(bytecode, typesversion); writeStringTable(bytecode); @@ -650,7 +679,7 @@ void BytecodeBuilder::finalize() writeVarInt(bytecode, mainFunction); } -void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t 
flags) const +void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t flags) { LUAU_ASSERT(id < functions.size()); const Function& func = functions[id]; @@ -663,8 +692,43 @@ void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t flags) writeByte(ss, flags); - writeVarInt(ss, uint32_t(func.typeinfo.size())); - ss.append(func.typeinfo); + if (FFlag::LuauCompileTypeInfo) + { + if (!func.typeinfo.empty() || !typedUpvals.empty() || !typedLocals.empty()) + { + // collect type info into a temporary string to know the overall size of type data + tempTypeInfo.clear(); + writeVarInt(tempTypeInfo, uint32_t(func.typeinfo.size())); + writeVarInt(tempTypeInfo, uint32_t(typedUpvals.size())); + writeVarInt(tempTypeInfo, uint32_t(typedLocals.size())); + + tempTypeInfo.append(func.typeinfo); + + for (const TypedUpval& l : typedUpvals) + writeByte(tempTypeInfo, l.type); + + for (const TypedLocal& l : typedLocals) + { + writeByte(tempTypeInfo, l.type); + writeByte(tempTypeInfo, l.reg); + writeVarInt(tempTypeInfo, l.startpc); + LUAU_ASSERT(l.endpc >= l.startpc); + writeVarInt(tempTypeInfo, l.endpc - l.startpc); + } + + writeVarInt(ss, uint32_t(tempTypeInfo.size())); + ss.append(tempTypeInfo); + } + else + { + writeVarInt(ss, 0); + } + } + else + { + writeVarInt(ss, uint32_t(func.typeinfo.size())); + ss.append(func.typeinfo); + } // instructions writeVarInt(ss, uint32_t(insns.size())); @@ -1134,7 +1198,7 @@ uint8_t BytecodeBuilder::getVersion() uint8_t BytecodeBuilder::getTypeEncodingVersion() { - return LBC_TYPE_VERSION; + return FFlag::LuauCompileTypeInfo ? LBC_TYPE_VERSION : LBC_TYPE_VERSION_DEPRECATED; } #ifdef LUAU_ASSERTENABLED @@ -2162,6 +2226,39 @@ void BytecodeBuilder::dumpInstruction(const uint32_t* code, std::string& result, } } +static const char* getBaseTypeString(uint8_t type) +{ + uint8_t tag = type & ~LBC_TYPE_OPTIONAL_BIT; + switch (tag) + { + case LBC_TYPE_NIL: + return "nil"; + case LBC_TYPE_BOOLEAN: + return "boolean"; + case LBC_TYPE_NUMBER: + return "number"; + case LBC_TYPE_STRING: + return "string"; + case LBC_TYPE_TABLE: + return "table"; + case LBC_TYPE_FUNCTION: + return "function"; + case LBC_TYPE_THREAD: + return "thread"; + case LBC_TYPE_USERDATA: + return "userdata"; + case LBC_TYPE_VECTOR: + return "vector"; + case LBC_TYPE_BUFFER: + return "buffer"; + case LBC_TYPE_ANY: + return "any"; + } + + LUAU_ASSERT(!"Unhandled type in getBaseTypeString"); + return nullptr; +} + std::string BytecodeBuilder::dumpCurrentFunction(std::vector& dumpinstoffs) const { if ((dumpFlags & Dump_Code) == 0) @@ -2198,6 +2295,45 @@ std::string BytecodeBuilder::dumpCurrentFunction(std::vector& dumpinstoffs) } } + if (FFlag::LuauCompileTypeInfo) + { + if (dumpFlags & Dump_Types) + { + const std::string& typeinfo = functions.back().typeinfo; + + // Arguments start from third byte in function typeinfo string + for (uint8_t i = 2; i < typeinfo.size(); ++i) + { + uint8_t et = typeinfo[i]; + + const char* base = getBaseTypeString(et); + const char* optional = (et & LBC_TYPE_OPTIONAL_BIT) ? "?" : ""; + + formatAppend(result, "R%d: %s%s [argument]\n", i - 2, base, optional); + } + + for (size_t i = 0; i < typedUpvals.size(); ++i) + { + const TypedUpval& l = typedUpvals[i]; + + const char* base = getBaseTypeString(l.type); + const char* optional = (l.type & LBC_TYPE_OPTIONAL_BIT) ? "?" 
: ""; + + formatAppend(result, "U%d: %s%s\n", int(i), base, optional); + } + + for (size_t i = 0; i < typedLocals.size(); ++i) + { + const TypedLocal& l = typedLocals[i]; + + const char* base = getBaseTypeString(l.type); + const char* optional = (l.type & LBC_TYPE_OPTIONAL_BIT) ? "?" : ""; + + formatAppend(result, "R%d: %s%s from %d to %d\n", l.reg, base, optional, l.startpc, l.endpc); + } + } + } + std::vector labels(insns.size(), -1); // annotate valid jump targets with 0 @@ -2364,39 +2500,6 @@ std::string BytecodeBuilder::dumpSourceRemarks() const return result; } -static const char* getBaseTypeString(uint8_t type) -{ - uint8_t tag = type & ~LBC_TYPE_OPTIONAL_BIT; - switch (tag) - { - case LBC_TYPE_NIL: - return "nil"; - case LBC_TYPE_BOOLEAN: - return "boolean"; - case LBC_TYPE_NUMBER: - return "number"; - case LBC_TYPE_STRING: - return "string"; - case LBC_TYPE_TABLE: - return "table"; - case LBC_TYPE_FUNCTION: - return "function"; - case LBC_TYPE_THREAD: - return "thread"; - case LBC_TYPE_USERDATA: - return "userdata"; - case LBC_TYPE_VECTOR: - return "vector"; - case LBC_TYPE_BUFFER: - return "buffer"; - case LBC_TYPE_ANY: - return "any"; - } - - LUAU_ASSERT(!"Unhandled type in getBaseTypeString"); - return nullptr; -} - std::string BytecodeBuilder::dumpTypeInfo() const { std::string result; diff --git a/Compiler/src/Compiler.cpp b/Compiler/src/Compiler.cpp index 9d562874..df096d3a 100644 --- a/Compiler/src/Compiler.cpp +++ b/Compiler/src/Compiler.cpp @@ -27,6 +27,8 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300) LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5) LUAU_FASTFLAGVARIABLE(LuauCompileRepeatUntilSkippedLocals, false) +LUAU_FASTFLAG(LuauCompileTypeInfo) +LUAU_FASTFLAGVARIABLE(LuauTypeInfoLookupImprovement, false) namespace Luau { @@ -40,6 +42,8 @@ static const uint32_t kMaxInstructionCount = 1'000'000'000; static const uint8_t kInvalidReg = 255; +static const uint32_t kDefaultAllocPc = ~0u; + CompileError::CompileError(const Location& location, const std::string& message) : location(location) , message(message) @@ -102,7 +106,8 @@ struct Compiler , locstants(nullptr) , tableShapes(nullptr) , builtins(nullptr) - , typeMap(nullptr) + , functionTypes(nullptr) + , localTypes(nullptr) { // preallocate some buffers that are very likely to grow anyway; this works around std::vector's inefficient growth policy for small arrays localStack.reserve(16); @@ -204,9 +209,12 @@ struct Compiler setDebugLine(func); - // note: we move types out of typeMap which is safe because compileFunction is only called once per function - if (std::string* funcType = typeMap.find(func)) - bytecode.setFunctionTypeInfo(std::move(*funcType)); + if (!FFlag::LuauCompileTypeInfo) + { + // note: we move types out of typeMap which is safe because compileFunction is only called once per function + if (std::string* funcType = functionTypes.find(func)) + bytecode.setFunctionTypeInfo(std::move(*funcType)); + } if (func->vararg) bytecode.emitABC(LOP_PREPVARARGS, uint8_t(self + func->args.size), 0, 0); @@ -214,10 +222,13 @@ struct Compiler uint8_t args = allocReg(func, self + unsigned(func->args.size)); if (func->self) - pushLocal(func->self, args); + pushLocal(func->self, args, kDefaultAllocPc); for (size_t i = 0; i < func->args.size; ++i) - pushLocal(func->args.data[i], uint8_t(args + self + i)); + pushLocal(func->args.data[i], uint8_t(args + self + i), kDefaultAllocPc); + + if (FFlag::LuauCompileTypeInfo) + argCount = localStack.size(); AstStatBlock* stat = func->body; @@ -249,6 +260,19 @@ struct 
Compiler bytecode.pushDebugUpval(sref(l->name)); } + if (FFlag::LuauCompileTypeInfo && options.typeInfoLevel >= 1) + { + for (AstLocal* l : upvals) + { + LuauBytecodeType ty = LBC_TYPE_ANY; + + if (LuauBytecodeType* recordedTy = localTypes.find(l)) + ty = *recordedTy; + + bytecode.pushUpvalTypeInfo(ty); + } + } + if (options.optimizationLevel >= 1) bytecode.foldJumps(); @@ -259,6 +283,13 @@ struct Compiler if (bytecode.getInstructionCount() > kMaxInstructionCount) CompileError::raise(func->location, "Exceeded function instruction limit; split the function into parts to compile"); + if (FFlag::LuauCompileTypeInfo) + { + // note: we move types out of typeMap which is safe because compileFunction is only called once per function + if (std::string* funcType = functionTypes.find(func)) + bytecode.setFunctionTypeInfo(std::move(*funcType)); + } + // top-level code only executes once so it can be marked as cold if it has no loops; code with loops might be profitable to compile natively if (func->functionDepth == 0 && !hasLoops) protoflags |= LPF_NATIVE_COLD; @@ -287,6 +318,10 @@ struct Compiler upvals.clear(); // note: instead of std::move above, we copy & clear to preserve capacity for future pushes stackSize = 0; + + if (FFlag::LuauCompileTypeInfo) + argCount = 0; + hasLoops = false; return fid; @@ -585,6 +620,7 @@ struct Compiler // if the last argument can return multiple values, we need to compute all of them into the remaining arguments unsigned int tail = unsigned(func->args.size - expr->args.size) + 1; uint8_t reg = allocReg(arg, tail); + uint32_t allocpc = FFlag::LuauCompileTypeInfo ? bytecode.getDebugPC() : kDefaultAllocPc; if (AstExprCall* expr = arg->as()) compileExprCall(expr, reg, tail, /* targetTop= */ true); @@ -594,7 +630,12 @@ struct Compiler LUAU_ASSERT(!"Unexpected expression type"); for (size_t j = i; j < func->args.size; ++j) - args.push_back({func->args.data[j], uint8_t(reg + (j - i))}); + { + if (FFlag::LuauCompileTypeInfo) + args.push_back({func->args.data[j], uint8_t(reg + (j - i)), {Constant::Type_Unknown}, allocpc}); + else + args.push_back({func->args.data[j], uint8_t(reg + (j - i))}); + } // all remaining function arguments have been allocated and assigned to break; @@ -603,13 +644,17 @@ struct Compiler { // if the argument is mutated, we need to allocate a fresh register even if it's a constant uint8_t reg = allocReg(arg, 1); + uint32_t allocpc = FFlag::LuauCompileTypeInfo ? bytecode.getDebugPC() : kDefaultAllocPc; if (arg) compileExprTemp(arg, reg); else bytecode.emitABC(LOP_LOADNIL, reg, 0, 0); - args.push_back({var, reg}); + if (FFlag::LuauCompileTypeInfo) + args.push_back({var, reg, {Constant::Type_Unknown}, allocpc}); + else + args.push_back({var, reg}); } else if (arg == nullptr) { @@ -629,14 +674,22 @@ struct Compiler // if the argument is a local that isn't mutated, we will simply reuse the existing register if (int reg = le ? getExprLocalReg(le) : -1; reg >= 0 && (!lv || !lv->written)) { - args.push_back({var, uint8_t(reg)}); + if (FFlag::LuauTypeInfoLookupImprovement) + args.push_back({var, uint8_t(reg), {Constant::Type_Unknown}, kDefaultAllocPc}); + else + args.push_back({var, uint8_t(reg)}); } else { uint8_t temp = allocReg(arg, 1); + uint32_t allocpc = FFlag::LuauCompileTypeInfo ? 
bytecode.getDebugPC() : kDefaultAllocPc; + compileExprTemp(arg, temp); - args.push_back({var, temp}); + if (FFlag::LuauCompileTypeInfo) + args.push_back({var, temp, {Constant::Type_Unknown}, allocpc}); + else + args.push_back({var, temp}); } } } @@ -651,7 +704,10 @@ struct Compiler { if (arg.value.type == Constant::Type_Unknown) { - pushLocal(arg.local, arg.reg); + if (FFlag::LuauCompileTypeInfo) + pushLocal(arg.local, arg.reg, arg.allocpc); + else + pushLocal(arg.local, arg.reg, kDefaultAllocPc); } else { @@ -2851,7 +2907,7 @@ struct Compiler if (int reg = getExprLocalReg(re); reg >= 0 && (!lv || !lv->written) && (!rv || !rv->written)) { - pushLocal(stat->vars.data[0], uint8_t(reg)); + pushLocal(stat->vars.data[0], uint8_t(reg), kDefaultAllocPc); return; } } @@ -2859,11 +2915,12 @@ struct Compiler // note: allocReg in this case allocates into parent block register - note that we don't have RegScope here uint8_t vars = allocReg(stat, unsigned(stat->vars.size)); + uint32_t allocpc = FFlag::LuauCompileTypeInfo ? bytecode.getDebugPC() : kDefaultAllocPc; compileExprListTemp(stat->values, vars, uint8_t(stat->vars.size), /* targetTop= */ true); for (size_t i = 0; i < stat->vars.size; ++i) - pushLocal(stat->vars.data[i], uint8_t(vars + i)); + pushLocal(stat->vars.data[i], uint8_t(vars + i), allocpc); } bool tryCompileUnrolledFor(AstStatFor* stat, int thresholdBase, int thresholdMaxBoost) @@ -2990,6 +3047,7 @@ struct Compiler // this makes sure the code inside the loop can't interfere with the iteration process (other than modifying the table we're iterating // through) uint8_t varreg = regs + 2; + uint32_t varregallocpc = FFlag::LuauCompileTypeInfo ? bytecode.getDebugPC() : kDefaultAllocPc; if (Variable* il = variables.find(stat->var); il && il->written) varreg = allocReg(stat, 1); @@ -3011,7 +3069,7 @@ struct Compiler if (varreg != regs + 2) bytecode.emitABC(LOP_MOVE, varreg, regs + 2, 0); - pushLocal(stat->var, varreg); + pushLocal(stat->var, varreg, varregallocpc); compileStat(stat->body); @@ -3056,6 +3114,7 @@ struct Compiler // note that we reserve at least 2 variables; this allows our fast path to assume that we need 2 variables instead of 1 or 2 uint8_t vars = allocReg(stat, std::max(unsigned(stat->vars.size), 2u)); LUAU_ASSERT(vars == regs + 3); + uint32_t varsallocpc = FFlag::LuauCompileTypeInfo ? bytecode.getDebugPC() : kDefaultAllocPc; LuauOpcode skipOp = LOP_FORGPREP; @@ -3091,7 +3150,7 @@ struct Compiler size_t loopLabel = bytecode.emitLabel(); for (size_t i = 0; i < stat->vars.size; ++i) - pushLocal(stat->vars.data[i], uint8_t(vars + i)); + pushLocal(stat->vars.data[i], uint8_t(vars + i), varsallocpc); compileStat(stat->body); @@ -3515,7 +3574,7 @@ struct Compiler { uint8_t var = allocReg(stat, 1); - pushLocal(stat->name, var); + pushLocal(stat->name, var, kDefaultAllocPc); compileExprFunction(stat->func, var); Local& l = locals[stat->name]; @@ -3569,7 +3628,7 @@ struct Compiler getUpval(local); } - void pushLocal(AstLocal* local, uint8_t reg) + void pushLocal(AstLocal* local, uint8_t reg, uint32_t allocpc) { if (localStack.size() >= kMaxLocalCount) CompileError::raise( @@ -3584,6 +3643,9 @@ struct Compiler l.reg = reg; l.allocated = true; l.debugpc = bytecode.getDebugPC(); + + if (FFlag::LuauCompileTypeInfo) + l.allocpc = allocpc == kDefaultAllocPc ? 
l.debugpc : allocpc; } bool areLocalsCaptured(size_t start) @@ -3645,6 +3707,17 @@ struct Compiler bytecode.pushDebugLocal(sref(localStack[i]->name), l->reg, l->debugpc, debugpc); } + + if (FFlag::LuauCompileTypeInfo && options.typeInfoLevel >= 1 && i >= argCount) + { + uint32_t debugpc = bytecode.getDebugPC(); + LuauBytecodeType ty = LBC_TYPE_ANY; + + if (LuauBytecodeType* recordedTy = localTypes.find(localStack[i])) + ty = *recordedTy; + + bytecode.pushLocalTypeInfo(ty, l->reg, l->allocpc, debugpc); + } } localStack.resize(start); @@ -3909,6 +3982,7 @@ struct Compiler bool allocated = false; bool captured = false; uint32_t debugpc = 0; + uint32_t allocpc = 0; }; struct LoopJump @@ -3937,6 +4011,7 @@ struct Compiler uint8_t reg; Constant value; + uint32_t allocpc; }; struct InlineFrame @@ -3969,7 +4044,8 @@ struct Compiler DenseHashMap locstants; DenseHashMap tableShapes; DenseHashMap builtins; - DenseHashMap typeMap; + DenseHashMap functionTypes; + DenseHashMap localTypes; const DenseHashMap* builtinsFold = nullptr; bool builtinsFoldMathK = false; @@ -3977,6 +4053,7 @@ struct Compiler // compileFunction state, gets reset for every function unsigned int regTop = 0; unsigned int stackSize = 0; + size_t argCount = 0; bool hasLoops = false; bool getfenvUsed = false; @@ -4010,6 +4087,9 @@ void compileOrThrow(BytecodeBuilder& bytecode, const ParseResult& parseResult, c { mainFlags |= LPF_NATIVE_MODULE; options.optimizationLevel = 2; // note: this might be removed in the future in favor of --!optimize + + if (FFlag::LuauCompileTypeInfo) + options.typeInfoLevel = 1; } } @@ -4058,8 +4138,16 @@ void compileOrThrow(BytecodeBuilder& bytecode, const ParseResult& parseResult, c root->visit(&functionVisitor); // computes type information for all functions based on type annotations - if (functionVisitor.hasTypes) - buildTypeMap(compiler.typeMap, root, options.vectorType); + if (FFlag::LuauCompileTypeInfo) + { + if (options.typeInfoLevel >= 1) + buildTypeMap(compiler.functionTypes, compiler.localTypes, root, options.vectorType); + } + else + { + if (functionVisitor.hasTypes) + buildTypeMap(compiler.functionTypes, compiler.localTypes, root, options.vectorType); + } for (AstExprFunction* expr : functions) compiler.compileFunction(expr, 0); diff --git a/Compiler/src/Types.cpp b/Compiler/src/Types.cpp index 8f630f38..d05a7ba2 100644 --- a/Compiler/src/Types.cpp +++ b/Compiler/src/Types.cpp @@ -3,6 +3,8 @@ #include "Luau/BytecodeBuilder.h" +LUAU_FASTFLAG(LuauCompileTypeInfo) + namespace Luau { @@ -144,14 +146,17 @@ static std::string getFunctionType(const AstExprFunction* func, const DenseHashM struct TypeMapVisitor : AstVisitor { - DenseHashMap& typeMap; + DenseHashMap& functionTypes; + DenseHashMap& localTypes; const char* vectorType; DenseHashMap typeAliases; std::vector> typeAliasStack; - TypeMapVisitor(DenseHashMap& typeMap, const char* vectorType) - : typeMap(typeMap) + TypeMapVisitor( + DenseHashMap& functionTypes, DenseHashMap& localTypes, const char* vectorType) + : functionTypes(functionTypes) + , localTypes(localTypes) , vectorType(vectorType) , typeAliases(AstName()) { @@ -216,15 +221,34 @@ struct TypeMapVisitor : AstVisitor std::string type = getFunctionType(node, typeAliases, vectorType); if (!type.empty()) - typeMap[node] = std::move(type); + functionTypes[node] = std::move(type); + + return true; + } + + bool visit(AstExprLocal* node) override + { + if (FFlag::LuauCompileTypeInfo) + { + AstLocal* local = node->local; + + if (AstType* annotation = local->annotation) + { + LuauBytecodeType ty = 
getType(annotation, {}, typeAliases, /* resolveAliases= */ true, vectorType); + + if (ty != LBC_TYPE_ANY) + localTypes[local] = ty; + } + } return true; } }; -void buildTypeMap(DenseHashMap& typeMap, AstNode* root, const char* vectorType) +void buildTypeMap(DenseHashMap& functionTypes, DenseHashMap& localTypes, AstNode* root, + const char* vectorType) { - TypeMapVisitor visitor(typeMap, vectorType); + TypeMapVisitor visitor(functionTypes, localTypes, vectorType); root->visit(&visitor); } diff --git a/Compiler/src/Types.h b/Compiler/src/Types.h index 62f9e916..de11fde9 100644 --- a/Compiler/src/Types.h +++ b/Compiler/src/Types.h @@ -2,6 +2,7 @@ #pragma once #include "Luau/Ast.h" +#include "Luau/Bytecode.h" #include "Luau/DenseHash.h" #include @@ -9,6 +10,7 @@ namespace Luau { -void buildTypeMap(DenseHashMap& typeMap, AstNode* root, const char* vectorType); +void buildTypeMap(DenseHashMap& functionTypes, DenseHashMap& localTypes, AstNode* root, + const char* vectorType); } // namespace Luau diff --git a/VM/src/lfunc.cpp b/VM/src/lfunc.cpp index 5f7c9433..b33fe9dd 100644 --- a/VM/src/lfunc.cpp +++ b/VM/src/lfunc.cpp @@ -6,6 +6,8 @@ #include "lmem.h" #include "lgc.h" +LUAU_FASTFLAGVARIABLE(LuauLoadTypeInfo, false) + Proto* luaF_newproto(lua_State* L) { Proto* f = luaM_newgco(L, Proto, sizeof(Proto), L->activememcat); @@ -51,6 +53,9 @@ Proto* luaF_newproto(lua_State* L) f->linedefined = 0; f->bytecodeid = 0; + if (FFlag::LuauLoadTypeInfo) + f->sizetypeinfo = 0; + return f; } @@ -173,8 +178,16 @@ void luaF_freeproto(lua_State* L, Proto* f, lua_Page* page) if (f->execdata) L->global->ecb.destroy(L, f); - if (f->typeinfo) - luaM_freearray(L, f->typeinfo, f->numparams + 2, uint8_t, f->memcat); + if (FFlag::LuauLoadTypeInfo) + { + if (f->typeinfo) + luaM_freearray(L, f->typeinfo, f->sizetypeinfo, uint8_t, f->memcat); + } + else + { + if (f->typeinfo) + luaM_freearray(L, f->typeinfo, f->numparams + 2, uint8_t, f->memcat); + } luaM_freegco(L, f, sizeof(Proto), f->memcat, page); } diff --git a/VM/src/lgc.cpp b/VM/src/lgc.cpp index 2ddddd97..f8389422 100644 --- a/VM/src/lgc.cpp +++ b/VM/src/lgc.cpp @@ -14,6 +14,8 @@ #include +LUAU_FASTFLAG(LuauLoadTypeInfo) + /* * Luau uses an incremental non-generational non-moving mark&sweep garbage collector. 
* @@ -504,8 +506,17 @@ static size_t propagatemark(global_State* g) Proto* p = gco2p(o); g->gray = p->gclist; traverseproto(g, p); - return sizeof(Proto) + sizeof(Instruction) * p->sizecode + sizeof(Proto*) * p->sizep + sizeof(TValue) * p->sizek + p->sizelineinfo + - sizeof(LocVar) * p->sizelocvars + sizeof(TString*) * p->sizeupvalues; + + if (FFlag::LuauLoadTypeInfo) + { + return sizeof(Proto) + sizeof(Instruction) * p->sizecode + sizeof(Proto*) * p->sizep + sizeof(TValue) * p->sizek + p->sizelineinfo + + sizeof(LocVar) * p->sizelocvars + sizeof(TString*) * p->sizeupvalues + p->sizetypeinfo; + } + else + { + return sizeof(Proto) + sizeof(Instruction) * p->sizecode + sizeof(Proto*) * p->sizep + sizeof(TValue) * p->sizek + p->sizelineinfo + + sizeof(LocVar) * p->sizelocvars + sizeof(TString*) * p->sizeupvalues; + } } default: LUAU_ASSERT(0); diff --git a/VM/src/lobject.h b/VM/src/lobject.h index 1f84e2da..18c69641 100644 --- a/VM/src/lobject.h +++ b/VM/src/lobject.h @@ -335,6 +335,7 @@ typedef struct Proto int linegaplog2; int linedefined; int bytecodeid; + int sizetypeinfo; } Proto; // clang-format on diff --git a/VM/src/lvmload.cpp b/VM/src/lvmload.cpp index 681d0523..f13c0f21 100644 --- a/VM/src/lvmload.cpp +++ b/VM/src/lvmload.cpp @@ -13,6 +13,8 @@ #include +LUAU_FASTFLAG(LuauLoadTypeInfo) + // TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens template struct TempBuffer @@ -258,21 +260,78 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size { p->flags = read(data, size, offset); - uint32_t typesize = readVarInt(data, size, offset); - - if (typesize && typesversion == LBC_TYPE_VERSION) + if (FFlag::LuauLoadTypeInfo) { - uint8_t* types = (uint8_t*)data + offset; + if (typesversion == 1) + { + uint32_t typesize = readVarInt(data, size, offset); - LUAU_ASSERT(typesize == unsigned(2 + p->numparams)); - LUAU_ASSERT(types[0] == LBC_TYPE_FUNCTION); - LUAU_ASSERT(types[1] == p->numparams); + if (typesize) + { + uint8_t* types = (uint8_t*)data + offset; - p->typeinfo = luaM_newarray(L, typesize, uint8_t, p->memcat); - memcpy(p->typeinfo, types, typesize); + LUAU_ASSERT(typesize == unsigned(2 + p->numparams)); + LUAU_ASSERT(types[0] == LBC_TYPE_FUNCTION); + LUAU_ASSERT(types[1] == p->numparams); + + // transform v1 into v2 format + int headersize = typesize > 127 ? 
4 : 3; + + p->typeinfo = luaM_newarray(L, headersize + typesize, uint8_t, p->memcat); + p->sizetypeinfo = headersize + typesize; + + if (headersize == 4) + { + p->typeinfo[0] = (typesize & 127) | (1 << 7); + p->typeinfo[1] = typesize >> 7; + p->typeinfo[2] = 0; + p->typeinfo[3] = 0; + } + else + { + p->typeinfo[0] = uint8_t(typesize); + p->typeinfo[1] = 0; + p->typeinfo[2] = 0; + } + + memcpy(p->typeinfo + headersize, types, typesize); + } + + offset += typesize; + } + else if (typesversion == 2) + { + uint32_t typesize = readVarInt(data, size, offset); + + if (typesize) + { + uint8_t* types = (uint8_t*)data + offset; + + p->typeinfo = luaM_newarray(L, typesize, uint8_t, p->memcat); + p->sizetypeinfo = typesize; + memcpy(p->typeinfo, types, typesize); + offset += typesize; + } + } } + else + { + uint32_t typesize = readVarInt(data, size, offset); - offset += typesize; + if (typesize && typesversion == LBC_TYPE_VERSION_DEPRECATED) + { + uint8_t* types = (uint8_t*)data + offset; + + LUAU_ASSERT(typesize == unsigned(2 + p->numparams)); + LUAU_ASSERT(types[0] == LBC_TYPE_FUNCTION); + LUAU_ASSERT(types[1] == p->numparams); + + p->typeinfo = luaM_newarray(L, typesize, uint8_t, p->memcat); + memcpy(p->typeinfo, types, typesize); + } + + offset += typesize; + } } const int sizecode = readVarInt(data, size, offset); @@ -433,6 +492,8 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size } const int sizeupvalues = readVarInt(data, size, offset); + LUAU_ASSERT(sizeupvalues == p->nups); + p->upvalues = luaM_newarray(L, sizeupvalues, TString*, p->memcat); p->sizeupvalues = sizeupvalues; diff --git a/tests/Compiler.test.cpp b/tests/Compiler.test.cpp index e58526bd..e8927837 100644 --- a/tests/Compiler.test.cpp +++ b/tests/Compiler.test.cpp @@ -71,6 +71,7 @@ static std::string compileTypeTable(const char* source) Luau::CompileOptions opts; opts.vectorType = "Vector3"; + opts.typeInfoLevel = 1; Luau::compileOrThrow(bcb, source, opts); return bcb.dumpTypeInfo(); diff --git a/tests/Conformance.test.cpp b/tests/Conformance.test.cpp index 238ff02b..9333cb19 100644 --- a/tests/Conformance.test.cpp +++ b/tests/Conformance.test.cpp @@ -33,9 +33,6 @@ void luaC_validate(lua_State* L); LUAU_FASTFLAG(DebugLuauAbortingChecks) LUAU_FASTINT(CodegenHeuristicsInstructionLimit) LUAU_FASTFLAG(LuauCompileRepeatUntilSkippedLocals) -LUAU_FASTFLAG(LuauCodegenInferNumTag) -LUAU_FASTFLAG(LuauCodegenDetailedCompilationResult) -LUAU_FASTFLAG(LuauCodegenCheckTruthyFormB) LUAU_DYNAMIC_FASTFLAG(LuauFastCrossTableMove) static lua_CompileOptions defaultOptions() @@ -43,6 +40,7 @@ static lua_CompileOptions defaultOptions() lua_CompileOptions copts = {}; copts.optimizationLevel = optimizationLevel; copts.debugLevel = 1; + copts.typeInfoLevel = 1; copts.vectorCtor = "vector"; copts.vectorType = "vector"; @@ -240,12 +238,7 @@ static StateRef runConformance(const char* name, void (*setup)(lua_State* L) = n free(bytecode); if (result == 0 && codegen && !skipCodegen && luau_codegen_supported()) - { - if (FFlag::LuauCodegenDetailedCompilationResult) - Luau::CodeGen::compile(L, -1, Luau::CodeGen::CodeGen_ColdFunctions); - else - Luau::CodeGen::compile_DEPRECATED(L, -1, Luau::CodeGen::CodeGen_ColdFunctions); - } + Luau::CodeGen::compile(L, -1, Luau::CodeGen::CodeGen_ColdFunctions); int status = (result == 0) ? 
lua_resume(L, nullptr, 0) : LUA_ERRSYNTAX; @@ -338,6 +331,7 @@ static std::vector analyzeFile(const cha Luau::CompileOptions options; options.optimizationLevel = optimizationLevel; options.debugLevel = 1; + options.typeInfoLevel = 1; compileOrThrow(bcb, source, options); @@ -2070,9 +2064,6 @@ TEST_CASE("SafeEnv") TEST_CASE("Native") { - ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenInferNumTag, true}; - ScopedFastFlag luauCodegenCheckTruthyFormB{FFlag::LuauCodegenCheckTruthyFormB, true}; - // This tests requires code to run natively, otherwise all 'is_native' checks will fail if (!codegen || !luau_codegen_supported()) return; @@ -2130,8 +2121,6 @@ TEST_CASE("NativeTypeAnnotations") TEST_CASE("HugeFunction") { - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; - std::string source = makeHugeFunctionSource(); StateRef globalState(luaL_newstate(), lua_close); @@ -2235,7 +2224,6 @@ TEST_CASE("IrInstructionLimit") return; ScopedFastInt codegenHeuristicsInstructionLimit{FInt::CodegenHeuristicsInstructionLimit, 50'000}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; std::string source; diff --git a/tests/DataFlowGraph.test.cpp b/tests/DataFlowGraph.test.cpp index 1b7fe842..07d64e96 100644 --- a/tests/DataFlowGraph.test.cpp +++ b/tests/DataFlowGraph.test.cpp @@ -678,4 +678,30 @@ TEST_CASE_FIXTURE(DataFlowGraphFixture, "dfg_function_definition_in_a_do_block") CHECK(x2 == x3); } +TEST_CASE_FIXTURE(DataFlowGraphFixture, "dfg_captured_local_is_assigned_a_function") +{ + dfg(R"( + local f + + local function g() + f() + end + + function f() + end + )"); + + DefId f1 = graph->getDef(query(module)->vars.data[0]); + DefId f2 = getDef(); + DefId f3 = getDef(); + + CHECK(f1 != f2); + CHECK(f2 != f3); + + const Phi* f2phi = get(f2); + REQUIRE(f2phi); + CHECK(f2phi->operands.size() == 1); + CHECK(f2phi->operands.at(0) == f3); +} + TEST_SUITE_END(); diff --git a/tests/IrBuilder.test.cpp b/tests/IrBuilder.test.cpp index 7ec01e77..2f198e65 100644 --- a/tests/IrBuilder.test.cpp +++ b/tests/IrBuilder.test.cpp @@ -14,7 +14,6 @@ LUAU_FASTFLAG(LuauCodegenRemoveDeadStores5) LUAU_FASTFLAG(DebugLuauAbortingChecks) -LUAU_FASTFLAG(LuauCodegenInferNumTag) LUAU_FASTFLAG(LuauCodegenLoadPropCheckRegLinkInTv) using namespace Luau::CodeGen; @@ -2633,8 +2632,6 @@ bb_0: TEST_CASE_FIXTURE(IrBuilderFixture, "InferNumberTagFromLimitedContext") { - ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenInferNumTag, true}; - IrOp entry = build.block(IrBlockKind::Internal); build.beginBlock(entry); diff --git a/tests/IrLowering.test.cpp b/tests/IrLowering.test.cpp index 7a1e0bfe..479329b4 100644 --- a/tests/IrLowering.test.cpp +++ b/tests/IrLowering.test.cpp @@ -13,10 +13,13 @@ #include LUAU_FASTFLAG(LuauCodegenRemoveDeadStores5) -LUAU_FASTFLAG(LuauCodegenLoadTVTag) LUAU_FASTFLAG(LuauCodegenDirectUserdataFlow) +LUAU_FASTFLAG(LuauCompileTypeInfo) +LUAU_FASTFLAG(LuauLoadTypeInfo) +LUAU_FASTFLAG(LuauCodegenTypeInfo) +LUAU_FASTFLAG(LuauTypeInfoLookupImprovement) -static std::string getCodegenAssembly(const char* source) +static std::string getCodegenAssembly(const char* source, bool includeIrTypes = false) { Luau::CodeGen::AssemblyOptions options; @@ -27,6 +30,7 @@ static std::string getCodegenAssembly(const char* source) options.includeAssembly = false; options.includeIr = true; options.includeOutlinedCode = false; + options.includeIrTypes = includeIrTypes; options.includeIrPrefix = 
Luau::CodeGen::IncludeIrPrefix::No; options.includeUseInfo = Luau::CodeGen::IncludeUseInfo::No; @@ -44,6 +48,7 @@ static std::string getCodegenAssembly(const char* source) copts.optimizationLevel = 2; copts.debugLevel = 1; + copts.typeInfoLevel = 1; copts.vectorCtor = "vector"; copts.vectorType = "vector"; @@ -396,7 +401,6 @@ bb_bytecode_0: TEST_CASE("VectorConstantTag") { ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores5, true}; - ScopedFastFlag luauCodegenLoadTVTag{FFlag::LuauCodegenLoadTVTag, true}; CHECK_EQ("\n" + getCodegenAssembly(R"( local function vecrcp(a: vector) @@ -545,4 +549,315 @@ bb_4: )"); } +TEST_CASE("ExplicitUpvalueAndLocalTypes") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local y: vector = ... + +local function getsum(t) + local x: vector = t + return x.X + x.Y + y.X + y.Y +end +)", + /* includeIrTypes */ true), + R"( +; function getsum($arg0) line 4 +; U0: vector +; R0: vector from 0 to 14 +bb_bytecode_0: + CHECK_TAG R0, tvector, exit(0) + %2 = LOAD_FLOAT R0, 0i + STORE_DOUBLE R4, %2 + STORE_TAG R4, tnumber + %7 = LOAD_FLOAT R0, 4i + %16 = ADD_NUM %2, %7 + STORE_DOUBLE R3, %16 + STORE_TAG R3, tnumber + GET_UPVALUE R5, U0 + CHECK_TAG R5, tvector, exit(6) + %22 = LOAD_FLOAT R5, 0i + %31 = ADD_NUM %16, %22 + STORE_DOUBLE R2, %31 + STORE_TAG R2, tnumber + GET_UPVALUE R4, U0 + CHECK_TAG R4, tvector, exit(10) + %37 = LOAD_FLOAT R4, 4i + %46 = ADD_NUM %31, %37 + STORE_DOUBLE R1, %46 + STORE_TAG R1, tnumber + INTERRUPT 13u + RETURN R1, 1i +)"); +} + +TEST_CASE("FastcallTypeInferThroughLocal") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local function getsum(x, c) + local v = vector(x, 2, 3) + if c then + return v.X + v.Y + else + return v.Z + end +end +)", + /* includeIrTypes */ true), + R"( +; function getsum($arg0, $arg1) line 2 +; R2: vector from 0 to 17 +bb_bytecode_0: + %0 = LOAD_TVALUE R0 + STORE_TVALUE R3, %0 + STORE_DOUBLE R4, 2 + STORE_TAG R4, tnumber + STORE_DOUBLE R5, 3 + STORE_TAG R5, tnumber + CHECK_SAFE_ENV exit(4) + CHECK_TAG R3, tnumber, exit(4) + %13 = LOAD_DOUBLE R3 + STORE_VECTOR R2, %13, 2, 3 + STORE_TAG R2, tvector + JUMP_IF_FALSY R1, bb_bytecode_1, bb_3 +bb_3: + CHECK_TAG R2, tvector, exit(8) + %21 = LOAD_FLOAT R2, 0i + %26 = LOAD_FLOAT R2, 4i + %35 = ADD_NUM %21, %26 + STORE_DOUBLE R3, %35 + STORE_TAG R3, tnumber + INTERRUPT 13u + RETURN R3, 1i +bb_bytecode_1: + CHECK_TAG R2, tvector, exit(14) + %42 = LOAD_FLOAT R2, 8i + STORE_DOUBLE R3, %42 + STORE_TAG R3, tnumber + INTERRUPT 16u + RETURN R3, 1i +)"); +} + +TEST_CASE("FastcallTypeInferThroughUpvalue") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local v = ... 
+ +local function getsum(x, c) + v = vector(x, 2, 3) + if c then + return v.X + v.Y + else + return v.Z + end +end +)", + /* includeIrTypes */ true), + R"( +; function getsum($arg0, $arg1) line 4 +; U0: vector +bb_bytecode_0: + %0 = LOAD_TVALUE R0 + STORE_TVALUE R3, %0 + STORE_DOUBLE R4, 2 + STORE_TAG R4, tnumber + STORE_DOUBLE R5, 3 + STORE_TAG R5, tnumber + CHECK_SAFE_ENV exit(4) + CHECK_TAG R3, tnumber, exit(4) + %13 = LOAD_DOUBLE R3 + STORE_VECTOR R2, %13, 2, 3 + STORE_TAG R2, tvector + SET_UPVALUE U0, R2, tvector + JUMP_IF_FALSY R1, bb_bytecode_1, bb_3 +bb_3: + GET_UPVALUE R4, U0 + CHECK_TAG R4, tvector, exit(10) + %23 = LOAD_FLOAT R4, 0i + STORE_DOUBLE R3, %23 + STORE_TAG R3, tnumber + GET_UPVALUE R5, U0 + CHECK_TAG R5, tvector, exit(13) + %29 = LOAD_FLOAT R5, 4i + %38 = ADD_NUM %23, %29 + STORE_DOUBLE R2, %38 + STORE_TAG R2, tnumber + INTERRUPT 16u + RETURN R2, 1i +bb_bytecode_1: + GET_UPVALUE R3, U0 + CHECK_TAG R3, tvector, exit(18) + %46 = LOAD_FLOAT R3, 8i + STORE_DOUBLE R2, %46 + STORE_TAG R2, tnumber + INTERRUPT 20u + RETURN R2, 1i +)"); +} + +TEST_CASE("LoadAndMoveTypePropagation") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}, {FFlag::LuauTypeInfoLookupImprovement, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local function getsum(n) + local seqsum = 0 + for i = 1,n do + if i < 10 then + seqsum += i + else + seqsum *= i + end + end + + return seqsum +end +)", + /* includeIrTypes */ true), + R"( +; function getsum($arg0) line 2 +; R1: number from 0 to 13 +; R4: number from 1 to 11 +bb_bytecode_0: + STORE_DOUBLE R1, 0 + STORE_TAG R1, tnumber + STORE_DOUBLE R4, 1 + STORE_TAG R4, tnumber + %4 = LOAD_TVALUE R0 + STORE_TVALUE R2, %4 + STORE_DOUBLE R3, 1 + STORE_TAG R3, tnumber + CHECK_TAG R2, tnumber, exit(4) + %12 = LOAD_DOUBLE R2 + JUMP_CMP_NUM 1, %12, not_le, bb_bytecode_4, bb_bytecode_1 +bb_bytecode_1: + INTERRUPT 5u + STORE_DOUBLE R5, 10 + STORE_TAG R5, tnumber + CHECK_TAG R4, tnumber, bb_fallback_6 + JUMP_CMP_NUM R4, 10, not_lt, bb_bytecode_2, bb_5 +bb_5: + CHECK_TAG R1, tnumber, exit(8) + CHECK_TAG R4, tnumber, exit(8) + %32 = LOAD_DOUBLE R1 + %34 = ADD_NUM %32, R4 + STORE_DOUBLE R1, %34 + JUMP bb_bytecode_3 +bb_bytecode_2: + CHECK_TAG R1, tnumber, exit(10) + CHECK_TAG R4, tnumber, exit(10) + %41 = LOAD_DOUBLE R1 + %43 = MUL_NUM %41, R4 + STORE_DOUBLE R1, %43 + JUMP bb_bytecode_3 +bb_bytecode_3: + %46 = LOAD_DOUBLE R2 + %47 = LOAD_DOUBLE R4 + %48 = ADD_NUM %47, 1 + STORE_DOUBLE R4, %48 + JUMP_CMP_NUM %48, %46, le, bb_bytecode_1, bb_bytecode_4 +bb_bytecode_4: + INTERRUPT 12u + RETURN R1, 1i +)"); +} + +TEST_CASE("ArgumentTypeRefinement") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}, {FFlag::LuauTypeInfoLookupImprovement, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local function getsum(x, y) + x = vector(1, y, 3) + return x.Y + x.Z +end +)", + /* includeIrTypes */ true), + R"( +; function getsum($arg0, $arg1) line 2 +; R0: vector [argument] +bb_bytecode_0: + STORE_DOUBLE R3, 1 + STORE_TAG R3, tnumber + %2 = LOAD_TVALUE R1 + STORE_TVALUE R4, %2 + STORE_DOUBLE R5, 3 + STORE_TAG R5, tnumber + CHECK_SAFE_ENV exit(4) + CHECK_TAG R4, tnumber, exit(4) + %14 = LOAD_DOUBLE R4 + STORE_VECTOR R2, 1, %14, 3 + STORE_TAG R2, tvector + %18 = LOAD_TVALUE R2 + STORE_TVALUE R0, %18 + %22 = LOAD_FLOAT R0, 4i + %27 = 
LOAD_FLOAT R0, 8i + %36 = ADD_NUM %22, %27 + STORE_DOUBLE R2, %36 + STORE_TAG R2, tnumber + INTERRUPT 13u + RETURN R2, 1i +)"); +} + +TEST_CASE("InlineFunctionType") +{ + ScopedFastFlag sffs[]{{FFlag::LuauLoadTypeInfo, true}, {FFlag::LuauCompileTypeInfo, true}, {FFlag::LuauCodegenTypeInfo, true}, + {FFlag::LuauCodegenRemoveDeadStores5, true}, {FFlag::LuauTypeInfoLookupImprovement, true}}; + + CHECK_EQ("\n" + getCodegenAssembly(R"( +local function inl(v: vector, s: number) + return v.Y * s +end + +local function getsum(x) + return inl(x, 2) + inl(x, 5) +end +)", + /* includeIrTypes */ true), + R"( +; function inl($arg0, $arg1) line 2 +; R0: vector [argument] +; R1: number [argument] +bb_0: + CHECK_TAG R0, tvector, exit(entry) + CHECK_TAG R1, tnumber, exit(entry) + JUMP bb_2 +bb_2: + JUMP bb_bytecode_1 +bb_bytecode_1: + %8 = LOAD_FLOAT R0, 4i + %17 = MUL_NUM %8, R1 + STORE_DOUBLE R2, %17 + STORE_TAG R2, tnumber + INTERRUPT 3u + RETURN R2, 1i +; function getsum($arg0) line 6 +; R0: vector from 0 to 3 +; R0: vector from 3 to 6 +bb_bytecode_0: + CHECK_TAG R0, tvector, exit(0) + %2 = LOAD_FLOAT R0, 4i + %8 = MUL_NUM %2, 2 + %13 = LOAD_FLOAT R0, 4i + %19 = MUL_NUM %13, 5 + %28 = ADD_NUM %8, %19 + STORE_DOUBLE R1, %28 + STORE_TAG R1, tnumber + INTERRUPT 7u + RETURN R1, 1i +)"); +} + TEST_SUITE_END(); diff --git a/tests/NonStrictTypeChecker.test.cpp b/tests/NonStrictTypeChecker.test.cpp index 3ffe6dc3..d85e46ee 100644 --- a/tests/NonStrictTypeChecker.test.cpp +++ b/tests/NonStrictTypeChecker.test.cpp @@ -63,6 +63,7 @@ struct NonStrictTypeCheckerFixture : Fixture NonStrictTypeCheckerFixture() { registerHiddenTypes(&frontend); + registerTestTypes(); } CheckResult checkNonStrict(const std::string& code) @@ -76,6 +77,17 @@ struct NonStrictTypeCheckerFixture : Fixture return check(Mode::Nonstrict, code); } + CheckResult checkNonStrictModule(const std::string& moduleName) + { + ScopedFastFlag flags[] = { + {FFlag::LuauCheckedFunctionSyntax, true}, + {FFlag::DebugLuauDeferredConstraintResolution, true}, + }; + LoadDefinitionFileResult res = loadDefinition(definitions); + LUAU_ASSERT(res.success); + return frontend.check(moduleName); + } + std::string definitions = R"BUILTIN_SRC( declare function @checked abs(n: number): number declare function @checked lower(s: string): string @@ -106,6 +118,8 @@ type DateTypeArg = { declare os : { time: @checked (time: DateTypeArg?) 
-> number } + +declare function @checked require(target : any) : any )BUILTIN_SRC"; }; @@ -527,4 +541,23 @@ os.time({year = 0, month = 0, day = 0, min = 0, isdst = nil}) Luau::InternalCompilerError); } +TEST_CASE_FIXTURE(NonStrictTypeCheckerFixture, "non_strict_shouldnt_warn_on_require_module") +{ + fileResolver.source["Modules/A"] = R"( +--!strict +type t = {x : number} +local e : t = {x = 3} +return e +)"; + fileResolver.sourceTypes["Modules/A"] = SourceCode::Module; + + fileResolver.source["Modules/B"] = R"( +--!nonstrict +local E = require(script.Parent.A) +)"; + + CheckResult result = checkNonStrictModule("Modules/B"); + LUAU_REQUIRE_NO_ERRORS(result); +} + TEST_SUITE_END(); diff --git a/tests/SharedCodeAllocator.test.cpp b/tests/SharedCodeAllocator.test.cpp index 833b7502..0b142930 100644 --- a/tests/SharedCodeAllocator.test.cpp +++ b/tests/SharedCodeAllocator.test.cpp @@ -16,7 +16,6 @@ #endif LUAU_FASTFLAG(LuauCodegenContext) -LUAU_FASTFLAG(LuauCodegenDetailedCompilationResult) using namespace Luau::CodeGen; @@ -34,7 +33,6 @@ TEST_CASE("NativeModuleRefRefcounting") return; ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize}; SharedCodeAllocator allocator{&codeAllocator}; @@ -253,7 +251,6 @@ TEST_CASE("NativeProtoRefcounting") return; ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize}; SharedCodeAllocator allocator{&codeAllocator}; @@ -307,7 +304,6 @@ TEST_CASE("NativeProtoState") return; ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize}; SharedCodeAllocator allocator{&codeAllocator}; @@ -369,7 +365,6 @@ TEST_CASE("AnonymousModuleLifetime") return; ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize}; SharedCodeAllocator allocator{&codeAllocator}; @@ -419,7 +414,6 @@ TEST_CASE("SharedAllocation") return; ScopedFastFlag luauCodegenContext{FFlag::LuauCodegenContext, true}; - ScopedFastFlag luauCodegenDetailedCompilationResult{FFlag::LuauCodegenDetailedCompilationResult, true}; UniqueSharedCodeGenContext sharedCodeGenContext = createSharedCodeGenContext(); diff --git a/tests/ToString.test.cpp b/tests/ToString.test.cpp index 425c3c6f..4789a810 100644 --- a/tests/ToString.test.cpp +++ b/tests/ToString.test.cpp @@ -14,7 +14,6 @@ LUAU_FASTFLAG(LuauRecursiveTypeParameterRestriction); LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution); LUAU_FASTFLAG(LuauCheckedFunctionSyntax); LUAU_FASTFLAG(DebugLuauSharedSelf); -LUAU_FASTFLAG(LuauStringifyCyclesRootedAtPacks); TEST_SUITE_BEGIN("ToString"); @@ -239,7 +238,6 @@ TEST_CASE_FIXTURE(Fixture, "functions_are_always_parenthesized_in_unions_or_inte TEST_CASE_FIXTURE(Fixture, "simple_intersections_printed_on_one_line") { - ScopedFastFlag sff{FFlag::LuauToStringSimpleCompositeTypesSingleLine, true}; CheckResult result = check(R"( local a: string & number )"); @@ -252,7 +250,6 @@ TEST_CASE_FIXTURE(Fixture, "simple_intersections_printed_on_one_line") 
TEST_CASE_FIXTURE(Fixture, "complex_intersections_printed_on_multiple_lines") { - ScopedFastFlag sff{FFlag::LuauToStringSimpleCompositeTypesSingleLine, true}; CheckResult result = check(R"( local a: string & number & boolean )"); @@ -271,7 +268,6 @@ TEST_CASE_FIXTURE(Fixture, "complex_intersections_printed_on_multiple_lines") TEST_CASE_FIXTURE(Fixture, "overloaded_functions_always_printed_on_multiple_lines") { - ScopedFastFlag sff{FFlag::LuauToStringSimpleCompositeTypesSingleLine, true}; CheckResult result = check(R"( local a: ((string) -> string) & ((number) -> number) )"); @@ -288,7 +284,6 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_always_printed_on_multiple_line TEST_CASE_FIXTURE(Fixture, "simple_unions_printed_on_one_line") { - ScopedFastFlag sff{FFlag::LuauToStringSimpleCompositeTypesSingleLine, true}; CheckResult result = check(R"( local a: number | boolean )"); @@ -301,7 +296,6 @@ TEST_CASE_FIXTURE(Fixture, "simple_unions_printed_on_one_line") TEST_CASE_FIXTURE(Fixture, "complex_unions_printed_on_multiple_lines") { - ScopedFastFlag sff{FFlag::LuauToStringSimpleCompositeTypesSingleLine, true}; CheckResult result = check(R"( local a: string | number | boolean )"); @@ -1006,8 +1000,6 @@ TEST_CASE_FIXTURE(Fixture, "read_only_properties") TEST_CASE_FIXTURE(Fixture, "cycle_rooted_in_a_pack") { - ScopedFastFlag sff{FFlag::LuauStringifyCyclesRootedAtPacks, true}; - TypeArena arena; TypePackId thePack = arena.addTypePack({builtinTypes->numberType, builtinTypes->numberType}); diff --git a/tests/TypeInfer.functions.test.cpp b/tests/TypeInfer.functions.test.cpp index 1b8dea75..4fb3d58b 100644 --- a/tests/TypeInfer.functions.test.cpp +++ b/tests/TypeInfer.functions.test.cpp @@ -2369,7 +2369,7 @@ end auto err = get(result.errors.back()); LUAU_ASSERT(err); CHECK("number" == toString(err->recommendedReturn)); - CHECK(err->recommendedArgs.size() == 2); + REQUIRE(err->recommendedArgs.size() == 2); CHECK("number" == toString(err->recommendedArgs[0].second)); CHECK("number" == toString(err->recommendedArgs[1].second)); } @@ -2597,4 +2597,80 @@ We are unable to determine the appropriate result type for such a call.)"; CHECK("Cannot call non-function (() -> () -> ()) | (() -> ())" == toString(result.errors[0])); } +TEST_CASE_FIXTURE(Fixture, "fuzzer_missing_follow_in_ast_stat_fun") +{ + (void)check(R"( + local _ = function() + end ~= _ + + while (_) do + _,_,_,_,_,_,_,_,_,_._,_ = nil + function _(...):()->() + end + function _(...):any + _ ..= ... + end + _,_,_,_,_,_,_,_,_,_,_ = nil + end + )"); +} + +TEST_CASE_FIXTURE(Fixture, "unifier_should_not_bind_free_types") +{ + CheckResult result = check(R"( + function foo(player) + local success,result = player:thing() + if(success) then + return "Successfully posted message."; + elseif(not result) then + return false; + else + return result; + end + end + )"); + + if (FFlag::DebugLuauDeferredConstraintResolution) + { + // The new solver should ideally be able to do better here, but this is no worse than the old solver. 
+ + LUAU_REQUIRE_ERROR_COUNT(2, result); + + auto tm1 = get(result.errors[0]); + REQUIRE(tm1); + CHECK(toString(tm1->wantedTp) == "string"); + CHECK(toString(tm1->givenTp) == "boolean"); + + auto tm2 = get(result.errors[1]); + REQUIRE(tm2); + CHECK(toString(tm2->wantedTp) == "string"); + CHECK(toString(tm2->givenTp) == "~(false?)"); + } + else + { + LUAU_REQUIRE_ERROR_COUNT(1, result); + + const TypeMismatch* tm = get(result.errors[0]); + REQUIRE(tm); + CHECK(toString(tm->wantedType) == "string"); + CHECK(toString(tm->givenType) == "boolean"); + } +} + +TEST_CASE_FIXTURE(Fixture, "captured_local_is_assigned_a_function") +{ + CheckResult result = check(R"( + local f + + local function g() + f() + end + + function f() + end + )"); + + LUAU_REQUIRE_NO_ERRORS(result); +} + TEST_SUITE_END(); diff --git a/tests/TypeInfer.loops.test.cpp b/tests/TypeInfer.loops.test.cpp index 8d14b812..d1716f5d 100644 --- a/tests/TypeInfer.loops.test.cpp +++ b/tests/TypeInfer.loops.test.cpp @@ -1076,4 +1076,42 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "iter_mm_results_are_lvalue") LUAU_REQUIRE_NO_ERRORS(result); } +TEST_CASE_FIXTURE(BuiltinsFixture, "forin_metatable_no_iter_mm") +{ + ScopedFastFlag sff{FFlag::DebugLuauDeferredConstraintResolution, true}; + + CheckResult result = check(R"( + local t = setmetatable({1, 2, 3}, {}) + + for i, v in t do + print(i, v) + end + )"); + + LUAU_REQUIRE_NO_ERRORS(result); + + CHECK_EQ("number", toString(requireTypeAtPosition({4, 18}))); + CHECK_EQ("number", toString(requireTypeAtPosition({4, 21}))); +} + +TEST_CASE_FIXTURE(BuiltinsFixture, "forin_metatable_iter_mm") +{ + ScopedFastFlag sff{FFlag::DebugLuauDeferredConstraintResolution, true}; + + CheckResult result = check(R"( + type Iterable = typeof(setmetatable({}, {} :: { + __iter: (Iterable) -> () -> T... 
+ })) + + for i, v in {} :: Iterable<...number> do + print(i, v) + end + )"); + + LUAU_REQUIRE_NO_ERRORS(result); + + CHECK_EQ("number", toString(requireTypeAtPosition({6, 18}))); + CHECK_EQ("number", toString(requireTypeAtPosition({6, 21}))); +} + TEST_SUITE_END(); diff --git a/tests/TypeInfer.tables.test.cpp b/tests/TypeInfer.tables.test.cpp index 29537b8c..4cc07fba 100644 --- a/tests/TypeInfer.tables.test.cpp +++ b/tests/TypeInfer.tables.test.cpp @@ -4196,6 +4196,9 @@ TEST_CASE_FIXTURE(Fixture, "read_ond_write_only_indexers_are_unsupported") TEST_CASE_FIXTURE(Fixture, "table_writes_introduce_write_properties") { + if (!FFlag::DebugLuauDeferredConstraintResolution) + return; + ScopedFastFlag sff[] = {{FFlag::LuauReadWritePropertySyntax, true}, {FFlag::DebugLuauDeferredConstraintResolution, true}}; CheckResult result = check(R"( @@ -4389,6 +4392,22 @@ return {} LUAU_REQUIRE_NO_ERRORS(result); } +TEST_CASE_FIXTURE(Fixture, "setprop_on_a_mutating_local_in_both_loops_and_functions") +{ + CheckResult result = check(R"( + local _ = 5 + + while (_) do + _._ = nil + function _() + _ = nil + end + end + )"); + + LUAU_REQUIRE_ERRORS(result); +} + TEST_CASE_FIXTURE(Fixture, "setindexer_multiple_tables_intersection") { ScopedFastFlag sff{FFlag::DebugLuauDeferredConstraintResolution, true}; diff --git a/tests/TypeInfer.test.cpp b/tests/TypeInfer.test.cpp index bc986efc..140f462a 100644 --- a/tests/TypeInfer.test.cpp +++ b/tests/TypeInfer.test.cpp @@ -1518,4 +1518,41 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "be_sure_to_use_active_txnlog_when_evaluating CHECK(5 == e.location.begin.line); } +/* + * We had an issue where this kind of typeof() call could produce the untestable type ~{} + */ +TEST_CASE_FIXTURE(Fixture, "typeof_cannot_refine_builtin_alias") +{ + GlobalTypes& globals = frontend.globals; + TypeArena& arena = globals.globalTypes; + + unfreeze(arena); + + globals.globalScope->exportedTypeBindings["GlobalTable"] = TypeFun{{}, arena.addType(TableType{TableState::Sealed, TypeLevel{}})}; + + freeze(arena); + + (void) check(R"( + function foo(x) + if typeof(x) == 'GlobalTable' then + end + end + )"); +} + +/* + * We had an issue where we tripped the canMutate() check when binding one + * blocked type to another. + */ +TEST_CASE_FIXTURE(Fixture, "delay_setIndexer_constraint_if_the_indexers_type_is_blocked") +{ + (void) check(R"( + local SG = GetService(true) + local lines: { [string]: typeof(SG.ScreenGui) } = {} + lines[deadline] = nil -- This line + )"); + + // As long as type inference doesn't trip an assert or crash, we're good! 
+} + TEST_SUITE_END(); diff --git a/tests/Unifier2.test.cpp b/tests/Unifier2.test.cpp index 8a3bc8de..dcec34d1 100644 --- a/tests/Unifier2.test.cpp +++ b/tests/Unifier2.test.cpp @@ -76,13 +76,13 @@ TEST_CASE_FIXTURE(Unifier2Fixture, "T <: U") CHECK(u2.unify(left, right)); - CHECK("'a" == toString(left)); - CHECK("'a" == toString(right)); + CHECK("t1 where t1 = ('a <: (t1 <: 'b))" == toString(left)); + CHECK("t1 where t1 = (('a <: t1) <: 'b)" == toString(right)); CHECK("never" == toString(freeLeft->lowerBound)); - CHECK("unknown" == toString(freeLeft->upperBound)); + CHECK("t1 where t1 = (('a <: t1) <: 'b)" == toString(freeLeft->upperBound)); - CHECK("never" == toString(freeRight->lowerBound)); + CHECK("t1 where t1 = ('a <: (t1 <: 'b))" == toString(freeRight->lowerBound)); CHECK("unknown" == toString(freeRight->upperBound)); } diff --git a/tools/faillist.txt b/tools/faillist.txt index 254c3115..469e3a84 100644 --- a/tools/faillist.txt +++ b/tools/faillist.txt @@ -66,7 +66,6 @@ GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments GenericsTests.infer_generic_function_function_argument GenericsTests.infer_generic_function_function_argument_2 GenericsTests.infer_generic_function_function_argument_3 -GenericsTests.infer_generic_function_function_argument_overloaded GenericsTests.instantiated_function_argument_names GenericsTests.no_stack_overflow_from_quantifying GenericsTests.properties_can_be_instantiated_polytypes @@ -99,6 +98,7 @@ IntersectionTypes.table_write_sealed_indirect IntersectionTypes.union_saturate_overloaded_functions Linter.TableOperationsIndexer ModuleTests.clone_self_property +Negations.cofinite_strings_can_be_compared_for_equality NonstrictModeTests.inconsistent_module_return_types_are_ok NonstrictModeTests.infer_nullary_function NonstrictModeTests.infer_the_maximum_number_of_values_the_function_could_return @@ -178,6 +178,7 @@ TableTests.generalize_table_argument TableTests.generic_table_instantiation_potential_regression TableTests.indexer_on_sealed_table_must_unify_with_free_table TableTests.indexers_get_quantified_too +TableTests.infer_array TableTests.infer_indexer_from_array_like_table TableTests.infer_indexer_from_its_variable_type_and_unifiable TableTests.inferred_return_type_of_free_table @@ -222,6 +223,7 @@ TableTests.table_subtyping_with_extra_props_dont_report_multiple_errors TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors2 TableTests.table_unification_4 TableTests.table_unifies_into_map +TableTests.table_writes_introduce_write_properties TableTests.type_mismatch_on_massive_table_is_cut_short TableTests.used_colon_instead_of_dot TableTests.used_dot_instead_of_colon @@ -265,7 +267,6 @@ TypeInfer.cli_50041_committing_txnlog_in_apollo_client_error TypeInfer.dont_ice_when_failing_the_occurs_check TypeInfer.dont_report_type_errors_within_an_AstExprError TypeInfer.dont_report_type_errors_within_an_AstStatError -TypeInfer.follow_on_new_types_in_substitution TypeInfer.globals TypeInfer.globals2 TypeInfer.globals_are_banned_in_strict_mode @@ -311,6 +312,7 @@ TypeInferFunctions.function_does_not_return_enough_values TypeInferFunctions.function_exprs_are_generalized_at_signature_scope_not_enclosing TypeInferFunctions.function_is_supertype_of_concrete_functions TypeInferFunctions.function_statement_sealed_table_assignment_through_indexer +TypeInferFunctions.fuzzer_missing_follow_in_ast_stat_fun TypeInferFunctions.generic_packs_are_not_variadic TypeInferFunctions.higher_order_function_2 
TypeInferFunctions.higher_order_function_3 @@ -341,7 +343,6 @@ TypeInferFunctions.too_many_arguments_error_location TypeInferFunctions.too_many_return_values_in_parentheses TypeInferFunctions.too_many_return_values_no_function TypeInferLoops.cli_68448_iterators_need_not_accept_nil -TypeInferLoops.dcr_iteration_fragmented_keys TypeInferLoops.dcr_iteration_on_never_gives_never TypeInferLoops.dcr_xpath_candidates TypeInferLoops.for_in_loop @@ -392,8 +393,6 @@ TypeInferOperators.UnknownGlobalCompoundAssign TypeInferPrimitives.CheckMethodsOfNumber TypeInferPrimitives.string_index TypeInferUnknownNever.assign_to_local_which_is_never -TypeInferUnknownNever.compare_never -TypeInferUnknownNever.dont_unify_operands_if_one_of_the_operand_is_never_in_any_ordering_operators TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_never TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_sorta_never TypeInferUnknownNever.length_of_never diff --git a/tools/lldb_formatters.py b/tools/lldb_formatters.py index 8238cc90..9be6cade 100644 --- a/tools/lldb_formatters.py +++ b/tools/lldb_formatters.py @@ -228,21 +228,7 @@ class DenseHashMapSyntheticChildrenProvider: skipped += 1 continue - slot_key = slot_key_valobj.GetSummary() - if slot_key is None: - slot_key = slot_key_valobj.GetValue() - - if slot_key is None: - slot_key = slot_key_valobj.GetValueAsSigned() - - if slot_key is None: - slot_key = slot_key_valobj.GetValueAsUnsigned() - - if slot_key is None: - slot_key = str(index) - - slot_value_valobj = slot_pair.GetChildMemberWithName("second") - return self.valobj.CreateValueFromData(f"[{slot_key}]", slot_value_valobj.GetData(), slot_value_valobj.GetType()) + return self.valobj.CreateValueFromData(f"[{index}]", slot_pair.GetData(), slot_pair.GetType()) except Exception as e: print("get_child_at_index error", e, index)