diff --git a/Analysis/include/Luau/Constraint.h b/Analysis/include/Luau/Constraint.h index d5cfcf3f..3ffb3fb8 100644 --- a/Analysis/include/Luau/Constraint.h +++ b/Analysis/include/Luau/Constraint.h @@ -90,18 +90,49 @@ struct TypeAliasExpansionConstraint TypeId target; }; -using ConstraintPtr = std::unique_ptr; - struct FunctionCallConstraint { - std::vector> innerConstraints; + std::vector> innerConstraints; TypeId fn; TypePackId result; class AstExprCall* astFragment; }; -using ConstraintV = Variant; +// result ~ prim ExpectedType SomeSingletonType MultitonType +// +// If ExpectedType is potentially a singleton (an actual singleton or a union +// that contains a singleton), then result ~ SomeSingletonType +// +// else result ~ MultitonType +struct PrimitiveTypeConstraint +{ + TypeId resultType; + TypeId expectedType; + TypeId singletonType; + TypeId multitonType; +}; + +// result ~ hasProp type "prop_name" +// +// If the subject is a table, bind the result to the named prop. If the table +// has an indexer, bind it to the index result type. If the subject is a union, +// bind the result to the union of its constituents' properties. +// +// It would be nice to get rid of this constraint and someday replace it with +// +// T <: {p: X} +// +// Where {} describes an inexact shape type. +struct HasPropConstraint +{ + TypeId resultType; + TypeId subjectType; + std::string prop; +}; + +using ConstraintV = + Variant; struct Constraint { @@ -117,6 +148,8 @@ struct Constraint std::vector> dependencies; }; +using ConstraintPtr = std::unique_ptr; + inline Constraint& asMutable(const Constraint& c) { return const_cast(c); diff --git a/Analysis/include/Luau/ConstraintGraphBuilder.h b/Analysis/include/Luau/ConstraintGraphBuilder.h index 1567e0ad..e7d8ad45 100644 --- a/Analysis/include/Luau/ConstraintGraphBuilder.h +++ b/Analysis/include/Luau/ConstraintGraphBuilder.h @@ -125,23 +125,25 @@ struct ConstraintGraphBuilder void visit(const ScopePtr& scope, AstStatDeclareClass* declareClass); void visit(const ScopePtr& scope, AstStatDeclareFunction* declareFunction); - TypePackId checkPack(const ScopePtr& scope, AstArray exprs); - TypePackId checkPack(const ScopePtr& scope, AstExpr* expr); + TypePackId checkPack(const ScopePtr& scope, AstArray exprs, const std::vector& expectedTypes = {}); + TypePackId checkPack(const ScopePtr& scope, AstExpr* expr, const std::vector& expectedTypes = {}); /** * Checks an expression that is expected to evaluate to one type. * @param scope the scope the expression is contained within. * @param expr the expression to check. + * @param expectedType the type of the expression that is expected from its + * surrounding context. Used to implement bidirectional type checking. * @return the type of the expression. 
*/ - TypeId check(const ScopePtr& scope, AstExpr* expr); + TypeId check(const ScopePtr& scope, AstExpr* expr, std::optional expectedType = {}); - TypeId checkExprTable(const ScopePtr& scope, AstExprTable* expr); + TypeId check(const ScopePtr& scope, AstExprTable* expr, std::optional expectedType); TypeId check(const ScopePtr& scope, AstExprIndexName* indexName); TypeId check(const ScopePtr& scope, AstExprIndexExpr* indexExpr); TypeId check(const ScopePtr& scope, AstExprUnary* unary); TypeId check(const ScopePtr& scope, AstExprBinary* binary); - TypeId check(const ScopePtr& scope, AstExprIfElse* ifElse); + TypeId check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional expectedType); TypeId check(const ScopePtr& scope, AstExprTypeAssertion* typeAssert); struct FunctionSignature diff --git a/Analysis/include/Luau/ConstraintSolver.h b/Analysis/include/Luau/ConstraintSolver.h index fe6a025b..abea51b8 100644 --- a/Analysis/include/Luau/ConstraintSolver.h +++ b/Analysis/include/Luau/ConstraintSolver.h @@ -100,6 +100,8 @@ struct ConstraintSolver bool tryDispatch(const NameConstraint& c, NotNull constraint); bool tryDispatch(const TypeAliasExpansionConstraint& c, NotNull constraint); bool tryDispatch(const FunctionCallConstraint& c, NotNull constraint); + bool tryDispatch(const PrimitiveTypeConstraint& c, NotNull constraint); + bool tryDispatch(const HasPropConstraint& c, NotNull constraint); // for a, ... in some_table do bool tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull constraint, bool force); @@ -116,6 +118,16 @@ struct ConstraintSolver bool block(TypeId target, NotNull constraint); bool block(TypePackId target, NotNull constraint); + // Traverse the type. If any blocked or pending typevars are found, block + // the constraint on them. + // + // Returns false if a type blocks the constraint. + // + // FIXME: This use of a boolean for the return result is an appalling + // interface. 
+ bool recursiveBlock(TypeId target, NotNull constraint); + bool recursiveBlock(TypePackId target, NotNull constraint); + void unblock(NotNull progressed); void unblock(TypeId progressed); void unblock(TypePackId progressed); diff --git a/Analysis/include/Luau/Normalize.h b/Analysis/include/Luau/Normalize.h index 48dbe2be..8e8b889b 100644 --- a/Analysis/include/Luau/Normalize.h +++ b/Analysis/include/Luau/Normalize.h @@ -17,8 +17,8 @@ struct SingletonTypes; using ModulePtr = std::shared_ptr; -bool isSubtype(TypeId subTy, TypeId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice); -bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice); +bool isSubtype(TypeId subTy, TypeId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true); +bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true); std::pair normalize( TypeId ty, NotNull scope, TypeArena& arena, NotNull singletonTypes, InternalErrorReporter& ice); diff --git a/Analysis/include/Luau/RequireTracer.h b/Analysis/include/Luau/RequireTracer.h index f69d133e..718a6cc1 100644 --- a/Analysis/include/Luau/RequireTracer.h +++ b/Analysis/include/Luau/RequireTracer.h @@ -6,6 +6,7 @@ #include "Luau/Location.h" #include +#include namespace Luau { diff --git a/Analysis/src/ConstraintGraphBuilder.cpp b/Analysis/src/ConstraintGraphBuilder.cpp index 6a65ab92..aa1e9547 100644 --- a/Analysis/src/ConstraintGraphBuilder.cpp +++ b/Analysis/src/ConstraintGraphBuilder.cpp @@ -36,6 +36,20 @@ static std::optional matchRequire(const AstExprCall& call) return call.args.data[0]; } +static bool matchSetmetatable(const AstExprCall& call) +{ + const char* smt = "setmetatable"; + + if (call.args.size != 2) + return false; + + const AstExprGlobal* funcAsGlobal = call.func->as(); + if (!funcAsGlobal || funcAsGlobal->name != smt) + return false; + + return true; +} + ConstraintGraphBuilder::ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena, NotNull moduleResolver, NotNull singletonTypes, NotNull ice, const ScopePtr& globalScope, DcrLogger* logger) : moduleName(moduleName) @@ -214,15 +228,16 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local) for (AstLocal* local : local->vars) { - TypeId ty = freshType(scope); + TypeId ty = nullptr; Location location = local->location; if (local->annotation) { location = local->annotation->location; - TypeId annotation = resolveType(scope, local->annotation, /* topLevel */ true); - addConstraint(scope, location, SubtypeConstraint{ty, annotation}); + ty = resolveType(scope, local->annotation, /* topLevel */ true); } + else + ty = freshType(scope); varTypes.push_back(ty); scope->bindings[local] = Binding{ty, location}; @@ -231,6 +246,8 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local) for (size_t i = 0; i < local->values.size; ++i) { AstExpr* value = local->values.data[i]; + const bool hasAnnotation = i < local->vars.size && nullptr != local->vars.data[i]->annotation; + if (value->is()) { // HACK: we leave nil-initialized things floating under the assumption that they will later be populated. 
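// Aside: matchSetmetatable above mirrors the existing matchRequire helper; both only
// inspect the call syntactically (a specific global function with a fixed arity) before
// the constraint graph builder special-cases it. A minimal sketch of that shared pattern,
// using only the AST accessors already shown in this hunk (the helper name
// matchGlobalCall is hypothetical and not part of the patch):

static bool matchGlobalCall(const AstExprCall& call, const char* name, size_t arity)
{
    if (call.args.size != arity)
        return false;

    const AstExprGlobal* funcAsGlobal = call.func->as<AstExprGlobal>();
    return funcAsGlobal && funcAsGlobal->name == name;
}

// matchSetmetatable(call) is then equivalent to matchGlobalCall(call, "setmetatable", 2).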
@@ -239,7 +256,11 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local) } else if (i == local->values.size - 1) { - TypePackId exprPack = checkPack(scope, value); + std::vector expectedTypes; + if (hasAnnotation) + expectedTypes.insert(begin(expectedTypes), begin(varTypes) + i, end(varTypes)); + + TypePackId exprPack = checkPack(scope, value, expectedTypes); if (i < local->vars.size) { @@ -250,7 +271,11 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local) } else { - TypeId exprType = check(scope, value); + std::optional expectedType; + if (hasAnnotation) + expectedType = varTypes.at(i); + + TypeId exprType = check(scope, value, expectedType); if (i < varTypes.size()) addConstraint(scope, local->location, SubtypeConstraint{varTypes[i], exprType}); } @@ -458,7 +483,15 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* funct void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatReturn* ret) { - TypePackId exprTypes = checkPack(scope, ret->list); + // At this point, the only way scope->returnType should have anything + // interesting in it is if the function has an explicit return annotation. + // If this is the case, then we can expect that the return expression + // conforms to that. + std::vector expectedTypes; + for (TypeId ty : scope->returnType) + expectedTypes.push_back(ty); + + TypePackId exprTypes = checkPack(scope, ret->list, expectedTypes); addConstraint(scope, ret->location, PackSubtypeConstraint{exprTypes, scope->returnType}); } @@ -695,7 +728,7 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatDeclareFunction scope->bindings[global->name] = Binding{fnType, global->location}; } -TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray exprs) +TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray exprs, const std::vector& expectedTypes) { std::vector head; std::optional tail; @@ -704,9 +737,17 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray expectedType; + if (i < expectedTypes.size()) + expectedType = expectedTypes[i]; head.push_back(check(scope, expr)); + } else - tail = checkPack(scope, expr); + { + std::vector expectedTailTypes{begin(expectedTypes) + i, end(expectedTypes)}; + tail = checkPack(scope, expr, expectedTailTypes); + } } if (head.empty() && tail) @@ -715,7 +756,7 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArrayaddTypePack(TypePack{std::move(head), tail}); } -TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* expr) +TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* expr, const std::vector& expectedTypes) { RecursionCounter counter{&recursionCount}; @@ -730,7 +771,6 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp if (AstExprCall* call = expr->as()) { TypeId fnType = check(scope, call->func); - const size_t constraintIndex = scope->constraints.size(); const size_t scopeIndex = scopes.size(); @@ -743,49 +783,63 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp // TODO self - const size_t constraintEndIndex = scope->constraints.size(); - const size_t scopeEndIndex = scopes.size(); - - astOriginalCallTypes[call->func] = fnType; - - TypeId instantiatedType = arena->addType(BlockedTypeVar{}); - TypePackId rets = arena->addTypePack(BlockedTypePack{}); - FunctionTypeVar ftv(arena->addTypePack(TypePack{args, {}}), rets); - TypeId 
inferredFnType = arena->addType(ftv); - - scope->unqueuedConstraints.push_back( - std::make_unique(NotNull{scope.get()}, call->func->location, InstantiationConstraint{instantiatedType, fnType})); - NotNull ic(scope->unqueuedConstraints.back().get()); - - scope->unqueuedConstraints.push_back( - std::make_unique(NotNull{scope.get()}, call->func->location, SubtypeConstraint{inferredFnType, instantiatedType})); - NotNull sc(scope->unqueuedConstraints.back().get()); - - // We force constraints produced by checking function arguments to wait - // until after we have resolved the constraint on the function itself. - // This ensures, for instance, that we start inferring the contents of - // lambdas under the assumption that their arguments and return types - // will be compatible with the enclosing function call. - for (size_t ci = constraintIndex; ci < constraintEndIndex; ++ci) - scope->constraints[ci]->dependencies.push_back(sc); - - for (size_t si = scopeIndex; si < scopeEndIndex; ++si) + if (matchSetmetatable(*call)) { - for (auto& c : scopes[si].second->constraints) - { - c->dependencies.push_back(sc); - } + LUAU_ASSERT(args.size() == 2); + TypeId target = args[0]; + TypeId mt = args[1]; + + MetatableTypeVar mtv{target, mt}; + TypeId resultTy = arena->addType(mtv); + result = arena->addTypePack({resultTy}); } + else + { + const size_t constraintEndIndex = scope->constraints.size(); + const size_t scopeEndIndex = scopes.size(); - addConstraint(scope, call->func->location, - FunctionCallConstraint{ - {ic, sc}, - fnType, - rets, - call, - }); + astOriginalCallTypes[call->func] = fnType; - result = rets; + TypeId instantiatedType = arena->addType(BlockedTypeVar{}); + // TODO: How do expectedTypes play into this? Do they? + TypePackId rets = arena->addTypePack(BlockedTypePack{}); + FunctionTypeVar ftv(arena->addTypePack(TypePack{args, {}}), rets); + TypeId inferredFnType = arena->addType(ftv); + + scope->unqueuedConstraints.push_back( + std::make_unique(NotNull{scope.get()}, call->func->location, InstantiationConstraint{instantiatedType, fnType})); + NotNull ic(scope->unqueuedConstraints.back().get()); + + scope->unqueuedConstraints.push_back( + std::make_unique(NotNull{scope.get()}, call->func->location, SubtypeConstraint{inferredFnType, instantiatedType})); + NotNull sc(scope->unqueuedConstraints.back().get()); + + // We force constraints produced by checking function arguments to wait + // until after we have resolved the constraint on the function itself. + // This ensures, for instance, that we start inferring the contents of + // lambdas under the assumption that their arguments and return types + // will be compatible with the enclosing function call. 
+ for (size_t ci = constraintIndex; ci < constraintEndIndex; ++ci) + scope->constraints[ci]->dependencies.push_back(sc); + + for (size_t si = scopeIndex; si < scopeEndIndex; ++si) + { + for (auto& c : scopes[si].second->constraints) + { + c->dependencies.push_back(sc); + } + } + + addConstraint(scope, call->func->location, + FunctionCallConstraint{ + {ic, sc}, + fnType, + rets, + call, + }); + + result = rets; + } } else if (AstExprVarargs* varargs = expr->as()) { @@ -796,7 +850,10 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp } else { - TypeId t = check(scope, expr); + std::optional expectedType; + if (!expectedTypes.empty()) + expectedType = expectedTypes[0]; + TypeId t = check(scope, expr, expectedType); result = arena->addTypePack({t}); } @@ -805,7 +862,7 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp return result; } -TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr) +TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, std::optional expectedType) { RecursionCounter counter{&recursionCount}; @@ -819,12 +876,47 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr) if (auto group = expr->as()) result = check(scope, group->expr); - else if (expr->is()) - result = singletonTypes->stringType; + else if (auto stringExpr = expr->as()) + { + if (expectedType) + { + const TypeId expectedTy = follow(*expectedType); + if (get(expectedTy) || get(expectedTy)) + { + result = arena->addType(BlockedTypeVar{}); + TypeId singletonType = arena->addType(SingletonTypeVar(StringSingleton{std::string(stringExpr->value.data, stringExpr->value.size)})); + addConstraint(scope, expr->location, PrimitiveTypeConstraint{result, expectedTy, singletonType, singletonTypes->stringType}); + } + else if (maybeSingleton(expectedTy)) + result = arena->addType(SingletonTypeVar{StringSingleton{std::string{stringExpr->value.data, stringExpr->value.size}}}); + else + result = singletonTypes->stringType; + } + else + result = singletonTypes->stringType; + } else if (expr->is()) result = singletonTypes->numberType; - else if (expr->is()) - result = singletonTypes->booleanType; + else if (auto boolExpr = expr->as()) + { + if (expectedType) + { + const TypeId expectedTy = follow(*expectedType); + const TypeId singletonType = boolExpr->value ? 
singletonTypes->trueType : singletonTypes->falseType; + + if (get(expectedTy) || get(expectedTy)) + { + result = arena->addType(BlockedTypeVar{}); + addConstraint(scope, expr->location, PrimitiveTypeConstraint{result, expectedTy, singletonType, singletonTypes->booleanType}); + } + else if (maybeSingleton(expectedTy)) + result = singletonType; + else + result = singletonTypes->booleanType; + } + else + result = singletonTypes->booleanType; + } else if (expr->is()) result = singletonTypes->nilType; else if (auto a = expr->as()) @@ -864,13 +956,13 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr) else if (auto indexExpr = expr->as()) result = check(scope, indexExpr); else if (auto table = expr->as()) - result = checkExprTable(scope, table); + result = check(scope, table, expectedType); else if (auto unary = expr->as()) result = check(scope, unary); else if (auto binary = expr->as()) result = check(scope, binary); else if (auto ifElse = expr->as()) - result = check(scope, ifElse); + result = check(scope, ifElse, expectedType); else if (auto typeAssert = expr->as()) result = check(scope, typeAssert); else if (auto err = expr->as()) @@ -924,20 +1016,9 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprUnary* unary) { TypeId operandType = check(scope, unary->expr); - switch (unary->op) - { - case AstExprUnary::Minus: - { - TypeId resultType = arena->addType(BlockedTypeVar{}); - addConstraint(scope, unary->location, UnaryConstraint{AstExprUnary::Minus, operandType, resultType}); - return resultType; - } - default: - LUAU_ASSERT(0); - } - - LUAU_UNREACHABLE(); - return singletonTypes->errorRecoveryType(); + TypeId resultType = arena->addType(BlockedTypeVar{}); + addConstraint(scope, unary->location, UnaryConstraint{unary->op, operandType, resultType}); + return resultType; } TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binary) @@ -946,22 +1027,34 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binar TypeId rightType = check(scope, binary->right); switch (binary->op) { + case AstExprBinary::And: case AstExprBinary::Or: { addConstraint(scope, binary->location, SubtypeConstraint{leftType, rightType}); return leftType; } case AstExprBinary::Add: + case AstExprBinary::Sub: + case AstExprBinary::Mul: + case AstExprBinary::Div: + case AstExprBinary::Mod: + case AstExprBinary::Pow: + case AstExprBinary::CompareNe: + case AstExprBinary::CompareEq: + case AstExprBinary::CompareLt: + case AstExprBinary::CompareLe: + case AstExprBinary::CompareGt: + case AstExprBinary::CompareGe: { TypeId resultType = arena->addType(BlockedTypeVar{}); - addConstraint(scope, binary->location, BinaryConstraint{AstExprBinary::Add, leftType, rightType, resultType}); + addConstraint(scope, binary->location, BinaryConstraint{binary->op, leftType, rightType, resultType}); return resultType; } - case AstExprBinary::Sub: + case AstExprBinary::Concat: { - TypeId resultType = arena->addType(BlockedTypeVar{}); - addConstraint(scope, binary->location, BinaryConstraint{AstExprBinary::Sub, leftType, rightType, resultType}); - return resultType; + addConstraint(scope, binary->left->location, SubtypeConstraint{leftType, singletonTypes->stringType}); + addConstraint(scope, binary->right->location, SubtypeConstraint{rightType, singletonTypes->stringType}); + return singletonTypes->stringType; } default: LUAU_ASSERT(0); @@ -971,16 +1064,16 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binar return nullptr; } 
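// For reference, the string/boolean literal checks above implement a small piece of
// bidirectional checking: given  local x: "hi" | number = "hi"  the annotation is passed
// down as the expected type, maybeSingleton sees a union containing a singleton, and the
// literal is typed as the singleton "hi" rather than plain string. Only when the expected
// type is still a BlockedTypeVar/PendingExpansionTypeVar is the decision deferred to the
// new PrimitiveTypeConstraint. A rough sketch of the test maybeSingleton performs,
// reconstructed from the behaviour relied on above (not copied from TypeUtils):

static bool maybeSingletonSketch(TypeId ty)
{
    ty = follow(ty);

    if (get<SingletonTypeVar>(ty))
        return true;

    if (const UnionTypeVar* utv = get<UnionTypeVar>(ty))
    {
        for (TypeId option : utv->options)
            if (get<SingletonTypeVar>(follow(option)))
                return true;
    }

    return false;
}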
-TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse) +TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional expectedType) { check(scope, ifElse->condition); - TypeId thenType = check(scope, ifElse->trueExpr); - TypeId elseType = check(scope, ifElse->falseExpr); + TypeId thenType = check(scope, ifElse->trueExpr, expectedType); + TypeId elseType = check(scope, ifElse->falseExpr, expectedType); if (ifElse->hasElse) { - TypeId resultType = arena->addType(BlockedTypeVar{}); + TypeId resultType = expectedType ? *expectedType : freshType(scope); addConstraint(scope, ifElse->trueExpr->location, SubtypeConstraint{thenType, resultType}); addConstraint(scope, ifElse->falseExpr->location, SubtypeConstraint{elseType, resultType}); return resultType; @@ -995,7 +1088,7 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTypeAssertion return resolveType(scope, typeAssert->annotation); } -TypeId ConstraintGraphBuilder::checkExprTable(const ScopePtr& scope, AstExprTable* expr) +TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTable* expr, std::optional expectedType) { TypeId ty = arena->addType(TableTypeVar{}); TableTypeVar* ttv = getMutable(ty); @@ -1015,7 +1108,18 @@ TypeId ConstraintGraphBuilder::checkExprTable(const ScopePtr& scope, AstExprTabl for (const AstExprTable::Item& item : expr->items) { - TypeId itemTy = check(scope, item.value); + std::optional expectedValueType; + + if (item.key && expectedType) + { + if (auto stringKey = item.key->as()) + { + expectedValueType = arena->addType(BlockedTypeVar{}); + addConstraint(scope, item.value->location, HasPropConstraint{*expectedValueType, *expectedType, stringKey->value.data}); + } + } + + TypeId itemTy = check(scope, item.value, expectedValueType); if (get(follow(itemTy))) return ty; @@ -1130,7 +1234,12 @@ ConstraintGraphBuilder::FunctionSignature ConstraintGraphBuilder::checkFunctionS if (fn->returnAnnotation) { TypePackId annotatedRetType = resolveTypePack(signatureScope, *fn->returnAnnotation); - addConstraint(signatureScope, getLocation(*fn->returnAnnotation), PackSubtypeConstraint{returnType, annotatedRetType}); + + // We bind the annotated type directly here so that, when we need to + // generate constraints for return types, we have a guarantee that we + // know the annotated return type already, if one was provided. 
+ LUAU_ASSERT(get(returnType)); + asMutable(returnType)->ty.emplace(annotatedRetType); } std::vector argTypes; diff --git a/Analysis/src/ConstraintSolver.cpp b/Analysis/src/ConstraintSolver.cpp index 6fb57b15..b2bf773f 100644 --- a/Analysis/src/ConstraintSolver.cpp +++ b/Analysis/src/ConstraintSolver.cpp @@ -396,8 +396,12 @@ bool ConstraintSolver::tryDispatch(NotNull constraint, bool fo success = tryDispatch(*taec, constraint); else if (auto fcc = get(*constraint)) success = tryDispatch(*fcc, constraint); + else if (auto fcc = get(*constraint)) + success = tryDispatch(*fcc, constraint); + else if (auto hpc = get(*constraint)) + success = tryDispatch(*hpc, constraint); else - LUAU_ASSERT(0); + LUAU_ASSERT(false); if (success) { @@ -409,6 +413,11 @@ bool ConstraintSolver::tryDispatch(NotNull constraint, bool fo bool ConstraintSolver::tryDispatch(const SubtypeConstraint& c, NotNull constraint, bool force) { + if (!recursiveBlock(c.subType, constraint)) + return false; + if (!recursiveBlock(c.superType, constraint)) + return false; + if (isBlocked(c.subType)) return block(c.subType, constraint); else if (isBlocked(c.superType)) @@ -421,6 +430,9 @@ bool ConstraintSolver::tryDispatch(const SubtypeConstraint& c, NotNull constraint, bool force) { + if (!recursiveBlock(c.subPack, constraint) || !recursiveBlock(c.superPack, constraint)) + return false; + if (isBlocked(c.subPack)) return block(c.subPack, constraint); else if (isBlocked(c.superPack)) @@ -480,13 +492,30 @@ bool ConstraintSolver::tryDispatch(const UnaryConstraint& c, NotNull(c.resultType)); - if (isNumber(operandType) || get(operandType) || get(operandType)) + switch (c.op) { - asMutable(c.resultType)->ty.emplace(c.operandType); - return true; + case AstExprUnary::Not: + { + asMutable(c.resultType)->ty.emplace(singletonTypes->booleanType); + return true; + } + case AstExprUnary::Len: + { + asMutable(c.resultType)->ty.emplace(singletonTypes->numberType); + return true; + } + case AstExprUnary::Minus: + { + if (isNumber(operandType) || get(operandType) || get(operandType)) + { + asMutable(c.resultType)->ty.emplace(c.operandType); + return true; + } + break; + } } - LUAU_ASSERT(0); // TODO metatable handling + LUAU_ASSERT(false); // TODO metatable handling return false; } @@ -906,6 +935,91 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull constraint) +{ + TypeId expectedType = follow(c.expectedType); + if (isBlocked(expectedType) || get(expectedType)) + return block(expectedType, constraint); + + TypeId bindTo = maybeSingleton(expectedType) ? 
c.singletonType : c.multitonType; + asMutable(c.resultType)->ty.emplace(bindTo); + + return true; +} + +bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull constraint) +{ + TypeId subjectType = follow(c.subjectType); + + if (isBlocked(subjectType) || get(subjectType)) + return block(subjectType, constraint); + + TypeId resultType = nullptr; + + auto collectParts = [&](auto&& unionOrIntersection) -> std::pair> { + bool blocked = false; + + std::vector parts; + for (TypeId expectedPart : unionOrIntersection) + { + expectedPart = follow(expectedPart); + if (isBlocked(expectedPart) || get(expectedPart)) + { + blocked = true; + block(expectedPart, constraint); + } + else if (const TableTypeVar* ttv = get(follow(expectedPart))) + { + if (auto prop = ttv->props.find(c.prop); prop != ttv->props.end()) + parts.push_back(prop->second.type); + else if (ttv->indexer && maybeString(ttv->indexer->indexType)) + parts.push_back(ttv->indexer->indexResultType); + } + } + + return {blocked, parts}; + }; + + if (auto ttv = get(subjectType)) + { + if (auto prop = ttv->props.find(c.prop); prop != ttv->props.end()) + resultType = prop->second.type; + else if (ttv->indexer && maybeString(ttv->indexer->indexType)) + resultType = ttv->indexer->indexResultType; + } + else if (auto utv = get(subjectType)) + { + auto [blocked, parts] = collectParts(utv); + + if (blocked) + return false; + else if (parts.size() == 1) + resultType = parts[0]; + else if (parts.size() > 1) + resultType = arena->addType(UnionTypeVar{std::move(parts)}); + else + LUAU_ASSERT(false); // parts.size() == 0 + } + else if (auto itv = get(subjectType)) + { + auto [blocked, parts] = collectParts(itv); + + if (blocked) + return false; + else if (parts.size() == 1) + resultType = parts[0]; + else if (parts.size() > 1) + resultType = arena->addType(IntersectionTypeVar{std::move(parts)}); + else + LUAU_ASSERT(false); // parts.size() == 0 + } + + if (resultType) + asMutable(c.resultType)->ty.emplace(resultType); + + return true; +} + bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull constraint, bool force) { auto block_ = [&](auto&& t) { @@ -914,7 +1028,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl // TODO: I believe it is the case that, if we are asked to force // this constraint, then we can do nothing but fail. I'd like to // find a code sample that gets here. 
- LUAU_ASSERT(0); + LUAU_ASSERT(false); } else block(t, constraint); @@ -979,7 +1093,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl if (get(metaTy)) return block_(metaTy); - LUAU_ASSERT(0); + LUAU_ASSERT(false); } else errorify(c.variables); @@ -996,7 +1110,7 @@ bool ConstraintSolver::tryDispatchIterableFunction( if (get(firstIndexTy)) { if (force) - LUAU_ASSERT(0); + LUAU_ASSERT(false); else block(firstIndexTy, constraint); return false; @@ -1061,6 +1175,48 @@ bool ConstraintSolver::block(TypePackId target, NotNull constr return false; } +struct Blocker : TypeVarOnceVisitor +{ + NotNull solver; + NotNull constraint; + + bool blocked = false; + + explicit Blocker(NotNull solver, NotNull constraint) + : solver(solver) + , constraint(constraint) + { + } + + bool visit(TypeId ty, const BlockedTypeVar&) + { + blocked = true; + solver->block(ty, constraint); + return false; + } + + bool visit(TypeId ty, const PendingExpansionTypeVar&) + { + blocked = true; + solver->block(ty, constraint); + return false; + } +}; + +bool ConstraintSolver::recursiveBlock(TypeId target, NotNull constraint) +{ + Blocker blocker{NotNull{this}, constraint}; + blocker.traverse(target); + return !blocker.blocked; +} + +bool ConstraintSolver::recursiveBlock(TypePackId pack, NotNull constraint) +{ + Blocker blocker{NotNull{this}, constraint}; + blocker.traverse(pack); + return !blocker.blocked; +} + void ConstraintSolver::unblock_(BlockedConstraintId progressed) { auto it = blocked.find(progressed); diff --git a/Analysis/src/Normalize.cpp b/Analysis/src/Normalize.cpp index c3f0bb9d..42f61517 100644 --- a/Analysis/src/Normalize.cpp +++ b/Analysis/src/Normalize.cpp @@ -5,6 +5,7 @@ #include #include "Luau/Clone.h" +#include "Luau/Common.h" #include "Luau/Unifier.h" #include "Luau/VisitTypeVar.h" @@ -13,8 +14,8 @@ LUAU_FASTFLAGVARIABLE(DebugLuauCopyBeforeNormalizing, false) // This could theoretically be 2000 on amd64, but x86 requires this. 
LUAU_FASTINTVARIABLE(LuauNormalizeIterationLimit, 1200); LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineTableFix, false); -LUAU_FASTFLAGVARIABLE(LuauFixNormalizationOfCyclicUnions, false); LUAU_FASTFLAG(LuauUnknownAndNeverType) +LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) namespace Luau { @@ -54,24 +55,24 @@ struct Replacer } // anonymous namespace -bool isSubtype(TypeId subTy, TypeId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice) +bool isSubtype(TypeId subTy, TypeId superTy, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice, bool anyIsTop) { UnifierSharedState sharedState{&ice}; TypeArena arena; Unifier u{&arena, singletonTypes, Mode::Strict, scope, Location{}, Covariant, sharedState}; - u.anyIsTop = true; + u.anyIsTop = anyIsTop; u.tryUnify(subTy, superTy); const bool ok = u.errors.empty() && u.log.empty(); return ok; } -bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice) +bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull scope, NotNull singletonTypes, InternalErrorReporter& ice, bool anyIsTop) { UnifierSharedState sharedState{&ice}; TypeArena arena; Unifier u{&arena, singletonTypes, Mode::Strict, scope, Location{}, Covariant, sharedState}; - u.anyIsTop = true; + u.anyIsTop = anyIsTop; u.tryUnify(subPack, superPack); const bool ok = u.errors.empty() && u.log.empty(); @@ -319,18 +320,11 @@ struct Normalize final : TypeVarVisitor UnionTypeVar* utv = &const_cast(utvRef); - // TODO: Clip tempOptions and optionsRef when clipping FFlag::LuauFixNormalizationOfCyclicUnions - std::vector tempOptions; - if (!FFlag::LuauFixNormalizationOfCyclicUnions) - tempOptions = std::move(utv->options); - - std::vector& optionsRef = FFlag::LuauFixNormalizationOfCyclicUnions ? utv->options : tempOptions; - // We might transmute, so it's not safe to rely on the builtin traversal logic of visitTypeVar - for (TypeId option : optionsRef) + for (TypeId option : utv->options) traverse(option); - std::vector newOptions = normalizeUnion(optionsRef); + std::vector newOptions = normalizeUnion(utv->options); const bool normal = areNormal(newOptions, seen, ice); @@ -355,106 +349,54 @@ struct Normalize final : TypeVarVisitor IntersectionTypeVar* itv = &const_cast(itvRef); - if (FFlag::LuauFixNormalizationOfCyclicUnions) + std::vector oldParts = itv->parts; + IntersectionTypeVar newIntersection; + + for (TypeId part : oldParts) + traverse(part); + + std::vector tables; + for (TypeId part : oldParts) { - std::vector oldParts = itv->parts; - IntersectionTypeVar newIntersection; - - for (TypeId part : oldParts) - traverse(part); - - std::vector tables; - for (TypeId part : oldParts) + part = follow(part); + if (get(part)) + tables.push_back(part); + else { - part = follow(part); - if (get(part)) - tables.push_back(part); - else - { - Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD - combineIntoIntersection(replacer, &newIntersection, part); - } - } - - // Don't allocate a new table if there's just one in the intersection. 
- if (tables.size() == 1) - newIntersection.parts.push_back(tables[0]); - else if (!tables.empty()) - { - const TableTypeVar* first = get(tables[0]); - LUAU_ASSERT(first); - - TypeId newTable = arena.addType(TableTypeVar{first->state, first->level}); - TableTypeVar* ttv = getMutable(newTable); - for (TypeId part : tables) - { - // Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need - // to be rewritten to point at 'newTable' in the clone. - Replacer replacer{&arena, part, newTable}; - combineIntoTable(replacer, ttv, part); - } - - newIntersection.parts.push_back(newTable); - } - - itv->parts = std::move(newIntersection.parts); - - asMutable(ty)->normal = areNormal(itv->parts, seen, ice); - - if (itv->parts.size() == 1) - { - TypeId part = itv->parts[0]; - *asMutable(ty) = BoundTypeVar{part}; + Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD + combineIntoIntersection(replacer, &newIntersection, part); } } - else + + // Don't allocate a new table if there's just one in the intersection. + if (tables.size() == 1) + newIntersection.parts.push_back(tables[0]); + else if (!tables.empty()) { - std::vector oldParts = std::move(itv->parts); + const TableTypeVar* first = get(tables[0]); + LUAU_ASSERT(first); - for (TypeId part : oldParts) - traverse(part); - - std::vector tables; - for (TypeId part : oldParts) + TypeId newTable = arena.addType(TableTypeVar{first->state, first->level}); + TableTypeVar* ttv = getMutable(newTable); + for (TypeId part : tables) { - part = follow(part); - if (get(part)) - tables.push_back(part); - else - { - Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD - combineIntoIntersection(replacer, itv, part); - } + // Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need + // to be rewritten to point at 'newTable' in the clone. + Replacer replacer{&arena, part, newTable}; + combineIntoTable(replacer, ttv, part); } - // Don't allocate a new table if there's just one in the intersection. - if (tables.size() == 1) - itv->parts.push_back(tables[0]); - else if (!tables.empty()) - { - const TableTypeVar* first = get(tables[0]); - LUAU_ASSERT(first); + newIntersection.parts.push_back(newTable); + } - TypeId newTable = arena.addType(TableTypeVar{first->state, first->level}); - TableTypeVar* ttv = getMutable(newTable); - for (TypeId part : tables) - { - // Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need - // to be rewritten to point at 'newTable' in the clone. 
- Replacer replacer{&arena, part, newTable}; - combineIntoTable(replacer, ttv, part); - } + itv->parts = std::move(newIntersection.parts); - itv->parts.push_back(newTable); - } + asMutable(ty)->normal = areNormal(itv->parts, seen, ice); - asMutable(ty)->normal = areNormal(itv->parts, seen, ice); - - if (itv->parts.size() == 1) - { - TypeId part = itv->parts[0]; - *asMutable(ty) = BoundTypeVar{part}; - } + if (itv->parts.size() == 1) + { + TypeId part = itv->parts[0]; + *asMutable(ty) = BoundTypeVar{part}; } return false; @@ -629,21 +571,18 @@ struct Normalize final : TypeVarVisitor table->props.insert({propName, prop}); } - if (FFlag::LuauFixNormalizationOfCyclicUnions) + if (tyTable->indexer) { - if (tyTable->indexer) + if (table->indexer) { - if (table->indexer) - { - table->indexer->indexType = combine(replacer, replacer.smartClone(tyTable->indexer->indexType), table->indexer->indexType); - table->indexer->indexResultType = - combine(replacer, replacer.smartClone(tyTable->indexer->indexResultType), table->indexer->indexResultType); - } - else - { - table->indexer = - TableIndexer{replacer.smartClone(tyTable->indexer->indexType), replacer.smartClone(tyTable->indexer->indexResultType)}; - } + table->indexer->indexType = combine(replacer, replacer.smartClone(tyTable->indexer->indexType), table->indexer->indexType); + table->indexer->indexResultType = + combine(replacer, replacer.smartClone(tyTable->indexer->indexResultType), table->indexer->indexResultType); + } + else + { + table->indexer = + TableIndexer{replacer.smartClone(tyTable->indexer->indexType), replacer.smartClone(tyTable->indexer->indexResultType)}; } } diff --git a/Analysis/src/ToString.cpp b/Analysis/src/ToString.cpp index 4ec2371f..8ab124ea 100644 --- a/Analysis/src/ToString.cpp +++ b/Analysis/src/ToString.cpp @@ -1512,6 +1512,15 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts) { return "call " + tos(c.fn, opts) + " with { result = " + tos(c.result, opts) + " }"; } + else if constexpr (std::is_same_v) + { + return tos(c.resultType, opts) + " ~ prim " + tos(c.expectedType, opts) + ", " + tos(c.singletonType, opts) + ", " + + tos(c.multitonType, opts); + } + else if constexpr (std::is_same_v) + { + return tos(c.resultType, opts) + " ~ hasProp " + tos(c.subjectType, opts) + ", \"" + c.prop + "\""; + } else static_assert(always_false_v, "Non-exhaustive constraint switch"); }; diff --git a/Analysis/src/TypeChecker2.cpp b/Analysis/src/TypeChecker2.cpp index 76b27acd..ea06882a 100644 --- a/Analysis/src/TypeChecker2.cpp +++ b/Analysis/src/TypeChecker2.cpp @@ -307,10 +307,9 @@ struct TypeChecker2 if (var->annotation) { TypeId varType = lookupAnnotation(var->annotation); - if (!isSubtype(*it, varType, stack.back(), singletonTypes, ice)) - { - reportError(TypeMismatch{varType, *it}, value->location); - } + ErrorVec errors = tryUnify(stack.back(), value->location, *it, varType); + if (!errors.empty()) + reportErrors(std::move(errors)); } ++it; @@ -325,7 +324,7 @@ struct TypeChecker2 if (var->annotation) { TypeId varType = lookupAnnotation(var->annotation); - if (!isSubtype(varType, valueType, stack.back(), singletonTypes, ice)) + if (!isSubtype(varType, valueType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { reportError(TypeMismatch{varType, valueType}, value->location); } @@ -540,7 +539,7 @@ struct TypeChecker2 visit(rhs); TypeId rhsType = lookupType(rhs); - if (!isSubtype(rhsType, lhsType, stack.back(), singletonTypes, ice)) + if (!isSubtype(rhsType, lhsType, stack.back(), singletonTypes, 
ice, /* anyIsTop */ false)) { reportError(TypeMismatch{lhsType, rhsType}, rhs->location); } @@ -691,7 +690,7 @@ struct TypeChecker2 TypeId actualType = lookupType(number); TypeId numberType = singletonTypes->numberType; - if (!isSubtype(numberType, actualType, stack.back(), singletonTypes, ice)) + if (!isSubtype(numberType, actualType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { reportError(TypeMismatch{actualType, numberType}, number->location); } @@ -702,7 +701,7 @@ struct TypeChecker2 TypeId actualType = lookupType(string); TypeId stringType = singletonTypes->stringType; - if (!isSubtype(stringType, actualType, stack.back(), singletonTypes, ice)) + if (!isSubtype(stringType, actualType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { reportError(TypeMismatch{actualType, stringType}, string->location); } @@ -762,7 +761,7 @@ struct TypeChecker2 FunctionTypeVar ftv{argsTp, expectedRetType}; TypeId expectedType = arena.addType(ftv); - if (!isSubtype(expectedType, instantiatedFunctionType, stack.back(), singletonTypes, ice)) + if (!isSubtype(instantiatedFunctionType, expectedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { CloneState cloneState; expectedType = clone(expectedType, module->internalTypes, cloneState); @@ -781,7 +780,7 @@ struct TypeChecker2 getIndexTypeFromType(module->getModuleScope(), leftType, indexName->index.value, indexName->location, /* addErrors */ true); if (ty) { - if (!isSubtype(resultType, *ty, stack.back(), singletonTypes, ice)) + if (!isSubtype(resultType, *ty, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { reportError(TypeMismatch{resultType, *ty}, indexName->location); } @@ -814,7 +813,7 @@ struct TypeChecker2 TypeId inferredArgTy = *argIt; TypeId annotatedArgTy = lookupAnnotation(arg->annotation); - if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back(), singletonTypes, ice)) + if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) { reportError(TypeMismatch{annotatedArgTy, inferredArgTy}, arg->location); } @@ -859,10 +858,10 @@ struct TypeChecker2 TypeId computedType = lookupType(expr->expr); // Note: As an optimization, we try 'number <: number | string' first, as that is the more likely case. - if (isSubtype(annotationType, computedType, stack.back(), singletonTypes, ice)) + if (isSubtype(annotationType, computedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) return; - if (isSubtype(computedType, annotationType, stack.back(), singletonTypes, ice)) + if (isSubtype(computedType, annotationType, stack.back(), singletonTypes, ice, /* anyIsTop */ false)) return; reportError(TypesAreUnrelated{computedType, annotationType}, expr->location); diff --git a/Ast/include/Luau/DenseHash.h b/Ast/include/Luau/DenseHash.h index f8543111..b222feb0 100644 --- a/Ast/include/Luau/DenseHash.h +++ b/Ast/include/Luau/DenseHash.h @@ -5,7 +5,6 @@ #include #include -#include #include #include @@ -35,30 +34,125 @@ public: class iterator; DenseHashTable(const Key& empty_key, size_t buckets = 0) - : count(0) + : data(nullptr) + , capacity(0) + , count(0) , empty_key(empty_key) { + // validate that equality operator is at least somewhat functional + LUAU_ASSERT(eq(empty_key, empty_key)); // buckets has to be power-of-two or zero LUAU_ASSERT((buckets & (buckets - 1)) == 0); - // don't move this to initializer list! 
this works around an MSVC codegen issue on AMD CPUs: - // https://developercommunity.visualstudio.com/t/stdvector-constructor-from-size-t-is-25-times-slow/1546547 if (buckets) - resize_data(buckets); + { + data = static_cast(::operator new(sizeof(Item) * buckets)); + capacity = buckets; + + ItemInterface::fill(data, buckets, empty_key); + } + } + + ~DenseHashTable() + { + if (data) + destroy(); + } + + DenseHashTable(const DenseHashTable& other) + : data(nullptr) + , capacity(0) + , count(other.count) + , empty_key(other.empty_key) + { + if (other.capacity) + { + data = static_cast(::operator new(sizeof(Item) * other.capacity)); + + for (size_t i = 0; i < other.capacity; ++i) + { + new (&data[i]) Item(other.data[i]); + capacity = i + 1; // if Item copy throws, capacity will note the number of initialized objects for destroy() to clean up + } + } + } + + DenseHashTable(DenseHashTable&& other) + : data(other.data) + , capacity(other.capacity) + , count(other.count) + , empty_key(other.empty_key) + { + other.data = nullptr; + other.capacity = 0; + other.count = 0; + } + + DenseHashTable& operator=(DenseHashTable&& other) + { + if (this != &other) + { + if (data) + destroy(); + + data = other.data; + capacity = other.capacity; + count = other.count; + empty_key = other.empty_key; + + other.data = nullptr; + other.capacity = 0; + other.count = 0; + } + + return *this; + } + + DenseHashTable& operator=(const DenseHashTable& other) + { + if (this != &other) + { + DenseHashTable copy(other); + *this = std::move(copy); + } + + return *this; } void clear() { - data.clear(); + if (count == 0) + return; + + if (capacity > 32) + { + destroy(); + } + else + { + ItemInterface::destroy(data, capacity); + ItemInterface::fill(data, capacity, empty_key); + } + count = 0; } + void destroy() + { + ItemInterface::destroy(data, capacity); + + ::operator delete(data); + data = nullptr; + + capacity = 0; + } + Item* insert_unsafe(const Key& key) { // It is invalid to insert empty_key into the table since it acts as a "entry does not exist" marker LUAU_ASSERT(!eq(key, empty_key)); - size_t hashmod = data.size() - 1; + size_t hashmod = capacity - 1; size_t bucket = hasher(key) & hashmod; for (size_t probe = 0; probe <= hashmod; ++probe) @@ -90,12 +184,12 @@ public: const Item* find(const Key& key) const { - if (data.empty()) + if (count == 0) return 0; if (eq(key, empty_key)) return 0; - size_t hashmod = data.size() - 1; + size_t hashmod = capacity - 1; size_t bucket = hasher(key) & hashmod; for (size_t probe = 0; probe <= hashmod; ++probe) @@ -121,18 +215,11 @@ public: void rehash() { - size_t newsize = data.empty() ? 16 : data.size() * 2; - - if (data.empty() && data.capacity() >= newsize) - { - LUAU_ASSERT(count == 0); - resize_data(newsize); - return; - } + size_t newsize = capacity == 0 ? 
16 : capacity * 2; DenseHashTable newtable(empty_key, newsize); - for (size_t i = 0; i < data.size(); ++i) + for (size_t i = 0; i < capacity; ++i) { const Key& key = ItemInterface::getKey(data[i]); @@ -144,12 +231,14 @@ public: } LUAU_ASSERT(count == newtable.count); - data.swap(newtable.data); + + std::swap(data, newtable.data); + std::swap(capacity, newtable.capacity); } void rehash_if_full() { - if (count >= data.size() * 3 / 4) + if (count >= capacity * 3 / 4) { rehash(); } @@ -159,7 +248,7 @@ public: { size_t start = 0; - while (start < data.size() && eq(ItemInterface::getKey(data[start]), empty_key)) + while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key)) start++; return const_iterator(this, start); @@ -167,14 +256,14 @@ public: const_iterator end() const { - return const_iterator(this, data.size()); + return const_iterator(this, capacity); } iterator begin() { size_t start = 0; - while (start < data.size() && eq(ItemInterface::getKey(data[start]), empty_key)) + while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key)) start++; return iterator(this, start); @@ -182,7 +271,7 @@ public: iterator end() { - return iterator(this, data.size()); + return iterator(this, capacity); } size_t size() const @@ -227,7 +316,7 @@ public: const_iterator& operator++() { - size_t size = set->data.size(); + size_t size = set->capacity; do { @@ -286,7 +375,7 @@ public: iterator& operator++() { - size_t size = set->data.size(); + size_t size = set->capacity; do { @@ -309,23 +398,8 @@ public: }; private: - template - void resize_data(size_t count, typename std::enable_if_t>* dummy = nullptr) - { - data.resize(count, ItemInterface::create(empty_key)); - } - - template - void resize_data(size_t count, typename std::enable_if_t>* dummy = nullptr) - { - size_t size = data.size(); - data.resize(count); - - for (size_t i = size; i < count; i++) - data[i].first = empty_key; - } - - std::vector data; + Item* data; + size_t capacity; size_t count; Key empty_key; Hash hasher; @@ -345,9 +419,16 @@ struct ItemInterfaceSet item = key; } - static Key create(const Key& key) + static void fill(Key* data, size_t count, const Key& key) { - return key; + for (size_t i = 0; i < count; ++i) + new (&data[i]) Key(key); + } + + static void destroy(Key* data, size_t count) + { + for (size_t i = 0; i < count; ++i) + data[i].~Key(); } }; @@ -364,9 +445,22 @@ struct ItemInterfaceMap item.first = key; } - static std::pair create(const Key& key) + static void fill(std::pair* data, size_t count, const Key& key) { - return std::pair(key, Value()); + for (size_t i = 0; i < count; ++i) + { + new (&data[i].first) Key(key); + new (&data[i].second) Value(); + } + } + + static void destroy(std::pair* data, size_t count) + { + for (size_t i = 0; i < count; ++i) + { + data[i].first.~Key(); + data[i].second.~Value(); + } } }; diff --git a/Ast/include/Luau/Lexer.h b/Ast/include/Luau/Lexer.h index 7e7fe76b..929402b3 100644 --- a/Ast/include/Luau/Lexer.h +++ b/Ast/include/Luau/Lexer.h @@ -6,6 +6,8 @@ #include "Luau/DenseHash.h" #include "Luau/Common.h" +#include + namespace Luau { diff --git a/Ast/src/Parser.cpp b/Ast/src/Parser.cpp index 0914054f..cf3eaaae 100644 --- a/Ast/src/Parser.cpp +++ b/Ast/src/Parser.cpp @@ -20,7 +20,6 @@ LUAU_DYNAMIC_FASTFLAGVARIABLE(LuaReportParseWrongNamedType, false) bool lua_telemetry_parsed_named_non_function_type = false; LUAU_FASTFLAGVARIABLE(LuauErrorDoubleHexPrefix, false) -LUAU_FASTFLAGVARIABLE(LuauLintParseIntegerIssues, false) 
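// Regarding the DenseHash.h rewrite above: the table no longer keeps its items in a
// std::vector. Storage is raw memory obtained from ::operator new, slots are constructed
// with placement new (ItemInterface::fill) and torn down explicitly (ItemInterface::destroy),
// which is also why the copy constructor tracks how many slots were initialized before a
// potential exception. A self-contained illustration of that allocate/construct/destroy
// pattern (Widget and the capacity are placeholders, not taken from the patch):

#include <cstddef>
#include <new>
#include <string>

struct Widget
{
    std::string name;
};

void rawBufferExample()
{
    const std::size_t capacity = 4;

    // allocate raw, uninitialized storage -- analogous to the DenseHashTable constructor
    Widget* data = static_cast<Widget*>(::operator new(sizeof(Widget) * capacity));

    // construct every slot in place -- analogous to ItemInterface::fill
    for (std::size_t i = 0; i < capacity; ++i)
        new (&data[i]) Widget{"empty"};

    // run destructors and release the buffer -- analogous to ItemInterface::destroy + destroy()
    for (std::size_t i = 0; i < capacity; ++i)
        data[i].~Widget();

    ::operator delete(data);
}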
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuaReportParseIntegerIssues, false) LUAU_FASTFLAGVARIABLE(LuauInterpolatedStringBaseSupport, false) @@ -2070,95 +2069,8 @@ AstExpr* Parser::parseAssertionExpr() return expr; } -static const char* parseInteger_DEPRECATED(double& result, const char* data, int base) -{ - LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues); - - char* end = nullptr; - unsigned long long value = strtoull(data, &end, base); - - if (value == ULLONG_MAX && errno == ERANGE) - { - // 'errno' might have been set before we called 'strtoull', but we don't want the overhead of resetting a TLS variable on each call - // so we only reset it when we get a result that might be an out-of-range error and parse again to make sure - errno = 0; - value = strtoull(data, &end, base); - - if (errno == ERANGE) - { - if (DFFlag::LuaReportParseIntegerIssues) - { - if (base == 2) - lua_telemetry_parsed_out_of_range_bin_integer = true; - else - lua_telemetry_parsed_out_of_range_hex_integer = true; - } - } - } - - result = double(value); - return *end == 0 ? nullptr : "Malformed number"; -} - -static const char* parseNumber_DEPRECATED2(double& result, const char* data) -{ - LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues); - - // binary literal - if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2]) - return parseInteger_DEPRECATED(result, data + 2, 2); - - // hexadecimal literal - if (data[0] == '0' && (data[1] == 'x' || data[1] == 'X') && data[2]) - { - if (DFFlag::LuaReportParseIntegerIssues && data[2] == '0' && (data[3] == 'x' || data[3] == 'X')) - lua_telemetry_parsed_double_prefix_hex_integer = true; - - return parseInteger_DEPRECATED(result, data + 2, 16); - } - - char* end = nullptr; - double value = strtod(data, &end); - - result = value; - return *end == 0 ? 
nullptr : "Malformed number"; -} - -static bool parseNumber_DEPRECATED(double& result, const char* data) -{ - LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues); - - // binary literal - if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2]) - { - char* end = nullptr; - unsigned long long value = strtoull(data + 2, &end, 2); - - result = double(value); - return *end == 0; - } - // hexadecimal literal - else if (data[0] == '0' && (data[1] == 'x' || data[1] == 'X') && data[2]) - { - char* end = nullptr; - unsigned long long value = strtoull(data + 2, &end, 16); - - result = double(value); - return *end == 0; - } - else - { - char* end = nullptr; - double value = strtod(data, &end); - - result = value; - return *end == 0; - } -} - static ConstantNumberParseResult parseInteger(double& result, const char* data, int base) { - LUAU_ASSERT(FFlag::LuauLintParseIntegerIssues); LUAU_ASSERT(base == 2 || base == 16); char* end = nullptr; @@ -2195,8 +2107,6 @@ static ConstantNumberParseResult parseInteger(double& result, const char* data, static ConstantNumberParseResult parseDouble(double& result, const char* data) { - LUAU_ASSERT(FFlag::LuauLintParseIntegerIssues); - // binary literal if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2]) return parseInteger(result, data + 2, 2); @@ -2771,49 +2681,14 @@ AstExpr* Parser::parseNumber() scratchData.erase(std::remove(scratchData.begin(), scratchData.end(), '_'), scratchData.end()); } - if (FFlag::LuauLintParseIntegerIssues) - { - double value = 0; - ConstantNumberParseResult result = parseDouble(value, scratchData.c_str()); - nextLexeme(); + double value = 0; + ConstantNumberParseResult result = parseDouble(value, scratchData.c_str()); + nextLexeme(); - if (result == ConstantNumberParseResult::Malformed) - return reportExprError(start, {}, "Malformed number"); + if (result == ConstantNumberParseResult::Malformed) + return reportExprError(start, {}, "Malformed number"); - return allocator.alloc(start, value, result); - } - else if (DFFlag::LuaReportParseIntegerIssues) - { - double value = 0; - if (const char* error = parseNumber_DEPRECATED2(value, scratchData.c_str())) - { - nextLexeme(); - - return reportExprError(start, {}, "%s", error); - } - else - { - nextLexeme(); - - return allocator.alloc(start, value); - } - } - else - { - double value = 0; - if (parseNumber_DEPRECATED(value, scratchData.c_str())) - { - nextLexeme(); - - return allocator.alloc(start, value); - } - else - { - nextLexeme(); - - return reportExprError(start, {}, "Malformed number"); - } - } + return allocator.alloc(start, value, result); } AstLocal* Parser::pushLocal(const Binding& binding) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3dafe5fe..43289f41 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -51,6 +51,8 @@ if(LUAU_BUILD_WEB) add_executable(Luau.Web) endif() +# Proxy target to make it possible to depend on private VM headers +add_library(Luau.VM.Internals INTERFACE) include(Sources.cmake) @@ -70,6 +72,7 @@ target_link_libraries(Luau.Analysis PUBLIC Luau.Ast) target_compile_features(Luau.CodeGen PRIVATE cxx_std_17) target_include_directories(Luau.CodeGen PUBLIC CodeGen/include) +target_link_libraries(Luau.CodeGen PRIVATE Luau.VM Luau.VM.Internals) # Code generation needs VM internals target_link_libraries(Luau.CodeGen PUBLIC Luau.Common) target_compile_features(Luau.VM PRIVATE cxx_std_11) @@ -78,6 +81,8 @@ target_link_libraries(Luau.VM PUBLIC Luau.Common) target_include_directories(isocline PUBLIC extern/isocline/include) 
+target_include_directories(Luau.VM.Internals INTERFACE VM/src) + set(LUAU_OPTIONS) if(MSVC) diff --git a/CodeGen/include/Luau/UnwindBuilderDwarf2.h b/CodeGen/include/Luau/UnwindBuilderDwarf2.h index 09c91d43..25dbc55b 100644 --- a/CodeGen/include/Luau/UnwindBuilderDwarf2.h +++ b/CodeGen/include/Luau/UnwindBuilderDwarf2.h @@ -27,13 +27,13 @@ public: private: static const unsigned kRawDataLimit = 128; - char rawData[kRawDataLimit]; - char* pos = rawData; + uint8_t rawData[kRawDataLimit]; + uint8_t* pos = rawData; uint32_t stackOffset = 0; // We will remember the FDE location to write some of the fields like entry length, function start and size later - char* fdeEntryStart = nullptr; + uint8_t* fdeEntryStart = nullptr; }; } // namespace CodeGen diff --git a/CodeGen/src/AssemblyBuilderX64.cpp b/CodeGen/src/AssemblyBuilderX64.cpp index 0fd10328..32325b0d 100644 --- a/CodeGen/src/AssemblyBuilderX64.cpp +++ b/CodeGen/src/AssemblyBuilderX64.cpp @@ -1,6 +1,8 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #include "Luau/AssemblyBuilderX64.h" +#include "ByteUtils.h" + #include #include #include @@ -46,44 +48,6 @@ const unsigned AVX_F2 = 0b11; const unsigned kMaxAlign = 16; -// Utility functions to correctly write data on big endian machines -#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -#include - -static void writeu32(uint8_t* target, uint32_t value) -{ - value = htole32(value); - memcpy(target, &value, sizeof(value)); -} - -static void writeu64(uint8_t* target, uint64_t value) -{ - value = htole64(value); - memcpy(target, &value, sizeof(value)); -} - -static void writef32(uint8_t* target, float value) -{ - static_assert(sizeof(float) == sizeof(uint32_t), "type size must match to reinterpret data"); - uint32_t data; - memcpy(&data, &value, sizeof(value)); - writeu32(target, data); -} - -static void writef64(uint8_t* target, double value) -{ - static_assert(sizeof(double) == sizeof(uint64_t), "type size must match to reinterpret data"); - uint64_t data; - memcpy(&data, &value, sizeof(value)); - writeu64(target, data); -} -#else -#define writeu32(target, value) memcpy(target, &value, sizeof(value)) -#define writeu64(target, value) memcpy(target, &value, sizeof(value)) -#define writef32(target, value) memcpy(target, &value, sizeof(value)) -#define writef64(target, value) memcpy(target, &value, sizeof(value)) -#endif - AssemblyBuilderX64::AssemblyBuilderX64(bool logText) : logText(logText) { @@ -1014,16 +978,14 @@ void AssemblyBuilderX64::placeImm32(int32_t imm) { uint8_t* pos = codePos; LUAU_ASSERT(pos + sizeof(imm) < codeEnd); - writeu32(pos, imm); - codePos = pos + sizeof(imm); + codePos = writeu32(pos, imm); } void AssemblyBuilderX64::placeImm64(int64_t imm) { uint8_t* pos = codePos; LUAU_ASSERT(pos + sizeof(imm) < codeEnd); - writeu64(pos, imm); - codePos = pos + sizeof(imm); + codePos = writeu64(pos, imm); } void AssemblyBuilderX64::placeLabel(Label& label) diff --git a/CodeGen/src/ByteUtils.h b/CodeGen/src/ByteUtils.h new file mode 100644 index 00000000..1e1e341d --- /dev/null +++ b/CodeGen/src/ByteUtils.h @@ -0,0 +1,78 @@ +// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details +#pragma once + +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#include +#endif + +#include + +inline uint8_t* writeu8(uint8_t* target, uint8_t value) +{ + *target = value; + return target + sizeof(value); +} + +inline uint8_t* 
writeu32(uint8_t* target, uint32_t value) +{ +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + value = htole32(value); +#endif + + memcpy(target, &value, sizeof(value)); + return target + sizeof(value); +} + +inline uint8_t* writeu64(uint8_t* target, uint64_t value) +{ +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + value = htole64(value); +#endif + + memcpy(target, &value, sizeof(value)); + return target + sizeof(value); +} + +inline uint8_t* writeuleb128(uint8_t* target, uint64_t value) +{ + do + { + uint8_t byte = value & 0x7f; + value >>= 7; + + if (value) + byte |= 0x80; + + *target++ = byte; + } while (value); + + return target; +} + +inline uint8_t* writef32(uint8_t* target, float value) +{ +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + static_assert(sizeof(float) == sizeof(uint32_t), "type size must match to reinterpret data"); + uint32_t data; + memcpy(&data, &value, sizeof(value)); + writeu32(target, data); +#else + memcpy(target, &value, sizeof(value)); +#endif + + return target + sizeof(value); +} + +inline uint8_t* writef64(uint8_t* target, double value) +{ +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + static_assert(sizeof(double) == sizeof(uint64_t), "type size must match to reinterpret data"); + uint64_t data; + memcpy(&data, &value, sizeof(value)); + writeu64(target, data); +#else + memcpy(target, &value, sizeof(value)); +#endif + + return target + sizeof(value); +} diff --git a/CodeGen/src/Fallbacks.cpp b/CodeGen/src/Fallbacks.cpp new file mode 100644 index 00000000..3893d349 --- /dev/null +++ b/CodeGen/src/Fallbacks.cpp @@ -0,0 +1,2511 @@ +// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details +// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details +// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand +#include "Fallbacks.h" +#include "FallbacksProlog.h" + +const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + LUAU_ASSERT(insn == 0); + return pc; +} + +const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + setnilvalue(ra); + return pc; +} + +const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + setbvalue(ra, LUAU_INSN_B(insn)); + + pc += LUAU_INSN_C(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + setnvalue(ra, LUAU_INSN_D(insn)); + return pc; +} + +const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(LUAU_INSN_D(insn)); + + setobj2s(L, ra, kv); + return pc; +} + +const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + + setobj2s(L, ra, 
rb); + return pc; +} + +const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + LUAU_ASSERT(ttisstring(kv)); + + // fast-path: value is in expected slot + Table* h = cl->env; + int slot = LUAU_INSN_C(insn) & h->nodemask8; + LuaNode* n = &h->node[slot]; + + if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv)) && !ttisnil(gval(n))) + { + setobj2s(L, ra, gval(n)); + return pc; + } + else + { + // slow-path, may invoke Lua calls via __index metamethod + TValue g; + sethvalue(L, &g, h); + L->cachedslot = slot; + VM_PROTECT(luaV_gettable(L, &g, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } +} + +const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + LUAU_ASSERT(ttisstring(kv)); + + // fast-path: value is in expected slot + Table* h = cl->env; + int slot = LUAU_INSN_C(insn) & h->nodemask8; + LuaNode* n = &h->node[slot]; + + if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly)) + { + setobj2t(L, gval(n), ra); + luaC_barriert(L, h, ra); + return pc; + } + else + { + // slow-path, may invoke Lua calls via __newindex metamethod + TValue g; + sethvalue(L, &g, h); + L->cachedslot = slot; + VM_PROTECT(luaV_settable(L, &g, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } +} + +const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* ur = VM_UV(LUAU_INSN_B(insn)); + TValue* v = ttisupval(ur) ? 
upvalue(ur)->v : ur; + + setobj2s(L, ra, v); + return pc; +} + +const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* ur = VM_UV(LUAU_INSN_B(insn)); + UpVal* uv = upvalue(ur); + + setobj(L, uv->v, ra); + luaC_barrier(L, uv, ra); + return pc; +} + +const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + if (L->openupval && L->openupval->v >= ra) + luaF_close(L, ra); + return pc; +} + +const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(LUAU_INSN_D(insn)); + + // fast-path: import resolution was successful and closure environment is "safe" for import + if (!ttisnil(kv) && cl->env->safeenv) + { + setobj2s(L, ra, kv); + pc++; // skip over AUX + return pc; + } + else + { + uint32_t aux = *pc++; + + VM_PROTECT(luaV_getimport(L, cl->env, k, aux, /* propagatenil= */ false)); + ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack + + setobj2s(L, ra, L->top - 1); + L->top--; + return pc; + } +} + +const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + LUAU_ASSERT(ttisstring(kv)); + + // fast-path: built-in table + if (ttistable(rb)) + { + Table* h = hvalue(rb); + + int slot = LUAU_INSN_C(insn) & h->nodemask8; + LuaNode* n = &h->node[slot]; + + // fast-path: value is in expected slot + if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))) + { + setobj2s(L, ra, gval(n)); + return pc; + } + else if (!h->metatable) + { + // fast-path: value is not in expected slot, but the table lookup doesn't involve metatable + const TValue* res = luaH_getstr(h, tsvalue(kv)); + + if (res != luaO_nilobject) + { + int cachedslot = gval2slot(h, res); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, cachedslot); + } + + setobj2s(L, ra, res); + return pc; + } + else + { + // slow-path, may invoke Lua calls via __index metamethod + L->cachedslot = slot; + VM_PROTECT(luaV_gettable(L, rb, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } + } + else + { + // fast-path: user data with C __index TM + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, kv); + L->top = top + 3; + + L->cachedslot = LUAU_INSN_C(insn); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } + else if (ttisvector(rb)) + { + // 
fast-path: quick case-insensitive comparison with "X"/"Y"/"Z" + const char* name = getstr(tsvalue(kv)); + int ic = (name[0] | ' ') - 'x'; + +#if LUA_VECTOR_SIZE == 4 + // 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w' + if (ic == -1) + ic = 3; +#endif + + if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0') + { + const float* v = rb->value.v; // silences ubsan when indexing v[] + setnvalue(ra, v[ic]); + return pc; + } + + fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX); + + if (fn && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, kv); + L->top = top + 3; + + L->cachedslot = LUAU_INSN_C(insn); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } + + // fall through to slow path + } + + // fall through to slow path + } + + // slow-path, may invoke Lua calls via __index metamethod + VM_PROTECT(luaV_gettable(L, rb, kv, ra)); + return pc; +} + +const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + LUAU_ASSERT(ttisstring(kv)); + + // fast-path: built-in table + if (ttistable(rb)) + { + Table* h = hvalue(rb); + + int slot = LUAU_INSN_C(insn) & h->nodemask8; + LuaNode* n = &h->node[slot]; + + // fast-path: value is in expected slot + if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly)) + { + setobj2t(L, gval(n), ra); + luaC_barriert(L, h, ra); + return pc; + } + else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly) + { + VM_PROTECT_PC(); // set may fail + + TValue* res = luaH_setstr(L, h, tsvalue(kv)); + int cachedslot = gval2slot(h, res); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, cachedslot); + setobj2t(L, res, ra); + luaC_barriert(L, h, ra); + return pc; + } + else + { + // slow-path, may invoke Lua calls via __newindex metamethod + L->cachedslot = slot; + VM_PROTECT(luaV_settable(L, rb, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } + } + else + { + // fast-path: user data with C __newindex TM + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, kv); + setobj2s(L, top + 3, ra); + L->top = top + 4; + + L->cachedslot = LUAU_INSN_C(insn); + VM_PROTECT(luaV_callTM(L, 3, -1)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + return pc; + } + else + { + // slow-path, may invoke Lua calls via __newindex 
metamethod + VM_PROTECT(luaV_settable(L, rb, kv, ra)); + return pc; + } + } +} + +const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path: array lookup + if (ttistable(rb) && ttisnumber(rc)) + { + Table* h = hvalue(rb); + + double indexd = nvalue(rc); + int index = int(indexd); + + // index has to be an exact integer and in-bounds for the array portion + if (LUAU_LIKELY(unsigned(index - 1) < unsigned(h->sizearray) && !h->metatable && double(index) == indexd)) + { + setobj2s(L, ra, &h->array[unsigned(index - 1)]); + return pc; + } + + // fall through to slow path + } + + // slow-path: handles out of bounds array lookups, non-integer numeric keys, non-array table lookup, __index MT calls + VM_PROTECT(luaV_gettable(L, rb, rc, ra)); + return pc; +} + +const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path: array assign + if (ttistable(rb) && ttisnumber(rc)) + { + Table* h = hvalue(rb); + + double indexd = nvalue(rc); + int index = int(indexd); + + // index has to be an exact integer and in-bounds for the array portion + if (LUAU_LIKELY(unsigned(index - 1) < unsigned(h->sizearray) && !h->metatable && !h->readonly && double(index) == indexd)) + { + setobj2t(L, &h->array[unsigned(index - 1)], ra); + luaC_barriert(L, h, ra); + return pc; + } + + // fall through to slow path + } + + // slow-path: handles out of bounds array assignments, non-integer numeric keys, non-array table access, __newindex MT calls + VM_PROTECT(luaV_settable(L, rb, rc, ra)); + return pc; +} + +const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + int c = LUAU_INSN_C(insn); + + // fast-path: array lookup + if (ttistable(rb)) + { + Table* h = hvalue(rb); + + if (LUAU_LIKELY(unsigned(c) < unsigned(h->sizearray) && !h->metatable)) + { + setobj2s(L, ra, &h->array[c]); + return pc; + } + + // fall through to slow path + } + + // slow-path: handles out of bounds array lookups + TValue n; + setnvalue(&n, c + 1); + VM_PROTECT(luaV_gettable(L, rb, &n, ra)); + return pc; +} + +const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + int c = LUAU_INSN_C(insn); + + // fast-path: array assign + if (ttistable(rb)) + { + Table* h = hvalue(rb); + + if (LUAU_LIKELY(unsigned(c) < unsigned(h->sizearray) && !h->metatable && !h->readonly)) + { + setobj2t(L, &h->array[c], ra); + luaC_barriert(L, h, ra); + return pc; + } + + // fall through to slow path + } + + // slow-path: handles out of bounds array lookups + TValue n; + setnvalue(&n, c + 1); + VM_PROTECT(luaV_settable(L, rb, &n, ra)); + return pc; +} + +const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)]; + 
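+ // LUAU_INSN_D(insn) is an index into the current proto's child proto list;
+ // the assert below validates it against sizep before the new closure is built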
LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep)); + + // note: we save closure to stack early in case the code below wants to capture it by value + Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv); + setclvalue(L, ra, ncl); + + for (int ui = 0; ui < pv->nups; ++ui) + { + Instruction uinsn = *pc++; + LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE); + + switch (LUAU_INSN_A(uinsn)) + { + case LCT_VAL: + setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn))); + break; + + case LCT_REF: + setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn)))); + break; + + case LCT_UPVAL: + setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn))); + break; + + default: + LUAU_ASSERT(!"Unknown upvalue capture type"); + } + } + + VM_PROTECT(luaC_checkGC(L)); + return pc; +} + +const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + LUAU_ASSERT(ttisstring(kv)); + + if (ttistable(rb)) + { + Table* h = hvalue(rb); + // note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works + // for predictive lookups + LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)]; + + const TValue* mt = 0; + const LuaNode* mtn = 0; + + // fast-path: key is in the table in expected slot + if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))) + { + // note: order of copies allows rb to alias ra+1 or ra + setobj2s(L, ra + 1, rb); + setobj2s(L, ra, gval(n)); + } + // fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot + else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) && + (mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) && tsvalue(gkey(mtn)) == tsvalue(kv) && + !ttisnil(gval(mtn))) + { + // note: order of copies allows rb to alias ra+1 or ra + setobj2s(L, ra + 1, rb); + setobj2s(L, ra, gval(mtn)); + } + else + { + // slow-path: handles full table lookup + setobj2s(L, ra + 1, rb); + L->cachedslot = LUAU_INSN_C(insn); + VM_PROTECT(luaV_gettable(L, rb, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + // recompute ra since stack might have been reallocated + ra = VM_REG(LUAU_INSN_A(insn)); + if (ttisnil(ra)) + luaG_methoderror(L, ra + 1, tsvalue(kv)); + } + } + else + { + Table* mt = ttisuserdata(rb) ? 
uvalue(rb)->metatable : L->global->mt[ttype(rb)]; + const TValue* tmi = 0; + + // fast-path: metatable with __namecall + if (const TValue* fn = fasttm(L, mt, TM_NAMECALL)) + { + // note: order of copies allows rb to alias ra+1 or ra + setobj2s(L, ra + 1, rb); + setobj2s(L, ra, fn); + + L->namecall = tsvalue(kv); + } + else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi)) + { + Table* h = hvalue(tmi); + int slot = LUAU_INSN_C(insn) & h->nodemask8; + LuaNode* n = &h->node[slot]; + + // fast-path: metatable with __index that has method in expected slot + if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))) + { + // note: order of copies allows rb to alias ra+1 or ra + setobj2s(L, ra + 1, rb); + setobj2s(L, ra, gval(n)); + } + else + { + // slow-path: handles slot mismatch + setobj2s(L, ra + 1, rb); + L->cachedslot = slot; + VM_PROTECT(luaV_gettable(L, rb, kv, ra)); + // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ + VM_PATCH_C(pc - 2, L->cachedslot); + // recompute ra since stack might have been reallocated + ra = VM_REG(LUAU_INSN_A(insn)); + if (ttisnil(ra)) + luaG_methoderror(L, ra + 1, tsvalue(kv)); + } + } + else + { + // slow-path: handles non-table __index + setobj2s(L, ra + 1, rb); + VM_PROTECT(luaV_gettable(L, rb, kv, ra)); + // recompute ra since stack might have been reallocated + ra = VM_REG(LUAU_INSN_A(insn)); + if (ttisnil(ra)) + luaG_methoderror(L, ra + 1, tsvalue(kv)); + } + } + + // intentional fallthrough to CALL + LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL); + return pc; +} + +const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + int nparams = LUAU_INSN_B(insn) - 1; + int nresults = LUAU_INSN_C(insn) - 1; + + StkId argtop = L->top; + argtop = (nparams == LUA_MULTRET) ? argtop : ra + 1 + nparams; + + // slow-path: not a function call + if (LUAU_UNLIKELY(!ttisfunction(ra))) + { + VM_PROTECT(luaV_tryfuncTM(L, ra)); + argtop++; // __call adds an extra self + } + + Closure* ccl = clvalue(ra); + L->ci->savedpc = pc; + + CallInfo* ci = incr_ci(L); + ci->func = ra; + ci->base = ra + 1; + ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet + ci->savedpc = NULL; + ci->flags = 0; + ci->nresults = nresults; + + L->base = ci->base; + L->top = argtop; + + // note: this reallocs stack, but we don't need to VM_PROTECT this + // this is because we're going to modify base/savedpc manually anyhow + // crucially, we can't use ra/argtop after this line + luaD_checkstack(L, ccl->stacksize); + + LUAU_ASSERT(ci->top <= L->stack_last); + + if (!ccl->isC) + { + Proto* p = ccl->l.p; + + // fill unused parameters with nil + StkId argi = L->top; + StkId argend = L->base + p->numparams; + while (argi < argend) + setnilvalue(argi++); // complete missing arguments + L->top = p->is_vararg ? 
argi : ci->top; + + // reentry + pc = p->code; + cl = ccl; + base = L->base; + k = p->k; + return pc; + } + else + { + lua_CFunction func = ccl->c.f; + int n = func(L); + + // yield + if (n < 0) + return NULL; + + // ci is our callinfo, cip is our parent + CallInfo* ci = L->ci; + CallInfo* cip = ci - 1; + + // copy return values into parent stack (but only up to nresults!), fill the rest with nil + // note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally + StkId res = ci->func; + StkId vali = L->top - n; + StkId valend = L->top; + + int i; + for (i = nresults; i != 0 && vali < valend; i--) + setobjs2s(L, res++, vali++); + while (i-- > 0) + setnilvalue(res++); + + // pop the stack frame + L->ci = cip; + L->base = cip->base; + L->top = (nresults == LUA_MULTRET) ? res : cip->top; + + base = L->base; // stack may have been reallocated, so we need to refresh base ptr + return pc; + } +} + +const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + StkId ra = &base[LUAU_INSN_A(insn)]; // note: this can point to L->top if b == LUA_MULTRET making VM_REG unsafe to use + int b = LUAU_INSN_B(insn) - 1; + + // ci is our callinfo, cip is our parent + CallInfo* ci = L->ci; + CallInfo* cip = ci - 1; + + StkId res = ci->func; // note: we assume CALL always puts func+args and expects results to start at func + + StkId vali = ra; + StkId valend = (b == LUA_MULTRET) ? L->top : ra + b; // copy as much as possible for MULTRET calls, and only as much as needed otherwise + + int nresults = ci->nresults; + + // copy return values into parent stack (but only up to nresults!), fill the rest with nil + // note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally + int i; + for (i = nresults; i != 0 && vali < valend; i--) + setobjs2s(L, res++, vali++); + while (i-- > 0) + setnilvalue(res++); + + // pop the stack frame + L->ci = cip; + L->base = cip->base; + L->top = (nresults == LUA_MULTRET) ? res : cip->top; + + // we're done! + if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN)) + { + L->top = res; + return NULL; + } + + LUAU_ASSERT(isLua(L->ci)); + + Closure* nextcl = clvalue(cip->func); + Proto* nextproto = nextcl->l.p; + + // reentry + pc = cip->savedpc; + cl = nextcl; + base = L->base; + k = nextproto->k; + return pc; +} + +const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + pc += l_isfalse(ra) ? 0 : LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + pc += l_isfalse(ra) ? 
LUAU_INSN_D(insn) : 0; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // Note that all jumps below jump by 1 in the "false" case to skip over aux + if (ttype(ra) == ttype(rb)) + { + switch (ttype(ra)) + { + case LUA_TNIL: + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TBOOLEAN: + pc += bvalue(ra) == bvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TLIGHTUSERDATA: + pc += pvalue(ra) == pvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TNUMBER: + pc += nvalue(ra) == nvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TVECTOR: + pc += luai_veceq(vvalue(ra), vvalue(rb)) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TSTRING: + case LUA_TFUNCTION: + case LUA_TTHREAD: + pc += gcvalue(ra) == gcvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TTABLE: + // fast-path: same metatable, no EQ metamethod + if (hvalue(ra)->metatable == hvalue(rb)->metatable) + { + const TValue* fn = fasttm(L, hvalue(ra)->metatable, TM_EQ); + + if (!fn) + { + pc += hvalue(ra) == hvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + } + // slow path after switch() + break; + + case LUA_TUSERDATA: + // fast-path: same metatable, no EQ metamethod or C metamethod + if (uvalue(ra)->metatable == uvalue(rb)->metatable) + { + const TValue* fn = fasttm(L, uvalue(ra)->metatable, TM_EQ); + + if (!fn) + { + pc += uvalue(ra) == uvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else if (ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, ra); + setobj2s(L, top + 2, rb); + int res = int(top - base); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, res)); + pc += !l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + } + // slow path after switch() + break; + + default:; + } + + // slow-path: tables with metatables and userdata values + // note that we don't have a fast path for userdata values without metatables, since that's very rare + int res; + VM_PROTECT(res = luaV_equalval(L, ra, rb)); + + pc += (res == 1) ? 
LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + pc += 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // Note that all jumps below jump by 1 in the "true" case to skip over aux + if (ttype(ra) == ttype(rb)) + { + switch (ttype(ra)) + { + case LUA_TNIL: + pc += 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TBOOLEAN: + pc += bvalue(ra) != bvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TLIGHTUSERDATA: + pc += pvalue(ra) != pvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TNUMBER: + pc += nvalue(ra) != nvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TVECTOR: + pc += !luai_veceq(vvalue(ra), vvalue(rb)) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TSTRING: + case LUA_TFUNCTION: + case LUA_TTHREAD: + pc += gcvalue(ra) != gcvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + + case LUA_TTABLE: + // fast-path: same metatable, no EQ metamethod + if (hvalue(ra)->metatable == hvalue(rb)->metatable) + { + const TValue* fn = fasttm(L, hvalue(ra)->metatable, TM_EQ); + + if (!fn) + { + pc += hvalue(ra) != hvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + } + // slow path after switch() + break; + + case LUA_TUSERDATA: + // fast-path: same metatable, no EQ metamethod or C metamethod + if (uvalue(ra)->metatable == uvalue(rb)->metatable) + { + const TValue* fn = fasttm(L, uvalue(ra)->metatable, TM_EQ); + + if (!fn) + { + pc += uvalue(ra) != uvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else if (ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, ra); + setobj2s(L, top + 2, rb); + int res = int(top - base); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, res)); + pc += l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + } + // slow path after switch() + break; + + default:; + } + + // slow-path: tables with metatables and userdata values + // note that we don't have a fast path for userdata values without metatables, since that's very rare + int res; + VM_PROTECT(res = luaV_equalval(L, ra, rb)); + + pc += (res == 0) ? 
LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // fast-path: number + // Note that all jumps below jump by 1 in the "false" case to skip over aux + if (ttisnumber(ra) && ttisnumber(rb)) + { + pc += nvalue(ra) <= nvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + // fast-path: string + else if (ttisstring(ra) && ttisstring(rb)) + { + pc += luaV_strcmp(tsvalue(ra), tsvalue(rb)) <= 0 ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + int res; + VM_PROTECT(res = luaV_lessequal(L, ra, rb)); + + pc += (res == 1) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // fast-path: number + // Note that all jumps below jump by 1 in the "true" case to skip over aux + if (ttisnumber(ra) && ttisnumber(rb)) + { + pc += !(nvalue(ra) <= nvalue(rb)) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + // fast-path: string + else if (ttisstring(ra) && ttisstring(rb)) + { + pc += !(luaV_strcmp(tsvalue(ra), tsvalue(rb)) <= 0) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + int res; + VM_PROTECT(res = luaV_lessequal(L, ra, rb)); + + pc += (res == 0) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // fast-path: number + // Note that all jumps below jump by 1 in the "false" case to skip over aux + if (ttisnumber(ra) && ttisnumber(rb)) + { + pc += nvalue(ra) < nvalue(rb) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + // fast-path: string + else if (ttisstring(ra) && ttisstring(rb)) + { + pc += luaV_strcmp(tsvalue(ra), tsvalue(rb)) < 0 ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + int res; + VM_PROTECT(res = luaV_lessthan(L, ra, rb)); + + pc += (res == 1) ? 
LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(aux); + + // fast-path: number + // Note that all jumps below jump by 1 in the "true" case to skip over aux + if (ttisnumber(ra) && ttisnumber(rb)) + { + pc += !(nvalue(ra) < nvalue(rb)) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + // fast-path: string + else if (ttisstring(ra) && ttisstring(rb)) + { + pc += !(luaV_strcmp(tsvalue(ra), tsvalue(rb)) < 0) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + int res; + VM_PROTECT(res = luaV_lessthan(L, ra, rb)); + + pc += (res == 0) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + setnvalue(ra, nvalue(rb) + nvalue(rc)); + return pc; + } + else if (ttisvector(rb) && ttisvector(rc)) + { + const float* vb = rb->value.v; + const float* vc = rc->value.v; + setvvalue(ra, vb[0] + vc[0], vb[1] + vc[1], vb[2] + vc[2], vb[3] + vc[3]); + return pc; + } + else + { + // fast-path for userdata with C functions + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_ADD)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, rc); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_ADD)); + return pc; + } + } +} + +const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + setnvalue(ra, nvalue(rb) - nvalue(rc)); + return pc; + } + else if (ttisvector(rb) && ttisvector(rc)) + { + const float* vb = rb->value.v; + const float* vc = rc->value.v; + setvvalue(ra, vb[0] - vc[0], vb[1] - vc[1], vb[2] - vc[2], vb[3] - vc[3]); + return pc; + } + else + { + // fast-path for userdata with C functions + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_SUB)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, rc); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + 
VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_SUB)); + return pc; + } + } +} + +const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + setnvalue(ra, nvalue(rb) * nvalue(rc)); + return pc; + } + else if (ttisvector(rb) && ttisnumber(rc)) + { + const float* vb = rb->value.v; + float vc = cast_to(float, nvalue(rc)); + setvvalue(ra, vb[0] * vc, vb[1] * vc, vb[2] * vc, vb[3] * vc); + return pc; + } + else if (ttisvector(rb) && ttisvector(rc)) + { + const float* vb = rb->value.v; + const float* vc = rc->value.v; + setvvalue(ra, vb[0] * vc[0], vb[1] * vc[1], vb[2] * vc[2], vb[3] * vc[3]); + return pc; + } + else if (ttisnumber(rb) && ttisvector(rc)) + { + float vb = cast_to(float, nvalue(rb)); + const float* vc = rc->value.v; + setvvalue(ra, vb * vc[0], vb * vc[1], vb * vc[2], vb * vc[3]); + return pc; + } + else + { + // fast-path for userdata with C functions + StkId rbc = ttisnumber(rb) ? rc : rb; + const TValue* fn = 0; + if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_MUL)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, rc); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_MUL)); + return pc; + } + } +} + +const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + setnvalue(ra, nvalue(rb) / nvalue(rc)); + return pc; + } + else if (ttisvector(rb) && ttisnumber(rc)) + { + const float* vb = rb->value.v; + float vc = cast_to(float, nvalue(rc)); + setvvalue(ra, vb[0] / vc, vb[1] / vc, vb[2] / vc, vb[3] / vc); + return pc; + } + else if (ttisvector(rb) && ttisvector(rc)) + { + const float* vb = rb->value.v; + const float* vc = rc->value.v; + setvvalue(ra, vb[0] / vc[0], vb[1] / vc[1], vb[2] / vc[2], vb[3] / vc[3]); + return pc; + } + else if (ttisnumber(rb) && ttisvector(rc)) + { + float vb = cast_to(float, nvalue(rb)); + const float* vc = rc->value.v; + setvvalue(ra, vb / vc[0], vb / vc[1], vb / vc[2], vb / vc[3]); + return pc; + } + else + { + // fast-path for userdata with C functions + StkId rbc = ttisnumber(rb) ? 
rc : rb; + const TValue* fn = 0; + if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_DIV)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, rc); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_DIV)); + return pc; + } + } +} + +const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + double nb = nvalue(rb); + double nc = nvalue(rc); + setnvalue(ra, luai_nummod(nb, nc)); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_MOD)); + return pc; + } +} + +const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb) && ttisnumber(rc)) + { + setnvalue(ra, pow(nvalue(rb), nvalue(rc))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rc, TM_POW)); + return pc; + } +} + +const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + setnvalue(ra, nvalue(rb) + nvalue(kv)); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_ADD)); + return pc; + } +} + +const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + setnvalue(ra, nvalue(rb) - nvalue(kv)); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_SUB)); + return pc; + } +} + +const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + setnvalue(ra, nvalue(rb) * nvalue(kv)); + return pc; + } + else if (ttisvector(rb)) + { + const float* vb = rb->value.v; + float vc = cast_to(float, nvalue(kv)); + setvvalue(ra, vb[0] * vc, vb[1] * vc, vb[2] * vc, vb[3] * vc); + return pc; + } + else + { + // fast-path for userdata with C functions + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_MUL)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 
3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, kv); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_MUL)); + return pc; + } + } +} + +const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + setnvalue(ra, nvalue(rb) / nvalue(kv)); + return pc; + } + else if (ttisvector(rb)) + { + const float* vb = rb->value.v; + float vc = cast_to(float, nvalue(kv)); + setvvalue(ra, vb[0] / vc, vb[1] / vc, vb[2] / vc, vb[3] / vc); + return pc; + } + else + { + // fast-path for userdata with C functions + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_DIV)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + setobj2s(L, top + 2, kv); + L->top = top + 3; + + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_DIV)); + return pc; + } + } +} + +const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + double nb = nvalue(rb); + double nk = nvalue(kv); + setnvalue(ra, luai_nummod(nb, nk)); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_MOD)); + return pc; + } +} + +const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + // fast-path + if (ttisnumber(rb)) + { + double nb = nvalue(rb); + double nk = nvalue(kv); + + // pow is very slow so we specialize this for ^2, ^0.5 and ^3 + double r = (nk == 2.0) ? nb * nb : (nk == 0.5) ? sqrt(nb) : (nk == 3.0) ? nb * nb * nb : pow(nb, nk); + + setnvalue(ra, r); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, kv, TM_POW)); + return pc; + } +} + +const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + setobj2s(L, ra, l_isfalse(rb) ? rb : rc); + return pc; +} + +const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + StkId rc = VM_REG(LUAU_INSN_C(insn)); + + setobj2s(L, ra, l_isfalse(rb) ? 
rc : rb); + return pc; +} + +const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + setobj2s(L, ra, l_isfalse(rb) ? rb : kv); + return pc; +} + +const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + TValue* kv = VM_KV(LUAU_INSN_C(insn)); + + setobj2s(L, ra, l_isfalse(rb) ? kv : rb); + return pc; +} + +const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int b = LUAU_INSN_B(insn); + int c = LUAU_INSN_C(insn); + + // This call may realloc the stack! So we need to query args further down + VM_PROTECT(luaV_concat(L, c - b + 1, c)); + + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + setobjs2s(L, ra, base + b); + VM_PROTECT(luaC_checkGC(L)); + return pc; +} + +const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + + int res = l_isfalse(rb); + setbvalue(ra, res); + return pc; +} + +const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + + // fast-path + if (ttisnumber(rb)) + { + setnvalue(ra, -nvalue(rb)); + return pc; + } + else if (ttisvector(rb)) + { + const float* vb = rb->value.v; + setvvalue(ra, -vb[0], -vb[1], -vb[2], -vb[3]); + return pc; + } + else + { + // fast-path for userdata with C functions + const TValue* fn = 0; + if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_UNM)) && ttisfunction(fn) && clvalue(fn)->isC) + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + LUAU_ASSERT(L->top + 2 < L->stack + L->stacksize); + StkId top = L->top; + setobj2s(L, top + 0, fn); + setobj2s(L, top + 1, rb); + L->top = top + 2; + + VM_PROTECT(luaV_callTM(L, 1, LUAU_INSN_A(insn))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_doarith(L, ra, rb, rb, TM_UNM)); + return pc; + } + } +} + +const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = VM_REG(LUAU_INSN_B(insn)); + + // fast-path #1: tables + if (ttistable(rb)) + { + Table* h = hvalue(rb); + + if (fastnotm(h->metatable, TM_LEN)) + { + setnvalue(ra, cast_num(luaH_getn(h))); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_dolen(L, ra, rb)); + return pc; + } + } + // fast-path #2: strings (not very important but easy to do) + else if (ttisstring(rb)) + { + TString* ts = tsvalue(rb); + setnvalue(ra, cast_num(ts->len)); + return pc; + } + else + { + // slow-path, may invoke C/Lua via metamethods + VM_PROTECT(luaV_dolen(L, ra, rb)); + return pc; + } +} + +const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + int b = LUAU_INSN_B(insn); + uint32_t aux = *pc++; + 
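+ // size hints emitted by the compiler: aux is the array-portion size, while b
+ // encodes the hash-portion size as 2^(b-1), with b == 0 meaning no hash part
+ // is preallocated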
+ sethvalue(L, ra, luaH_new(L, aux, b == 0 ? 0 : (1 << (b - 1)))); + VM_PROTECT(luaC_checkGC(L)); + return pc; +} + +const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(LUAU_INSN_D(insn)); + + sethvalue(L, ra, luaH_clone(L, hvalue(kv))); + VM_PROTECT(luaC_checkGC(L)); + return pc; +} + +const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use + int c = LUAU_INSN_C(insn) - 1; + uint32_t index = *pc++; + + if (c == LUA_MULTRET) + { + c = int(L->top - rb); + L->top = L->ci->top; + } + + Table* h = hvalue(ra); + + if (!ttistable(ra)) + return NULL; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode + + int last = index + c - 1; + if (last > h->sizearray) + luaH_resizearray(L, h, last); + + TValue* array = h->array; + + for (int i = 0; i < c; ++i) + setobj2t(L, &array[index + i - 1], rb + i); + + luaC_barrierfast(L, h); + return pc; +} + +const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + if (!ttisnumber(ra + 0) || !ttisnumber(ra + 1) || !ttisnumber(ra + 2)) + { + // slow-path: can convert arguments to numbers and trigger Lua errors + // Note: this doesn't reallocate stack so we don't need to recompute ra/base + VM_PROTECT_PC(); + + luaV_prepareFORN(L, ra + 0, ra + 1, ra + 2); + } + + double limit = nvalue(ra + 0); + double step = nvalue(ra + 1); + double idx = nvalue(ra + 2); + + // Note: make sure the loop condition is exactly the same between this and LOP_FORNLOOP so that we handle NaN/etc. consistently + pc += (step > 0 ? idx <= limit : limit <= idx) ? 0 : LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + LUAU_ASSERT(ttisnumber(ra + 0) && ttisnumber(ra + 1) && ttisnumber(ra + 2)); + + double limit = nvalue(ra + 0); + double step = nvalue(ra + 1); + double idx = nvalue(ra + 2) + step; + + setnvalue(ra + 2, idx); + + // Note: make sure the loop condition is exactly the same between this and LOP_FORNPREP so that we handle NaN/etc. consistently + if (step > 0 ? idx <= limit : limit <= idx) + { + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + // fallthrough to exit + return pc; + } +} + +const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + if (ttisfunction(ra)) + { + // will be called during FORGLOOP + } + else + { + Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? 
uvalue(ra)->metatable : cast_to(Table*, NULL); + + if (const TValue* fn = fasttm(L, mt, TM_ITER)) + { + setobj2s(L, ra + 1, ra); + setobj2s(L, ra, fn); + + L->top = ra + 2; // func + self arg + LUAU_ASSERT(L->top <= L->stack_last); + + VM_PROTECT(luaD_call(L, ra, 3)); + L->top = L->ci->top; + + // recompute ra since stack might have been reallocated + ra = VM_REG(LUAU_INSN_A(insn)); + + // protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP + if (ttisnil(ra)) + { + VM_PROTECT(luaG_typeerror(L, ra, "call")); + } + } + else if (fasttm(L, mt, TM_CALL)) + { + // table or userdata with __call, will be called during FORGLOOP + // TODO: we might be able to stop supporting this depending on whether it's used in practice + } + else if (ttistable(ra)) + { + // set up registers for builtin iteration + setobj2s(L, ra + 1, ra); + setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0))); + setnilvalue(ra); + } + else + { + VM_PROTECT(luaG_typeerror(L, ra, "iterate over")); + } + } + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + uint32_t aux = *pc; + + // fast-path: builtin table iteration + // note: ra=nil guarantees ra+1=table and ra+2=userdata because of the setup by FORGPREP* opcodes + // TODO: remove the table check per guarantee above + if (ttisnil(ra) && ttistable(ra + 1)) + { + Table* h = hvalue(ra + 1); + int index = int(reinterpret_cast<uintptr_t>(pvalue(ra + 2))); + + int sizearray = h->sizearray; + + // clear extra variables since we might have more than two + // note: while aux encodes ipairs bit, when set we always use 2 variables, so it's safe to check this via a signed comparison + if (LUAU_UNLIKELY(int(aux) > 2)) + for (int i = 2; i < int(aux); ++i) + setnilvalue(ra + 3 + i); + + // terminate ipairs-style traversal early when encountering nil + if (int(aux) < 0 && (unsigned(index) >= unsigned(sizearray) || ttisnil(&h->array[index]))) + { + pc++; + return pc; + } + + // first we advance index through the array portion + while (unsigned(index) < unsigned(sizearray)) + { + TValue* e = &h->array[index]; + + if (!ttisnil(e)) + { + setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1))); + setnvalue(ra + 3, double(index + 1)); + setobj2s(L, ra + 4, e); + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + + index++; + } + + int sizenode = 1 << h->lsizenode; + + // then we advance index through the hash portion + while (unsigned(index - sizearray) < unsigned(sizenode)) + { + LuaNode* n = &h->node[index - sizearray]; + + if (!ttisnil(gval(n))) + { + setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1))); + getnodekey(L, ra + 3, n); + setobj2s(L, ra + 4, gval(n)); + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + + index++; + } + + // fallthrough to exit + pc++; + return pc; + } + else + { + // note: it's safe to push arguments past top for complicated reasons (see top of the file) + setobjs2s(L, ra + 3 + 2, ra + 2); + setobjs2s(L, ra + 3 + 1, ra + 1); + setobjs2s(L, ra + 3, ra); + + L->top = ra + 3 + 3; // func + 2 args (state and index) + LUAU_ASSERT(L->top <= L->stack_last); + + VM_PROTECT(luaD_call(L, ra + 3, uint8_t(aux))); + L->top = 
L->ci->top; + + // recompute ra since stack might have been reallocated + ra = VM_REG(LUAU_INSN_A(insn)); + + // copy first variable back into the iteration index + setobjs2s(L, ra + 2, ra + 3); + + // note that we need to increment pc by 1 to exit the loop since we need to skip over aux + pc += ttisnil(ra + 3) ? 1 : LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } +} + +const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + // fast-path: ipairs/inext + if (cl->env->safeenv && ttistable(ra + 1) && ttisnumber(ra + 2) && nvalue(ra + 2) == 0.0) + { + setnilvalue(ra); + // ra+1 is already the table + setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0))); + } + else if (!ttisfunction(ra)) + { + VM_PROTECT(luaG_typeerror(L, ra, "iterate over")); + } + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"Unsupported deprecated opcode"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + // fast-path: pairs/next + if (cl->env->safeenv && ttistable(ra + 1) && ttisnil(ra + 2)) + { + setnilvalue(ra); + // ra+1 is already the table + setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0))); + } + else if (!ttisfunction(ra)) + { + VM_PROTECT(luaG_typeerror(L, ra, "iterate over")); + } + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"Unsupported deprecated opcode"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int b = LUAU_INSN_B(insn) - 1; + int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1; + + if (b == LUA_MULTRET) + { + VM_PROTECT(luaD_checkstack(L, n)); + StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack + + for (int j = 0; j < n; j++) + setobjs2s(L, ra + j, base - n + j); + + L->top = ra + n; + return pc; + } + else + { + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + for (int j = 0; j < b && j < n; j++) + setobjs2s(L, ra + j, base - n + j); + for (int j = n; j < b; j++) + setnilvalue(ra + j); + return pc; + } +} + +const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(LUAU_INSN_D(insn)); + + Closure* kcl = clvalue(kv); + + // clone closure if the environment is not shared + // note: we save closure to stack early in case the code below wants to capture it by value + Closure* ncl = (kcl->env == cl->env) ? 
kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p); + setclvalue(L, ra, ncl); + + // this loop does three things: + // - if the closure was created anew, it just fills it with upvalues + // - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future + // - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure + // normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that + for (int ui = 0; ui < kcl->nupvalues; ++ui) + { + Instruction uinsn = pc[ui]; + LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE); + LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL); + + TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn)); + + // check if the existing closure is safe to reuse + if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv)) + continue; + + // lazily clone the closure and update the upvalues + if (ncl == kcl && kcl->preload == 0) + { + ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p); + setclvalue(L, ra, ncl); + + ui = -1; // restart the loop to fill all upvalues + continue; + } + + // this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier + setobj(L, &ncl->l.uprefs[ui], uv); + luaC_barrier(L, ncl, uv); + } + + // this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time + ncl->preload = 0; + + if (kcl != ncl) + VM_PROTECT(luaC_checkGC(L)); + + pc += kcl->nupvalues; + return pc; +} + +const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int numparams = LUAU_INSN_A(insn); + + // all fixed parameters are copied after the top so we need more stack space + VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams)); + + // the caller must have filled extra fixed arguments with nil + LUAU_ASSERT(cast_int(L->top - base) >= numparams); + + // move fixed parameters to final position + StkId fixed = base; // first fixed argument + base = L->top; // final position of first argument + + for (int i = 0; i < numparams; ++i) + { + setobjs2s(L, base + i, fixed + i); + setnilvalue(fixed + i); + } + + // rewire our stack frame to point to the new base + L->ci->base = base; + L->ci->top = base + cl->stacksize; + + L->base = base; + L->top = L->ci->top; + return pc; +} + +const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + + pc += LUAU_INSN_D(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + uint32_t aux = *pc++; + TValue* kv = VM_KV(aux); + + setobj2s(L, ra, kv); + return pc; +} + +const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + VM_INTERRUPT(); + Instruction insn = *pc++; + + pc += LUAU_INSN_E(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + 
Instruction insn = *pc++; + int bfid = LUAU_INSN_A(insn); + int skip = LUAU_INSN_C(insn); + LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode)); + + Instruction call = pc[skip]; + LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + + StkId ra = VM_REG(LUAU_INSN_A(call)); + + int nparams = LUAU_INSN_B(call) - 1; + int nresults = LUAU_INSN_C(call) - 1; + + nparams = (nparams == LUA_MULTRET) ? int(L->top - ra - 1) : nparams; + + luau_FastFunction f = luauF_table[bfid]; + + if (cl->env->safeenv && f) + { + VM_PROTECT_PC(); + + int n = f(L, ra, ra + 1, nresults, ra + 2, nparams); + + if (n >= 0) + { + L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top; + + pc += skip + 1; // skip instructions that compute function as well as CALL + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + // continue execution through the fallback code + return pc; + } + } + else + { + // continue execution through the fallback code + return pc; + } +} + +const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int hits = LUAU_INSN_E(insn); + + // update hits with saturated add and patch the instruction in place + hits = (hits < (1 << 23) - 1) ? hits + 1 : hits; + VM_PATCH_E(pc - 1, hits); + + return pc; +} + +const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"CAPTURE is a pseudo-opcode and must be executed as part of NEWCLOSURE"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"Unsupported deprecated opcode"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"Unsupported deprecated opcode"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int bfid = LUAU_INSN_A(insn); + TValue* arg = VM_REG(LUAU_INSN_B(insn)); + int skip = LUAU_INSN_C(insn); + + LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode)); + + Instruction call = pc[skip]; + LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + + StkId ra = VM_REG(LUAU_INSN_A(call)); + + int nparams = 1; + int nresults = LUAU_INSN_C(call) - 1; + + luau_FastFunction f = luauF_table[bfid]; + + if (cl->env->safeenv && f) + { + VM_PROTECT_PC(); + + int n = f(L, ra, arg, nresults, NULL, nparams); + + if (n >= 0) + { + L->top = (nresults == LUA_MULTRET) ? 
ra + n : L->ci->top; + + pc += skip + 1; // skip instructions that compute function as well as CALL + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + // continue execution through the fallback code + return pc; + } + } + else + { + // continue execution through the fallback code + return pc; + } +} + +const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int bfid = LUAU_INSN_A(insn); + int skip = LUAU_INSN_C(insn) - 1; + uint32_t aux = *pc++; + TValue* arg1 = VM_REG(LUAU_INSN_B(insn)); + TValue* arg2 = VM_REG(aux); + + LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode)); + + Instruction call = pc[skip]; + LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + + StkId ra = VM_REG(LUAU_INSN_A(call)); + + int nparams = 2; + int nresults = LUAU_INSN_C(call) - 1; + + luau_FastFunction f = luauF_table[bfid]; + + if (cl->env->safeenv && f) + { + VM_PROTECT_PC(); + + int n = f(L, ra, arg1, nresults, arg2, nparams); + + if (n >= 0) + { + L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top; + + pc += skip + 1; // skip instructions that compute function as well as CALL + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + // continue execution through the fallback code + return pc; + } + } + else + { + // continue execution through the fallback code + return pc; + } +} + +const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + int bfid = LUAU_INSN_A(insn); + int skip = LUAU_INSN_C(insn) - 1; + uint32_t aux = *pc++; + TValue* arg1 = VM_REG(LUAU_INSN_B(insn)); + TValue* arg2 = VM_KV(aux); + + LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode)); + + Instruction call = pc[skip]; + LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + + StkId ra = VM_REG(LUAU_INSN_A(call)); + + int nparams = 2; + int nresults = LUAU_INSN_C(call) - 1; + + luau_FastFunction f = luauF_table[bfid]; + + if (cl->env->safeenv && f) + { + VM_PROTECT_PC(); + + int n = f(L, ra, arg1, nresults, arg2, nparams); + + if (n >= 0) + { + L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top; + + pc += skip + 1; // skip instructions that compute function as well as CALL + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; + } + else + { + // continue execution through the fallback code + return pc; + } + } + else + { + // continue execution through the fallback code + return pc; + } +} + +const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + LUAU_ASSERT(!"Unsupported deprecated opcode"); + LUAU_UNREACHABLE(); +} + +const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + static_assert(LUA_TNIL == 0, "we expect type-1 to be negative iff type is nil"); + // condition is equivalent to: int(ttisnil(ra)) != (aux >> 31) + pc += int((ttype(ra) - 1) ^ aux) < 0 ? 
LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + + pc += int(ttisboolean(ra) && bvalue(ra) == int(aux & 1)) != (aux >> 31) ? LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(aux & 0xffffff); + LUAU_ASSERT(ttisnumber(kv)); + +#if defined(__aarch64__) + // On several ARM chips (Apple M1/M2, Neoverse N1), comparing the result of a floating-point comparison is expensive, and a branch + // is much cheaper; on some 32-bit ARM chips (Cortex A53) the performance is about the same so we prefer less branchy variant there + if (aux >> 31) + pc += !(ttisnumber(ra) && nvalue(ra) == nvalue(kv)) ? LUAU_INSN_D(insn) : 1; + else + pc += (ttisnumber(ra) && nvalue(ra) == nvalue(kv)) ? LUAU_INSN_D(insn) : 1; +#else + pc += int(ttisnumber(ra) && nvalue(ra) == nvalue(kv)) != (aux >> 31) ? LUAU_INSN_D(insn) : 1; +#endif + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} + +const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k) +{ + Instruction insn = *pc++; + uint32_t aux = *pc; + StkId ra = VM_REG(LUAU_INSN_A(insn)); + TValue* kv = VM_KV(aux & 0xffffff); + LUAU_ASSERT(ttisstring(kv)); + + pc += int(ttisstring(ra) && gcvalue(ra) == gcvalue(kv)) != (aux >> 31) ? 
LUAU_INSN_D(insn) : 1; + LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); + return pc; +} diff --git a/CodeGen/src/Fallbacks.h b/CodeGen/src/Fallbacks.h new file mode 100644 index 00000000..3bec8c5b --- /dev/null +++ b/CodeGen/src/Fallbacks.h @@ -0,0 +1,93 @@ +// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details +// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand +#pragma once + +#include <stdint.h> + +struct lua_State; +struct Closure; +typedef uint32_t Instruction; +typedef struct lua_TValue TValue; +typedef TValue* StkId; + +const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* 
execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Closure* 
cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); +const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k); diff --git a/CodeGen/src/FallbacksProlog.h b/CodeGen/src/FallbacksProlog.h new file mode 100644 index 00000000..bbb06b84 --- /dev/null +++ b/CodeGen/src/FallbacksProlog.h @@ -0,0 +1,56 @@ +// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details +#pragma once + +#include "lvm.h" + +#include "lbuiltins.h" +#include "lbytecode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lnumutils.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" + +#include + +// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT +// This makes sure 
that we save the pc (in case the Lua call needs to generate a backtrace) before the call, +// and restores the stack pointer after in case stack gets reallocated +// Should only be used on the slow paths. +#define VM_PROTECT(x) \ + { \ + L->ci->savedpc = pc; \ + { \ + x; \ + }; \ + base = L->base; \ + } + +// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is +// a cheaper version of VM_PROTECT that can be called before the external call. +#define VM_PROTECT_PC() L->ci->savedpc = pc + +#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i]) +#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i]) +#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i]) + +#define VM_PATCH_C(pc, slot) *const_cast(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc))) +#define VM_PATCH_E(pc, slot) *const_cast(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc))) + +#define VM_INTERRUPT() \ + { \ + void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \ + if (LUAU_UNLIKELY(!!interrupt)) \ + { /* the interrupt hook is called right before we advance pc */ \ + VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \ + if (L->status != 0) \ + { \ + L->ci->savedpc--; \ + return NULL; \ + } \ + } \ + } diff --git a/CodeGen/src/UnwindBuilderDwarf2.cpp b/CodeGen/src/UnwindBuilderDwarf2.cpp index 38e3e712..f3886d9c 100644 --- a/CodeGen/src/UnwindBuilderDwarf2.cpp +++ b/CodeGen/src/UnwindBuilderDwarf2.cpp @@ -1,6 +1,8 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #include "Luau/UnwindBuilderDwarf2.h" +#include "ByteUtils.h" + #include // General information about Dwarf2 format can be found at: @@ -11,40 +13,6 @@ // https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf [System V Application Binary Interface (AMD64 Architecture Processor Supplement)] // Interaction between Dwarf2 and System V ABI can be found in sections '3.6.2 DWARF Register Number Mapping' and '4.2.4 EH_FRAME sections' -static char* writeu8(char* target, uint8_t value) -{ - memcpy(target, &value, sizeof(value)); - return target + sizeof(value); -} - -static char* writeu32(char* target, uint32_t value) -{ - memcpy(target, &value, sizeof(value)); - return target + sizeof(value); -} - -static char* writeu64(char* target, uint64_t value) -{ - memcpy(target, &value, sizeof(value)); - return target + sizeof(value); -} - -static char* writeuleb128(char* target, uint64_t value) -{ - do - { - char byte = value & 0x7f; - value >>= 7; - - if (value) - byte |= 0x80; - - *target++ = byte; - } while (value); - - return target; -} - // Call frame instruction opcodes #define DW_CFA_advance_loc 0x40 #define DW_CFA_offset 0x80 @@ -104,7 +72,7 @@ const int kFdeInitialLocationOffset = 8; const int kFdeAddressRangeOffset = 16; // Define canonical frame address expression as [reg + offset] -static char* defineCfaExpression(char* pos, int dwReg, uint32_t stackOffset) +static uint8_t* defineCfaExpression(uint8_t* pos, int dwReg, uint32_t stackOffset) { pos = writeu8(pos, DW_CFA_def_cfa); pos = writeuleb128(pos, dwReg); @@ -113,14 +81,14 @@ static char* defineCfaExpression(char* pos, int dwReg, uint32_t stackOffset) } // Update offset value in canonical frame address expression -static char* defineCfaExpressionOffset(char* pos, uint32_t stackOffset) +static uint8_t* defineCfaExpressionOffset(uint8_t* pos, uint32_t stackOffset) { pos = writeu8(pos, DW_CFA_def_cfa_offset); 
pos = writeuleb128(pos, stackOffset); return pos; } -static char* defineSavedRegisterLocation(char* pos, int dwReg, uint32_t stackOffset) +static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t stackOffset) { LUAU_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units"); @@ -138,14 +106,14 @@ static char* defineSavedRegisterLocation(char* pos, int dwReg, uint32_t stackOff return pos; } -static char* advanceLocation(char* pos, uint8_t offset) +static uint8_t* advanceLocation(uint8_t* pos, uint8_t offset) { pos = writeu8(pos, DW_CFA_advance_loc1); pos = writeu8(pos, offset); return pos; } -static char* alignPosition(char* start, char* pos) +static uint8_t* alignPosition(uint8_t* start, uint8_t* pos) { size_t size = pos - start; size_t pad = ((size + kDwarfAlign - 1) & ~(kDwarfAlign - 1)) - size; @@ -163,7 +131,7 @@ namespace CodeGen void UnwindBuilderDwarf2::start() { - char* cieLength = pos; + uint8_t* cieLength = pos; pos = writeu32(pos, 0); // Length (to be filled later) pos = writeu32(pos, 0); // CIE id. 0 -- .eh_frame @@ -245,8 +213,8 @@ void UnwindBuilderDwarf2::finalize(char* target, void* funcAddress, size_t funcS memcpy(target, rawData, getSize()); unsigned fdeEntryStartPos = unsigned(fdeEntryStart - rawData); - writeu64(target + fdeEntryStartPos + kFdeInitialLocationOffset, uintptr_t(funcAddress)); - writeu64(target + fdeEntryStartPos + kFdeAddressRangeOffset, funcSize); + writeu64((uint8_t*)target + fdeEntryStartPos + kFdeInitialLocationOffset, uintptr_t(funcAddress)); + writeu64((uint8_t*)target + fdeEntryStartPos + kFdeAddressRangeOffset, funcSize); } } // namespace CodeGen diff --git a/Compiler/src/ConstantFolding.cpp b/Compiler/src/ConstantFolding.cpp index e35c883a..510f2b7b 100644 --- a/Compiler/src/ConstantFolding.cpp +++ b/Compiler/src/ConstantFolding.cpp @@ -3,6 +3,7 @@ #include "BuiltinFolding.h" +#include #include namespace Luau diff --git a/Makefile b/Makefile index f51f8ba4..5b8eb935 100644 --- a/Makefile +++ b/Makefile @@ -105,7 +105,7 @@ endif $(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include $(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include $(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include +$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include -IVM/include -IVM/src # Code generation needs VM internals $(VM_OBJECTS): CXXFLAGS+=-std=c++11 -ICommon/include -IVM/include $(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include $(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY diff --git a/Sources.cmake b/Sources.cmake index 580c8b3c..38100483 100644 --- a/Sources.cmake +++ b/Sources.cmake @@ -69,8 +69,13 @@ target_sources(Luau.CodeGen PRIVATE CodeGen/src/AssemblyBuilderX64.cpp CodeGen/src/CodeAllocator.cpp CodeGen/src/CodeBlockUnwind.cpp + CodeGen/src/Fallbacks.cpp CodeGen/src/UnwindBuilderDwarf2.cpp CodeGen/src/UnwindBuilderWin.cpp + + CodeGen/src/ByteUtils.h + CodeGen/src/Fallbacks.h + CodeGen/src/FallbacksProlog.h ) # Luau.Analysis Sources diff --git a/VM/include/lua.h b/VM/include/lua.h index 5c61f136..33e68517 100644 --- a/VM/include/lua.h +++ b/VM/include/lua.h @@ -304,6 +304,7 @@ LUA_API size_t lua_totalbytes(lua_State* L, int 
category); LUA_API l_noret lua_error(lua_State* L); LUA_API int lua_next(lua_State* L, int idx); +LUA_API int lua_rawiter(lua_State* L, int idx, int iter); LUA_API void lua_concat(lua_State* L, int n); diff --git a/VM/include/luaconf.h b/VM/include/luaconf.h index 7b0f4c30..1d8d7813 100644 --- a/VM/include/luaconf.h +++ b/VM/include/luaconf.h @@ -109,6 +109,11 @@ #define LUA_MAXCAPTURES 32 #endif +// enables callbacks to redirect code execution from Luau VM to a custom implementation +#ifndef LUA_CUSTOM_EXECUTION +#define LUA_CUSTOM_EXECUTION 0 +#endif + // }================================================================== /* diff --git a/VM/src/lapi.cpp b/VM/src/lapi.cpp index 968b8d00..e5ce4d5a 100644 --- a/VM/src/lapi.cpp +++ b/VM/src/lapi.cpp @@ -51,6 +51,12 @@ const char* luau_ident = "$Luau: Copyright (C) 2019-2022 Roblox Corporation $\n" L->top++; \ } +#define api_update_top(L, p) \ + { \ + api_check(L, p >= L->base && p < L->ci->top); \ + L->top = p; \ + } + #define updateatom(L, ts) \ { \ if (ts->atom == ATOM_UNDEF) \ @@ -851,7 +857,7 @@ void lua_rawsetfield(lua_State* L, int idx, const char* k) StkId t = index2addr(L, idx); api_check(L, ttistable(t)); if (hvalue(t)->readonly) - luaG_runerror(L, "Attempt to modify a readonly table"); + luaG_readonlyerror(L); setobj2t(L, luaH_setstr(L, hvalue(t), luaS_new(L, k)), L->top - 1); luaC_barriert(L, hvalue(t), L->top - 1); L->top--; @@ -1204,6 +1210,52 @@ int lua_next(lua_State* L, int idx) return more; } +int lua_rawiter(lua_State* L, int idx, int iter) +{ + luaC_threadbarrier(L); + StkId t = index2addr(L, idx); + api_check(L, ttistable(t)); + api_check(L, iter >= 0); + + Table* h = hvalue(t); + int sizearray = h->sizearray; + + // first we advance iter through the array portion + for (; unsigned(iter) < unsigned(sizearray); ++iter) + { + TValue* e = &h->array[iter]; + + if (!ttisnil(e)) + { + StkId top = L->top; + setnvalue(top + 0, double(iter + 1)); + setobj2s(L, top + 1, e); + api_update_top(L, top + 2); + return iter + 1; + } + } + + int sizenode = 1 << h->lsizenode; + + // then we advance iter through the hash portion + for (; unsigned(iter - sizearray) < unsigned(sizenode); ++iter) + { + LuaNode* n = &h->node[iter - sizearray]; + + if (!ttisnil(gval(n))) + { + StkId top = L->top; + getnodekey(L, top + 0, n); + setobj2s(L, top + 1, gval(n)); + api_update_top(L, top + 2); + return iter + 1; + } + } + + // traversal finished + return -1; +} + void lua_concat(lua_State* L, int n) { api_checknelems(L, n); @@ -1382,7 +1434,7 @@ void lua_cleartable(lua_State* L, int idx) api_check(L, ttistable(t)); Table* tt = hvalue(t); if (tt->readonly) - luaG_runerror(L, "Attempt to modify a readonly table"); + luaG_readonlyerror(L); luaH_clear(tt); } diff --git a/VM/src/ldebug.cpp b/VM/src/ldebug.cpp index fee9aaa9..7c181d4e 100644 --- a/VM/src/ldebug.cpp +++ b/VM/src/ldebug.cpp @@ -340,6 +340,11 @@ void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable) p->code[i] |= op; LUAU_ASSERT(LUAU_INSN_OP(p->code[i]) == op); +#if LUA_CUSTOM_EXECUTION + if (L->global->ecb.setbreakpoint) + L->global->ecb.setbreakpoint(L, p, i); +#endif + // note: this is important! 
// we only patch the *first* instruction in each proto that's attributed to a given line + // this can be changed, but it requires making patching a bit more nuanced so that we don't patch AUX words diff --git a/VM/src/lfunc.cpp b/VM/src/lfunc.cpp index 3c1869b5..aeb3d710 100644 --- a/VM/src/lfunc.cpp +++ b/VM/src/lfunc.cpp @@ -31,6 +31,11 @@ Proto* luaF_newproto(lua_State* L) f->source = NULL; f->debugname = NULL; f->debuginsn = NULL; + +#if LUA_CUSTOM_EXECUTION + f->execdata = NULL; +#endif + return f; } @@ -149,6 +154,15 @@ void luaF_freeproto(lua_State* L, Proto* f, lua_Page* page) luaM_freearray(L, f->upvalues, f->sizeupvalues, TString*, f->memcat); if (f->debuginsn) luaM_freearray(L, f->debuginsn, f->sizecode, uint8_t, f->memcat); + +#if LUA_CUSTOM_EXECUTION + if (f->execdata) + { + LUAU_ASSERT(L->global->ecb.destroy); + L->global->ecb.destroy(L, f); + } +#endif + luaM_freegco(L, f, sizeof(Proto), f->memcat, page); } diff --git a/VM/src/lobject.h b/VM/src/lobject.h index 48aaf94b..97cbfbb1 100644 --- a/VM/src/lobject.h +++ b/VM/src/lobject.h @@ -281,6 +281,10 @@ typedef struct Proto TString* debugname; uint8_t* debuginsn; // a copy of code[] array with just opcodes +#if LUA_CUSTOM_EXECUTION + void* execdata; +#endif + GCObject* gclist; diff --git a/VM/src/lstate.cpp b/VM/src/lstate.cpp index fdd7fc2b..cfe2cbfb 100644 --- a/VM/src/lstate.cpp +++ b/VM/src/lstate.cpp @@ -212,6 +212,11 @@ lua_State* lua_newstate(lua_Alloc f, void* ud) g->memcatbytes[0] = sizeof(LG); g->cb = lua_Callbacks(); + +#if LUA_CUSTOM_EXECUTION + g->ecb = lua_ExecutionCallbacks(); +#endif + g->gcstats = GCStats(); #ifdef LUAI_GCMETRICS diff --git a/VM/src/lstate.h b/VM/src/lstate.h index 06544463..5b7d0883 100644 --- a/VM/src/lstate.h +++ b/VM/src/lstate.h @@ -146,6 +146,19 @@ struct GCMetrics }; #endif +#if LUA_CUSTOM_EXECUTION + +// Callbacks that can be used to redirect code execution from Luau bytecode VM to a custom implementation (AoT/JiT/sandboxing/...) 
+typedef struct lua_ExecutionCallbacks +{ + void* context; + void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed + int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM + void (*setbreakpoint)(lua_State* L, Proto* proto, int line); // called when a breakpoint is set in a function +} lua_ExecutionCallbacks; + +#endif + /* ** `global state', shared by all threads of this state */ @@ -202,6 +215,10 @@ typedef struct global_State lua_Callbacks cb; +#if LUA_CUSTOM_EXECUTION + lua_ExecutionCallbacks ecb; +#endif + GCStats gcstats; #ifdef LUAI_GCMETRICS diff --git a/VM/src/lvm.h b/VM/src/lvm.h index 25a27166..c4b1c18b 100644 --- a/VM/src/lvm.h +++ b/VM/src/lvm.h @@ -24,6 +24,9 @@ LUAI_FUNC void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId v LUAI_FUNC void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val); LUAI_FUNC void luaV_concat(lua_State* L, int total, int last); LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil); +LUAI_FUNC void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit); +LUAI_FUNC void luaV_callTM(lua_State* L, int nparams, int res); +LUAI_FUNC void luaV_tryfuncTM(lua_State* L, StkId func); LUAI_FUNC void luau_execute(lua_State* L); LUAI_FUNC int luau_precall(lua_State* L, struct lua_TValue* func, int nresults); diff --git a/VM/src/lvmexecute.cpp b/VM/src/lvmexecute.cpp index c3c744b2..6ceed512 100644 --- a/VM/src/lvmexecute.cpp +++ b/VM/src/lvmexecute.cpp @@ -131,84 +131,6 @@ goto dispatchContinue #endif -LUAU_NOINLINE static void luau_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit) -{ - if (!ttisnumber(pinit) && !luaV_tonumber(pinit, pinit)) - luaG_forerror(L, pinit, "initial value"); - if (!ttisnumber(plimit) && !luaV_tonumber(plimit, plimit)) - luaG_forerror(L, plimit, "limit"); - if (!ttisnumber(pstep) && !luaV_tonumber(pstep, pstep)) - luaG_forerror(L, pstep, "step"); -} - -// calls a C function f with no yielding support; optionally save one resulting value to the res register -// the function and arguments have to already be pushed to L->top -LUAU_NOINLINE static void luau_callTM(lua_State* L, int nparams, int res) -{ - ++L->nCcalls; - - if (L->nCcalls >= LUAI_MAXCCALLS) - luaD_checkCstack(L); - - luaD_checkstack(L, LUA_MINSTACK); - - StkId top = L->top; - StkId fun = top - nparams - 1; - - CallInfo* ci = incr_ci(L); - ci->func = fun; - ci->base = fun + 1; - ci->top = top + LUA_MINSTACK; - ci->savedpc = NULL; - ci->flags = 0; - ci->nresults = (res >= 0); - LUAU_ASSERT(ci->top <= L->stack_last); - - LUAU_ASSERT(ttisfunction(ci->func)); - LUAU_ASSERT(clvalue(ci->func)->isC); - - L->base = fun + 1; - LUAU_ASSERT(L->top == L->base + nparams); - - lua_CFunction func = clvalue(fun)->c.f; - int n = func(L); - LUAU_ASSERT(n >= 0); // yields should have been blocked by nCcalls - - // ci is our callinfo, cip is our parent - // note that we read L->ci again since it may have been reallocated by the call - CallInfo* cip = L->ci - 1; - - // copy return value into parent stack - if (res >= 0) - { - if (n > 0) - { - setobj2s(L, &cip->base[res], L->top - n); - } - else - { - setnilvalue(&cip->base[res]); - } - } - - L->ci = cip; - L->base = cip->base; - L->top = cip->top; - - --L->nCcalls; -} - -LUAU_NOINLINE static void luau_tryfuncTM(lua_State* L, StkId func) -{ - const TValue* tm = luaT_gettmbyobj(L, func, TM_CALL); - if (!ttisfunction(tm)) - 
luaG_typeerror(L, func, "call"); - for (StkId p = L->top; p > func; p--) // open space for metamethod - setobjs2s(L, p, p - 1); - L->top++; // stack space pre-allocated by the caller - setobj2s(L, func, tm); // tag method is the new function to be called -} - LUAU_NOINLINE void luau_callhook(lua_State* L, lua_Hook hook, void* userdata) { ptrdiff_t base = savestack(L, L->base); @@ -284,6 +206,20 @@ static void luau_execute(lua_State* L) LUAU_ASSERT(L->isactive); LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black +#if LUA_CUSTOM_EXECUTION + Proto* p = clvalue(L->ci->func)->l.p; + + if (p->execdata) + { + if (L->global->ecb.enter(L, p) == 0) + return; + } + +reentry: +#endif + + LUAU_ASSERT(isLua(L->ci)); + pc = L->ci->savedpc; cl = clvalue(L->ci->func); base = L->base; @@ -564,7 +500,7 @@ static void luau_execute(lua_State* L) L->top = top + 3; L->cachedslot = LUAU_INSN_C(insn); - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ VM_PATCH_C(pc - 2, L->cachedslot); VM_NEXT(); @@ -601,7 +537,7 @@ static void luau_execute(lua_State* L) L->top = top + 3; L->cachedslot = LUAU_INSN_C(insn); - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ VM_PATCH_C(pc - 2, L->cachedslot); VM_NEXT(); @@ -680,7 +616,7 @@ static void luau_execute(lua_State* L) L->top = top + 4; L->cachedslot = LUAU_INSN_C(insn); - VM_PROTECT(luau_callTM(L, 3, -1)); + VM_PROTECT(luaV_callTM(L, 3, -1)); // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++ VM_PATCH_C(pc - 2, L->cachedslot); VM_NEXT(); @@ -973,7 +909,7 @@ static void luau_execute(lua_State* L) // slow-path: not a function call if (LUAU_UNLIKELY(!ttisfunction(ra))) { - VM_PROTECT(luau_tryfuncTM(L, ra)); + VM_PROTECT(luaV_tryfuncTM(L, ra)); argtop++; // __call adds an extra self } @@ -1009,6 +945,18 @@ static void luau_execute(lua_State* L) setnilvalue(argi++); // complete missing arguments L->top = p->is_vararg ? argi : ci->top; +#if LUA_CUSTOM_EXECUTION + if (p->execdata) + { + LUAU_ASSERT(L->global->ecb.enter); + + if (L->global->ecb.enter(L, p) == 1) + goto reentry; + else + goto exit; + } +#endif + // reentry pc = p->code; cl = ccl; @@ -1092,11 +1040,26 @@ static void luau_execute(lua_State* L) LUAU_ASSERT(isLua(L->ci)); + Closure* nextcl = clvalue(cip->func); + Proto* nextproto = nextcl->l.p; + +#if LUA_CUSTOM_EXECUTION + if (nextproto->execdata) + { + LUAU_ASSERT(L->global->ecb.enter); + + if (L->global->ecb.enter(L, nextproto) == 1) + goto reentry; + else + goto exit; + } +#endif + // reentry pc = cip->savedpc; - cl = clvalue(cip->func); + cl = nextcl; base = L->base; - k = cl->l.p->k; + k = nextproto->k; VM_NEXT(); } @@ -1212,7 +1175,7 @@ static void luau_execute(lua_State* L) int res = int(top - base); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, res)); + VM_PROTECT(luaV_callTM(L, 2, res)); pc += !l_isfalse(&base[res]) ? 
LUAU_INSN_D(insn) : 1; LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); VM_NEXT(); @@ -1324,7 +1287,7 @@ static void luau_execute(lua_State* L) int res = int(top - base); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, res)); + VM_PROTECT(luaV_callTM(L, 2, res)); pc += l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1; LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode)); VM_NEXT(); @@ -1519,7 +1482,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, rc); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -1565,7 +1528,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, rc); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -1626,7 +1589,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, rc); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -1687,7 +1650,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, rc); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -1819,7 +1782,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, kv); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -1865,7 +1828,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 2, kv); L->top = top + 3; - VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -2029,7 +1992,7 @@ static void luau_execute(lua_State* L) setobj2s(L, top + 1, rb); L->top = top + 2; - VM_PROTECT(luau_callTM(L, 1, LUAU_INSN_A(insn))); + VM_PROTECT(luaV_callTM(L, 1, LUAU_INSN_A(insn))); VM_NEXT(); } else @@ -2145,7 +2108,7 @@ static void luau_execute(lua_State* L) // Note: this doesn't reallocate stack so we don't need to recompute ra/base VM_PROTECT_PC(); - luau_prepareFORN(L, ra + 0, ra + 1, ra + 2); + luaV_prepareFORN(L, ra + 0, ra + 1, ra + 2); } double limit = nvalue(ra + 0); @@ -2861,7 +2824,7 @@ int luau_precall(lua_State* L, StkId func, int nresults) { if (!ttisfunction(func)) { - luau_tryfuncTM(L, func); + luaV_tryfuncTM(L, func); // L->top is incremented by tryfuncTM } diff --git a/VM/src/lvmutils.cpp b/VM/src/lvmutils.cpp index 33d47020..35124e63 100644 --- a/VM/src/lvmutils.cpp +++ b/VM/src/lvmutils.cpp @@ -508,3 +508,81 @@ void luaV_dolen(lua_State* L, StkId ra, const TValue* rb) if (!ttisnumber(res)) luaG_runerror(L, "'__len' must return a number"); // note, we can't access rb since stack may have been reallocated } + +LUAU_NOINLINE void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit) +{ + if (!ttisnumber(pinit) && !luaV_tonumber(pinit, pinit)) + luaG_forerror(L, pinit, "initial value"); + if (!ttisnumber(plimit) && !luaV_tonumber(plimit, plimit)) + luaG_forerror(L, plimit, "limit"); + if (!ttisnumber(pstep) && !luaV_tonumber(pstep, pstep)) + luaG_forerror(L, pstep, "step"); +} + +// calls a C function f with no yielding support; optionally save one resulting value to the res register +// the function and arguments have to already be pushed to L->top +LUAU_NOINLINE void luaV_callTM(lua_State* L, int nparams, int res) +{ + ++L->nCcalls; + + if (L->nCcalls >= LUAI_MAXCCALLS) + 
luaD_checkCstack(L); + + luaD_checkstack(L, LUA_MINSTACK); + + StkId top = L->top; + StkId fun = top - nparams - 1; + + CallInfo* ci = incr_ci(L); + ci->func = fun; + ci->base = fun + 1; + ci->top = top + LUA_MINSTACK; + ci->savedpc = NULL; + ci->flags = 0; + ci->nresults = (res >= 0); + LUAU_ASSERT(ci->top <= L->stack_last); + + LUAU_ASSERT(ttisfunction(ci->func)); + LUAU_ASSERT(clvalue(ci->func)->isC); + + L->base = fun + 1; + LUAU_ASSERT(L->top == L->base + nparams); + + lua_CFunction func = clvalue(fun)->c.f; + int n = func(L); + LUAU_ASSERT(n >= 0); // yields should have been blocked by nCcalls + + // ci is our callinfo, cip is our parent + // note that we read L->ci again since it may have been reallocated by the call + CallInfo* cip = L->ci - 1; + + // copy return value into parent stack + if (res >= 0) + { + if (n > 0) + { + setobj2s(L, &cip->base[res], L->top - n); + } + else + { + setnilvalue(&cip->base[res]); + } + } + + L->ci = cip; + L->base = cip->base; + L->top = cip->top; + + --L->nCcalls; +} + +LUAU_NOINLINE void luaV_tryfuncTM(lua_State* L, StkId func) +{ + const TValue* tm = luaT_gettmbyobj(L, func, TM_CALL); + if (!ttisfunction(tm)) + luaG_typeerror(L, func, "call"); + for (StkId p = L->top; p > func; p--) // open space for metamethod + setobjs2s(L, p, p - 1); + L->top++; // stack space pre-allocated by the caller + setobj2s(L, func, tm); // tag method is the new function to be called +} diff --git a/tests/Compiler.test.cpp b/tests/Compiler.test.cpp index e6222b02..9409c822 100644 --- a/tests/Compiler.test.cpp +++ b/tests/Compiler.test.cpp @@ -62,6 +62,29 @@ LOADN R0 5 LOADK R1 K0 RETURN R0 2 )"); + + CHECK_EQ("\n" + bcb.dumpEverything(), R"( +Function 0 (??): +LOADN R0 5 +LOADK R1 K0 +RETURN R0 2 + +)"); +} + +TEST_CASE("CompileError") +{ + std::string source = "local " + rep("a,", 300) + "a = ..."; + + // fails to parse + std::string bc1 = Luau::compile(source + " !#*$!#$^&!*#&$^*"); + + // parses, but fails to compile (too many locals) + std::string bc2 = Luau::compile(source); + + // 0 acts as a special marker for error bytecode + CHECK_EQ(bc1[0], 0); + CHECK_EQ(bc2[0], 0); } TEST_CASE("LocalsDirectReference") @@ -1230,6 +1253,27 @@ RETURN R0 0 )"); } +TEST_CASE("UnaryBasic") +{ + CHECK_EQ("\n" + compileFunction0("local a = ... return not a"), R"( +GETVARARGS R0 1 +NOT R1 R0 +RETURN R1 1 +)"); + + CHECK_EQ("\n" + compileFunction0("local a = ... return -a"), R"( +GETVARARGS R0 1 +MINUS R1 R0 +RETURN R1 1 +)"); + + CHECK_EQ("\n" + compileFunction0("local a = ... 
return #a"), R"( +GETVARARGS R0 1 +LENGTH R1 R0 +RETURN R1 1 +)"); +} + TEST_CASE("InterpStringWithNoExpressions") { ScopedFastFlag sff{"LuauInterpolatedStringBaseSupport", true}; @@ -4975,6 +5019,27 @@ MOVE R1 R0 CALL R1 0 1 RETURN R1 1 )"); + + // we can't inline any functions in modules with getfenv/setfenv + CHECK_EQ("\n" + compileFunction(R"( +local function foo() + return 42 +end + +local x = foo() +getfenv() +return x +)", + 1, 2), + R"( +DUPCLOSURE R0 K0 +MOVE R1 R0 +CALL R1 0 1 +GETIMPORT R2 2 +CALL R2 0 0 +RETURN R1 1 +)"); + } TEST_CASE("InlineNestedLoops") @@ -6101,6 +6166,7 @@ return bit32.extract(-1, 31), bit32.replace(100, 1, 0), math.log(100, 10), + typeof(nil), (type("fin")) )", 0, 2), @@ -6156,7 +6222,8 @@ LOADN R47 1 LOADN R48 101 LOADN R49 2 LOADK R50 K3 -RETURN R0 51 +LOADK R51 K4 +RETURN R0 52 )"); } diff --git a/tests/Conformance.test.cpp b/tests/Conformance.test.cpp index cf4a14c7..77b30487 100644 --- a/tests/Conformance.test.cpp +++ b/tests/Conformance.test.cpp @@ -786,6 +786,44 @@ TEST_CASE("ApiTables") lua_pop(L, 1); } +TEST_CASE("ApiIter") +{ + StateRef globalState(luaL_newstate(), lua_close); + lua_State* L = globalState.get(); + + lua_newtable(L); + lua_pushnumber(L, 123.0); + lua_setfield(L, -2, "key"); + lua_pushnumber(L, 456.0); + lua_rawsetfield(L, -2, "key2"); + lua_pushstring(L, "test"); + lua_rawseti(L, -2, 1); + + // Lua-compatible iteration interface: lua_next + double sum1 = 0; + lua_pushnil(L); + while (lua_next(L, -2)) + { + sum1 += lua_tonumber(L, -2); // key + sum1 += lua_tonumber(L, -1); // value + lua_pop(L, 1); // pop value, key is used by lua_next + } + CHECK(sum1 == 580); + + // Luau iteration interface: lua_rawiter (faster and preferable to lua_next) + double sum2 = 0; + for (int index = 0; index = lua_rawiter(L, -1, index), index >= 0; ) + { + sum2 += lua_tonumber(L, -2); // key + sum2 += lua_tonumber(L, -1); // value + lua_pop(L, 2); // pop both key and value + } + CHECK(sum2 == 580); + + // pop table + lua_pop(L, 1); +} + TEST_CASE("ApiCalls") { StateRef globalState = runConformance("apicalls.lua"); diff --git a/tests/Linter.test.cpp b/tests/Linter.test.cpp index 8c7d762e..921e6691 100644 --- a/tests/Linter.test.cpp +++ b/tests/Linter.test.cpp @@ -1705,8 +1705,6 @@ TEST_CASE_FIXTURE(Fixture, "TestStringInterpolation") TEST_CASE_FIXTURE(Fixture, "IntegerParsing") { - ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true}; - LintResult result = lint(R"( local _ = 0b10000000000000000000000000000000000000000000000000000000000000000 local _ = 0x10000000000000000 @@ -1720,7 +1718,6 @@ local _ = 0x10000000000000000 // TODO: remove with FFlagLuauErrorDoubleHexPrefix TEST_CASE_FIXTURE(Fixture, "IntegerParsingDoublePrefix") { - ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true}; ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", false}; // Lint will be available until we start rejecting code LintResult result = lint(R"( diff --git a/tests/Normalize.test.cpp b/tests/Normalize.test.cpp index 42e9a933..31df707d 100644 --- a/tests/Normalize.test.cpp +++ b/tests/Normalize.test.cpp @@ -2,6 +2,7 @@ #include "Fixture.h" +#include "Luau/Common.h" #include "doctest.h" #include "Luau/Normalize.h" @@ -747,7 +748,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_union") { ScopedFastFlag sff[] = { {"LuauLowerBoundsCalculation", true}, - {"LuauFixNormalizationOfCyclicUnions", true}, }; CheckResult result = check(R"( @@ -765,7 +765,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_intersection") { ScopedFastFlag 
sff[] = { {"LuauLowerBoundsCalculation", true}, - {"LuauFixNormalizationOfCyclicUnions", true}, }; CheckResult result = check(R"( @@ -784,7 +783,6 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_indexers") { ScopedFastFlag sff[] = { {"LuauLowerBoundsCalculation", true}, - {"LuauFixNormalizationOfCyclicUnions", true}, }; CheckResult result = check(R"( diff --git a/tests/Parser.test.cpp b/tests/Parser.test.cpp index 44a6b4ac..b4064cfb 100644 --- a/tests/Parser.test.cpp +++ b/tests/Parser.test.cpp @@ -682,7 +682,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_numbers_binary") TEST_CASE_FIXTURE(Fixture, "parse_numbers_error") { - ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true}; ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", true}; CHECK_EQ(getParseError("return 0b123"), "Malformed number"); @@ -695,7 +694,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_numbers_error") TEST_CASE_FIXTURE(Fixture, "parse_numbers_error_soft") { - ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true}; ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", false}; CHECK_EQ(getParseError("return 0x0x0x0x0x0x0x0"), "Malformed number"); diff --git a/tests/TypeInfer.aliases.test.cpp b/tests/TypeInfer.aliases.test.cpp index dd91467d..834391a7 100644 --- a/tests/TypeInfer.aliases.test.cpp +++ b/tests/TypeInfer.aliases.test.cpp @@ -198,8 +198,12 @@ TEST_CASE_FIXTURE(Fixture, "generic_aliases") LUAU_REQUIRE_ERROR_COUNT(1, result); + const char* expectedError = "Type '{ v: string }' could not be converted into 'T'\n" + "caused by:\n" + " Property 'v' is not compatible. Type 'string' could not be converted into 'number'"; + CHECK(result.errors[0].location == Location{{4, 31}, {4, 44}}); - CHECK(toString(result.errors[0]) == "Type '{ v: string }' could not be converted into 'T'"); + CHECK(toString(result.errors[0]) == expectedError); } TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases") @@ -215,8 +219,14 @@ TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases") LUAU_REQUIRE_ERROR_COUNT(1, result); + const char* expectedError = "Type '{ t: { v: string } }' could not be converted into 'U'\n" + "caused by:\n" + " Property 't' is not compatible. Type '{ v: string }' could not be converted into 'T'\n" + "caused by:\n" + " Property 'v' is not compatible. 
Type 'string' could not be converted into 'number'"; + CHECK(result.errors[0].location == Location{{4, 31}, {4, 52}}); - CHECK(toString(result.errors[0]) == "Type '{ t: { v: string } }' could not be converted into 'U'"); + CHECK(toString(result.errors[0]) == expectedError); } TEST_CASE_FIXTURE(Fixture, "mutually_recursive_generic_aliases") diff --git a/tests/TypeInfer.provisional.test.cpp b/tests/TypeInfer.provisional.test.cpp index 40ea0ca1..2c4c3504 100644 --- a/tests/TypeInfer.provisional.test.cpp +++ b/tests/TypeInfer.provisional.test.cpp @@ -558,11 +558,10 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "greedy_inference_with_shared_self_triggers_f CHECK_EQ("Not all codepaths in this function return 'self, a...'.", toString(result.errors[0])); } -TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint") +TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint") { ScopedFastFlag sff[] = { {"DebugLuauDeferredConstraintResolution", true}, - {"LuauSpecialTypesAsterisked", true}, }; CheckResult result = check(R"( @@ -577,7 +576,6 @@ TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint") LUAU_REQUIRE_NO_ERRORS(result); - // We should be able to resolve this to number, but we're not there yet. // Solving this requires recognizing that we can partially solve the // following constraint: // @@ -586,7 +584,7 @@ TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint") // The correct thing for us to do is to consider the constraint dispatched, // but we need to also record a new constraint number <: *blocked* to finish // the job later. - CHECK("(a, *error-type*) -> ()" == toString(requireType("prime_iter"))); + CHECK("(a, number) -> ()" == toString(requireType("prime_iter"))); } TEST_CASE_FIXTURE(Fixture, "free_options_cannot_be_unified_together") diff --git a/tests/TypeInfer.singletons.test.cpp b/tests/TypeInfer.singletons.test.cpp index 7d98b5db..5ee956d7 100644 --- a/tests/TypeInfer.singletons.test.cpp +++ b/tests/TypeInfer.singletons.test.cpp @@ -314,6 +314,31 @@ caused by: toString(result.errors[0])); } +TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias") +{ + ScopedFastFlag sff[] = { + {"DebugLuauDeferredConstraintResolution", true}, + }; + + CheckResult result = check(R"( + type Ok = {success: true, result: T} + type Err = {success: false, error: T} + type Result = Ok | Err + + local a : Result = {success = false, result = "hotdogs"} + local b : Result = {success = true, result = "hotdogs"} + )"); + + LUAU_REQUIRE_ERROR_COUNT(1, result); + + const std::string expectedError = "Type '{ result: string, success: false }' could not be converted into 'Err | Ok'\n" + "caused by:\n" + " None of the union options are compatible. 
For example: Table type '{ result: string, success: false }'" + " not compatible with type 'Err' because the former is missing field 'error'"; + + CHECK(toString(result.errors[0]) == expectedError); +} + TEST_CASE_FIXTURE(Fixture, "if_then_else_expression_singleton_options") { CheckResult result = check(R"( diff --git a/tests/TypeInfer.test.cpp b/tests/TypeInfer.test.cpp index e8bfb67f..a96c3676 100644 --- a/tests/TypeInfer.test.cpp +++ b/tests/TypeInfer.test.cpp @@ -1082,7 +1082,7 @@ TEST_CASE_FIXTURE(Fixture, "do_not_bind_a_free_table_to_a_union_containing_that_ )"); } -TEST_CASE_FIXTURE(Fixture, "types stored in astResolvedTypes") +TEST_CASE_FIXTURE(Fixture, "types_stored_in_astResolvedTypes") { CheckResult result = check(R"( type alias = typeof("hello") @@ -1122,4 +1122,41 @@ TEST_CASE_FIXTURE(Fixture, "bidirectional_checking_of_higher_order_function") CHECK(location.end.line == 4); } +TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint") +{ + ScopedFastFlag sff[] = { + {"DebugLuauDeferredConstraintResolution", true}, + }; + + CheckResult result = check(R"( + local function hasDivisors(value: number) + end + + function prime_iter(state, index) + hasDivisors(index) + index += 1 + end + )"); + + LUAU_REQUIRE_NO_ERRORS(result); + + // Solving this requires recognizing that we can't dispatch a constraint + // like this without doing further work: + // + // (*blocked*) -> () <: (number) -> (b...) + // + // We solve this by searching both types for BlockedTypeVars and block the + // constraint on any we find. It also gets the job done, but I'm worried + // about the efficiency of doing so many deep type traversals and it may + // make us more prone to getting stuck on constraint cycles. + // + // If this doesn't pan out, a possible solution is to go further down the + // path of supporting partial constraint dispatch. The way it would work is + // that we'd dispatch the above constraint by binding b... to (), but we + // would append a new constraint number <: *blocked* to the constraint set + // to be solved later. This should be faster and theoretically less prone + // to cyclic constraint dependencies. 
+ CHECK("(a, number) -> ()" == toString(requireType("prime_iter"))); +} + TEST_SUITE_END(); diff --git a/tools/faillist.txt b/tools/faillist.txt index 9c2df805..825fb2f6 100644 --- a/tools/faillist.txt +++ b/tools/faillist.txt @@ -10,7 +10,6 @@ AnnotationTests.luau_ice_triggers_an_ice_handler AnnotationTests.luau_print_is_magic_if_the_flag_is_set AnnotationTests.occurs_check_on_cyclic_intersection_typevar AnnotationTests.occurs_check_on_cyclic_union_typevar -AnnotationTests.too_many_type_params AnnotationTests.two_type_params AnnotationTests.use_type_required_from_another_file AstQuery.last_argument_function_call_type @@ -28,7 +27,6 @@ AutocompleteTest.autocomplete_interpolated_string AutocompleteTest.autocomplete_on_string_singletons AutocompleteTest.autocomplete_oop_implicit_self AutocompleteTest.autocomplete_repeat_middle_keyword -AutocompleteTest.autocomplete_string_singleton_equality AutocompleteTest.autocomplete_string_singleton_escape AutocompleteTest.autocomplete_string_singletons AutocompleteTest.autocomplete_while_middle_keywords @@ -85,7 +83,6 @@ AutocompleteTest.type_correct_expected_argument_type_pack_suggestion AutocompleteTest.type_correct_expected_argument_type_suggestion AutocompleteTest.type_correct_expected_argument_type_suggestion_optional AutocompleteTest.type_correct_expected_argument_type_suggestion_self -AutocompleteTest.type_correct_expected_return_type_pack_suggestion AutocompleteTest.type_correct_expected_return_type_suggestion AutocompleteTest.type_correct_full_type_suggestion AutocompleteTest.type_correct_function_no_parenthesis @@ -113,7 +110,6 @@ BuiltinTests.dont_add_definitions_to_persistent_types BuiltinTests.find_capture_types BuiltinTests.find_capture_types2 BuiltinTests.find_capture_types3 -BuiltinTests.getfenv BuiltinTests.global_singleton_types_are_sealed BuiltinTests.gmatch_capture_types BuiltinTests.gmatch_capture_types2 @@ -130,13 +126,9 @@ BuiltinTests.next_iterator_should_infer_types_and_type_check BuiltinTests.os_time_takes_optional_date_table BuiltinTests.pairs_iterator_should_infer_types_and_type_check BuiltinTests.see_thru_select -BuiltinTests.see_thru_select_count -BuiltinTests.select_on_variadic BuiltinTests.select_slightly_out_of_range BuiltinTests.select_way_out_of_range BuiltinTests.select_with_decimal_argument_is_rounded_down -BuiltinTests.select_with_variadic_typepack_tail -BuiltinTests.select_with_variadic_typepack_tail_and_string_head BuiltinTests.set_metatable_needs_arguments BuiltinTests.setmetatable_should_not_mutate_persisted_types BuiltinTests.sort @@ -147,20 +139,16 @@ BuiltinTests.string_format_arg_types_inference BuiltinTests.string_format_as_method BuiltinTests.string_format_correctly_ordered_types BuiltinTests.string_format_report_all_type_errors_at_correct_positions -BuiltinTests.string_format_tostring_specifier -BuiltinTests.string_format_tostring_specifier_type_constraint BuiltinTests.string_format_use_correct_argument BuiltinTests.string_format_use_correct_argument2 BuiltinTests.string_format_use_correct_argument3 BuiltinTests.string_lib_self_noself BuiltinTests.table_concat_returns_string -BuiltinTests.table_dot_remove_optionally_returns_generic BuiltinTests.table_freeze_is_generic BuiltinTests.table_insert_correctly_infers_type_of_array_2_args_overload BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload BuiltinTests.table_pack BuiltinTests.table_pack_reduce -BuiltinTests.table_pack_variadic BuiltinTests.tonumber_returns_optional_number_type BuiltinTests.tonumber_returns_optional_number_type2 
DefinitionTests.class_definition_overload_metamethods @@ -168,7 +156,6 @@ DefinitionTests.declaring_generic_functions DefinitionTests.definition_file_classes FrontendTest.ast_node_at_position FrontendTest.automatically_check_dependent_scripts -FrontendTest.dont_reparse_clean_file_when_linting FrontendTest.environments FrontendTest.imported_table_modification_2 FrontendTest.it_should_be_safe_to_stringify_errors_when_full_type_graph_is_discarded @@ -187,11 +174,8 @@ GenericsTests.check_mutual_generic_functions GenericsTests.correctly_instantiate_polymorphic_member_functions GenericsTests.do_not_always_instantiate_generic_intersection_types GenericsTests.do_not_infer_generic_functions -GenericsTests.dont_unify_bound_types GenericsTests.duplicate_generic_type_packs GenericsTests.duplicate_generic_types -GenericsTests.error_detailed_function_mismatch_generic_pack -GenericsTests.error_detailed_function_mismatch_generic_types GenericsTests.factories_of_generics GenericsTests.generic_argument_count_too_few GenericsTests.generic_argument_count_too_many @@ -205,30 +189,22 @@ GenericsTests.generic_type_pack_unification2 GenericsTests.generic_type_pack_unification3 GenericsTests.infer_generic_function_function_argument GenericsTests.infer_generic_function_function_argument_overloaded -GenericsTests.infer_generic_lib_function_function_argument GenericsTests.infer_generic_methods +GenericsTests.inferred_local_vars_can_be_polytypes GenericsTests.instantiate_cyclic_generic_function GenericsTests.instantiate_generic_function_in_assignments GenericsTests.instantiate_generic_function_in_assignments2 GenericsTests.instantiated_function_argument_names GenericsTests.instantiation_sharing_types GenericsTests.local_vars_can_be_instantiated_polytypes -GenericsTests.mutable_state_polymorphism GenericsTests.no_stack_overflow_from_quantifying GenericsTests.properties_can_be_instantiated_polytypes -GenericsTests.rank_N_types_via_typeof GenericsTests.reject_clashing_generic_and_pack_names GenericsTests.self_recursive_instantiated_param -IntersectionTypes.argument_is_intersection -IntersectionTypes.error_detailed_intersection_all -IntersectionTypes.error_detailed_intersection_part -IntersectionTypes.fx_intersection_as_argument -IntersectionTypes.fx_union_as_argument_fails IntersectionTypes.index_on_an_intersection_type_with_mixed_types IntersectionTypes.index_on_an_intersection_type_with_property_guaranteed_to_exist IntersectionTypes.index_on_an_intersection_type_works_at_arbitrary_depth IntersectionTypes.no_stack_overflow_from_flattenintersection -IntersectionTypes.overload_is_not_a_function IntersectionTypes.select_correct_union_fn IntersectionTypes.should_still_pick_an_overload_whose_arguments_are_unions IntersectionTypes.table_intersection_write_sealed @@ -236,17 +212,13 @@ IntersectionTypes.table_intersection_write_sealed_indirect IntersectionTypes.table_write_sealed_indirect isSubtype.intersection_of_tables isSubtype.table_with_table_prop -Linter.TableOperations ModuleTests.clone_self_property ModuleTests.deepClone_cyclic_table ModuleTests.do_not_clone_reexports -NonstrictModeTests.delay_function_does_not_require_its_argument_to_return_anything NonstrictModeTests.for_in_iterator_variables_are_any NonstrictModeTests.function_parameters_are_any NonstrictModeTests.inconsistent_module_return_types_are_ok -NonstrictModeTests.inconsistent_return_types_are_ok NonstrictModeTests.infer_nullary_function -NonstrictModeTests.infer_the_maximum_number_of_values_the_function_could_return 
NonstrictModeTests.inline_table_props_are_also_any NonstrictModeTests.local_tables_are_not_any NonstrictModeTests.locals_are_any_by_default @@ -294,20 +266,16 @@ ProvisionalTests.greedy_inference_with_shared_self_triggers_function_with_no_ret ProvisionalTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound ProvisionalTests.it_should_be_agnostic_of_actual_size ProvisionalTests.lower_bounds_calculation_is_too_permissive_with_overloaded_higher_order_functions -ProvisionalTests.lvalue_equals_another_lvalue_with_no_overlap ProvisionalTests.normalization_fails_on_certain_kinds_of_cyclic_tables -ProvisionalTests.operator_eq_completely_incompatible ProvisionalTests.pcall_returns_at_least_two_value_but_function_returns_nothing ProvisionalTests.setmetatable_constrains_free_type_into_free_table ProvisionalTests.typeguard_inference_incomplete ProvisionalTests.weirditer_should_not_loop_forever ProvisionalTests.while_body_are_also_refined RefinementTest.and_constraint -RefinementTest.and_or_peephole_refinement RefinementTest.apply_refinements_on_astexprindexexpr_whose_subscript_expr_is_constant_string RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number RefinementTest.assert_non_binary_expressions_actually_resolve_constraints -RefinementTest.assign_table_with_refined_property_with_a_similar_type_is_illegal RefinementTest.call_a_more_specific_function_using_typeguard RefinementTest.correctly_lookup_a_shadowed_local_that_which_was_previously_refined RefinementTest.correctly_lookup_property_whose_base_was_previously_refined @@ -319,24 +287,19 @@ RefinementTest.discriminate_tag RefinementTest.either_number_or_string RefinementTest.eliminate_subclasses_of_instance RefinementTest.falsiness_of_TruthyPredicate_narrows_into_nil -RefinementTest.free_type_is_equal_to_an_lvalue -RefinementTest.impossible_type_narrow_is_not_an_error RefinementTest.index_on_a_refined_property RefinementTest.invert_is_truthy_constraint RefinementTest.invert_is_truthy_constraint_ifelse_expression RefinementTest.is_truthy_constraint RefinementTest.is_truthy_constraint_ifelse_expression -RefinementTest.lvalue_is_equal_to_a_term -RefinementTest.lvalue_is_equal_to_another_lvalue RefinementTest.lvalue_is_not_nil RefinementTest.merge_should_be_fully_agnostic_of_hashmap_ordering +RefinementTest.narrow_boolean_to_true_or_false RefinementTest.narrow_property_of_a_bounded_variable RefinementTest.narrow_this_large_union RefinementTest.nonoptional_type_can_narrow_to_nil_if_sense_is_true RefinementTest.not_a_and_not_b RefinementTest.not_a_and_not_b2 -RefinementTest.not_a_or_not_b -RefinementTest.not_a_or_not_b2 RefinementTest.not_and_constraint RefinementTest.not_t_or_some_prop_of_t RefinementTest.or_predicate_with_truthy_predicates @@ -344,7 +307,6 @@ RefinementTest.parenthesized_expressions_are_followed_through RefinementTest.refine_a_property_not_to_be_nil_through_an_intersection_table RefinementTest.refine_the_correct_types_opposite_of_when_a_is_not_number_or_string RefinementTest.refine_unknowns -RefinementTest.string_not_equal_to_string_or_nil RefinementTest.term_is_equal_to_an_lvalue RefinementTest.truthy_constraint_on_properties RefinementTest.type_assertion_expr_carry_its_constraints @@ -363,11 +325,9 @@ RefinementTest.typeguard_narrows_for_functions RefinementTest.typeguard_narrows_for_table RefinementTest.typeguard_not_to_be_string RefinementTest.typeguard_only_look_up_types_from_global_scope -RefinementTest.unknown_lvalue_is_not_synonymous_with_other_on_not_equal 
RefinementTest.what_nonsensical_condition RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table RefinementTest.x_is_not_instance_or_else_not_part -RuntimeLimits.typescript_port_of_Result_type TableTests.a_free_shape_can_turn_into_a_scalar_if_it_is_compatible TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible TableTests.access_index_metamethod_that_returns_variadic @@ -383,7 +343,6 @@ TableTests.casting_sealed_tables_with_props_into_table_with_indexer TableTests.casting_tables_with_props_into_table_with_indexer3 TableTests.casting_tables_with_props_into_table_with_indexer4 TableTests.checked_prop_too_early -TableTests.confusing_indexing TableTests.defining_a_method_for_a_builtin_sealed_table_must_fail TableTests.defining_a_method_for_a_local_sealed_table_must_fail TableTests.defining_a_self_method_for_a_builtin_sealed_table_must_fail @@ -393,11 +352,7 @@ TableTests.dont_hang_when_trying_to_look_up_in_cyclic_metatable_index TableTests.dont_leak_free_table_props TableTests.dont_quantify_table_that_belongs_to_outer_scope TableTests.dont_suggest_exact_match_keys -TableTests.error_detailed_indexer_key -TableTests.error_detailed_indexer_value TableTests.error_detailed_metatable_prop -TableTests.error_detailed_prop -TableTests.error_detailed_prop_nested TableTests.expected_indexer_from_table_union TableTests.expected_indexer_value_type_extra TableTests.expected_indexer_value_type_extra_2 @@ -422,11 +377,7 @@ TableTests.infer_indexer_from_value_property_in_literal TableTests.inferred_return_type_of_free_table TableTests.inferring_crazy_table_should_also_be_quick TableTests.instantiate_table_cloning_3 -TableTests.instantiate_tables_at_scope_level TableTests.leaking_bad_metatable_errors -TableTests.length_operator_intersection -TableTests.length_operator_non_table_union -TableTests.length_operator_union TableTests.length_operator_union_errors TableTests.less_exponential_blowup_please TableTests.meta_add @@ -444,16 +395,15 @@ TableTests.open_table_unification_2 TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table_2 TableTests.pass_incompatible_union_to_a_generic_table_without_crashing +TableTests.passing_compatible_unions_to_a_generic_table_without_crashing TableTests.persistent_sealed_table_is_immutable TableTests.prop_access_on_key_whose_types_mismatches TableTests.property_lookup_through_tabletypevar_metatable TableTests.quantify_even_that_table_was_never_exported_at_all -TableTests.quantify_metatables_of_metatables_of_table TableTests.quantifying_a_bound_var_works TableTests.reasonable_error_when_adding_a_nonexistent_property_to_an_array_like_table TableTests.result_is_always_any_if_lhs_is_any TableTests.result_is_bool_for_equality_operators_if_lhs_is_any -TableTests.right_table_missing_key TableTests.right_table_missing_key2 TableTests.scalar_is_a_subtype_of_a_compatible_polymorphic_shape_type TableTests.scalar_is_not_a_subtype_of_a_compatible_polymorphic_shape_type @@ -463,14 +413,11 @@ TableTests.shared_selfs_through_metatables TableTests.table_indexing_error_location TableTests.table_insert_should_cope_with_optional_properties_in_nonstrict TableTests.table_insert_should_cope_with_optional_properties_in_strict -TableTests.table_length TableTests.table_param_row_polymorphism_2 TableTests.table_param_row_polymorphism_3 TableTests.table_simple_call TableTests.table_subtyping_with_extra_props_dont_report_multiple_errors 
TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors -TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors2 -TableTests.table_unifies_into_map TableTests.tables_get_names_from_their_locals TableTests.tc_member_function TableTests.tc_member_function_2 @@ -480,10 +427,8 @@ TableTests.unification_of_unions_in_a_self_referential_type TableTests.unifying_tables_shouldnt_uaf2 TableTests.used_colon_instead_of_dot TableTests.used_dot_instead_of_colon -TableTests.width_subtyping ToDot.bound_table ToDot.function -ToDot.metatable ToDot.table ToString.exhaustive_toString_of_cyclic_table ToString.function_type_with_argument_names_generic @@ -500,7 +445,6 @@ TryUnifyTests.members_of_failed_typepack_unification_are_unified_with_errorType TryUnifyTests.result_of_failed_typepack_unification_is_constrained TryUnifyTests.typepack_unification_should_trim_free_tails TryUnifyTests.variadics_should_use_reversed_properly -TypeAliases.cli_38393_recursive_intersection_oom TypeAliases.forward_declared_alias_is_not_clobbered_by_prior_unification_with_any TypeAliases.generic_param_remap TypeAliases.mismatched_generic_pack_type_param @@ -517,29 +461,19 @@ TypeAliases.type_alias_local_mutation TypeAliases.type_alias_local_rename TypeAliases.type_alias_of_an_imported_recursive_generic_type TypeInfer.checking_should_not_ice -TypeInfer.cyclic_follow -TypeInfer.do_not_bind_a_free_table_to_a_union_containing_that_table TypeInfer.dont_report_type_errors_within_an_AstStatError TypeInfer.globals TypeInfer.globals2 TypeInfer.infer_assignment_value_types_mutable_lval TypeInfer.no_stack_overflow_from_isoptional TypeInfer.tc_after_error_recovery_no_replacement_name_in_error -TypeInfer.tc_if_else_expressions1 -TypeInfer.tc_if_else_expressions2 -TypeInfer.tc_if_else_expressions_expected_type_1 -TypeInfer.tc_if_else_expressions_expected_type_2 TypeInfer.tc_if_else_expressions_expected_type_3 -TypeInfer.tc_if_else_expressions_type_union TypeInfer.tc_interpolated_string_basic TypeInfer.tc_interpolated_string_constant_type TypeInfer.tc_interpolated_string_with_invalid_expression TypeInfer.type_infer_recursion_limit_no_ice TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any -TypeInferAnyError.can_get_length_of_any TypeInferAnyError.for_in_loop_iterator_is_any2 -TypeInferAnyError.length_of_error_type_does_not_produce_an_error -TypeInferAnyError.replace_every_free_type_when_unifying_a_complex_function_with_any TypeInferAnyError.union_of_types_regression_test TypeInferClasses.call_base_method TypeInferClasses.call_instance_method @@ -549,39 +483,23 @@ TypeInferClasses.class_type_mismatch_with_name_conflict TypeInferClasses.classes_can_have_overloaded_operators TypeInferClasses.classes_without_overloaded_operators_cannot_be_added TypeInferClasses.detailed_class_unification_error -TypeInferClasses.function_arguments_are_covariant -TypeInferClasses.higher_order_function_return_type_is_not_contravariant -TypeInferClasses.higher_order_function_return_values_are_covariant +TypeInferClasses.higher_order_function_arguments_are_contravariant TypeInferClasses.optional_class_field_access_error TypeInferClasses.table_class_unification_reports_sane_errors_for_missing_properties -TypeInferClasses.table_indexers_are_invariant -TypeInferClasses.table_properties_are_invariant TypeInferClasses.warn_when_prop_almost_matches TypeInferClasses.we_can_report_when_someone_is_trying_to_use_a_table_rather_than_a_class -TypeInferFunctions.another_recursive_local_function 
TypeInferFunctions.call_o_with_another_argument_after_foo_was_quantified -TypeInferFunctions.calling_function_with_anytypepack_doesnt_leak_free_types TypeInferFunctions.calling_function_with_incorrect_argument_type_yields_errors_spanning_argument -TypeInferFunctions.complicated_return_types_require_an_explicit_annotation TypeInferFunctions.dont_give_other_overloads_message_if_only_one_argument_matching_overload_exists TypeInferFunctions.dont_infer_parameter_types_for_functions_from_their_call_site TypeInferFunctions.duplicate_functions_with_different_signatures_not_allowed_in_nonstrict -TypeInferFunctions.error_detailed_function_mismatch_arg -TypeInferFunctions.error_detailed_function_mismatch_arg_count -TypeInferFunctions.error_detailed_function_mismatch_ret -TypeInferFunctions.error_detailed_function_mismatch_ret_count -TypeInferFunctions.error_detailed_function_mismatch_ret_mult TypeInferFunctions.free_is_not_bound_to_unknown TypeInferFunctions.func_expr_doesnt_leak_free TypeInferFunctions.function_cast_error_uses_correct_language -TypeInferFunctions.function_decl_non_self_sealed_overwrite TypeInferFunctions.function_decl_non_self_sealed_overwrite_2 TypeInferFunctions.function_decl_non_self_unsealed_overwrite TypeInferFunctions.function_does_not_return_enough_values TypeInferFunctions.function_statement_sealed_table_assignment_through_indexer -TypeInferFunctions.higher_order_function_2 -TypeInferFunctions.higher_order_function_4 -TypeInferFunctions.ignored_return_values TypeInferFunctions.improved_function_arg_mismatch_error_nonstrict TypeInferFunctions.improved_function_arg_mismatch_errors TypeInferFunctions.inconsistent_higher_order_function @@ -589,15 +507,11 @@ TypeInferFunctions.inconsistent_return_types TypeInferFunctions.infer_anonymous_function_arguments TypeInferFunctions.infer_return_type_from_selected_overload TypeInferFunctions.infer_that_function_does_not_return_a_table -TypeInferFunctions.it_is_ok_not_to_supply_enough_retvals TypeInferFunctions.list_all_overloads_if_no_overload_takes_given_argument_count TypeInferFunctions.list_only_alternative_overloads_that_match_argument_count TypeInferFunctions.no_lossy_function_type -TypeInferFunctions.occurs_check_failure_in_function_return_type TypeInferFunctions.quantify_constrained_types TypeInferFunctions.record_matching_overload -TypeInferFunctions.recursive_function -TypeInferFunctions.recursive_local_function TypeInferFunctions.report_exiting_without_return_nonstrict TypeInferFunctions.report_exiting_without_return_strict TypeInferFunctions.return_type_by_overload @@ -608,11 +522,7 @@ TypeInferFunctions.too_few_arguments_variadic_generic2 TypeInferFunctions.too_many_arguments TypeInferFunctions.too_many_return_values TypeInferFunctions.vararg_function_is_quantified -TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_values -TypeInferLoops.for_in_loop_with_custom_iterator -TypeInferLoops.for_in_loop_with_next TypeInferLoops.for_in_with_just_one_iterator_is_ok -TypeInferLoops.loop_iter_iter_metamethod TypeInferLoops.loop_iter_no_indexer_nonstrict TypeInferLoops.loop_iter_trailing_nil TypeInferLoops.loop_typecheck_crash_on_empty_optional @@ -631,7 +541,6 @@ TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2 TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_not_defined_with_colon TypeInferOOP.inferred_methods_of_free_tables_have_the_same_level_as_the_enclosing_table TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory 
-TypeInferOOP.methods_are_topologically_sorted TypeInferOperators.and_adds_boolean TypeInferOperators.and_adds_boolean_no_superfluous_union TypeInferOperators.and_binexps_dont_unify @@ -641,13 +550,10 @@ TypeInferOperators.cannot_compare_tables_that_do_not_have_the_same_metatable TypeInferOperators.cannot_indirectly_compare_types_that_do_not_have_a_metatable TypeInferOperators.cannot_indirectly_compare_types_that_do_not_offer_overloaded_ordering_operators TypeInferOperators.cli_38355_recursive_union -TypeInferOperators.compare_numbers -TypeInferOperators.compare_strings TypeInferOperators.compound_assign_mismatch_metatable TypeInferOperators.compound_assign_mismatch_op TypeInferOperators.compound_assign_mismatch_result TypeInferOperators.concat_op_on_free_lhs_and_string_rhs -TypeInferOperators.concat_op_on_string_lhs_and_free_rhs TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops TypeInferOperators.dont_strip_nil_from_rhs_or_operator TypeInferOperators.equality_operations_succeed_if_any_union_branch_succeeds @@ -655,18 +561,12 @@ TypeInferOperators.error_on_invalid_operand_types_to_relational_operators TypeInferOperators.error_on_invalid_operand_types_to_relational_operators2 TypeInferOperators.expected_types_through_binary_and TypeInferOperators.expected_types_through_binary_or -TypeInferOperators.in_nonstrict_mode_strip_nil_from_intersections_when_considering_relational_operators TypeInferOperators.infer_any_in_all_modes_when_lhs_is_unknown -TypeInferOperators.operator_eq_operands_are_not_subtypes_of_each_other_but_has_overlap -TypeInferOperators.operator_eq_verifies_types_do_intersect TypeInferOperators.or_joins_types TypeInferOperators.or_joins_types_with_no_extras -TypeInferOperators.primitive_arith_no_metatable -TypeInferOperators.primitive_arith_no_metatable_with_follows TypeInferOperators.primitive_arith_possible_metatable TypeInferOperators.produce_the_correct_error_message_when_comparing_a_table_with_a_metatable_with_one_that_does_not TypeInferOperators.refine_and_or -TypeInferOperators.some_primitive_binary_ops TypeInferOperators.strict_binary_op_where_lhs_unknown TypeInferOperators.strip_nil_from_lhs_or_operator TypeInferOperators.strip_nil_from_lhs_or_operator2 @@ -676,13 +576,11 @@ TypeInferOperators.typecheck_unary_len_error TypeInferOperators.typecheck_unary_minus TypeInferOperators.typecheck_unary_minus_error TypeInferOperators.unary_not_is_boolean -TypeInferOperators.unknown_type_in_comparison TypeInferOperators.UnknownGlobalCompoundAssign TypeInferPrimitives.CheckMethodsOfNumber TypeInferPrimitives.singleton_types TypeInferPrimitives.string_function_other TypeInferPrimitives.string_index -TypeInferPrimitives.string_length TypeInferPrimitives.string_method TypeInferUnknownNever.assign_to_global_which_is_never TypeInferUnknownNever.assign_to_local_which_is_never @@ -692,9 +590,7 @@ TypeInferUnknownNever.call_never TypeInferUnknownNever.dont_unify_operands_if_one_of_the_operand_is_never_in_any_ordering_operators TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_never TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_sorta_never -TypeInferUnknownNever.length_of_never TypeInferUnknownNever.math_operators_and_never -TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable2 TypeInferUnknownNever.unary_minus_of_never TypePackTests.higher_order_function @@ -718,18 +614,17 @@ TypePackTests.type_alias_type_packs 
TypePackTests.type_alias_type_packs_errors
TypePackTests.type_alias_type_packs_import
TypePackTests.type_alias_type_packs_nested
-TypePackTests.type_pack_hidden_free_tail_infinite_growth
TypePackTests.type_pack_type_parameters
+TypePackTests.unify_variadic_tails_in_arguments
+TypePackTests.unify_variadic_tails_in_arguments_free
TypePackTests.varargs_inference_through_multiple_scopes
TypePackTests.variadic_packs
-TypeSingletons.bool_singleton_subtype
-TypeSingletons.bool_singletons
-TypeSingletons.bool_singletons_mismatch
TypeSingletons.enums_using_singletons
TypeSingletons.enums_using_singletons_mismatch
TypeSingletons.enums_using_singletons_subtyping
TypeSingletons.error_detailed_tagged_union_mismatch_bool
TypeSingletons.error_detailed_tagged_union_mismatch_string
+TypeSingletons.function_call_with_singletons
TypeSingletons.function_call_with_singletons_mismatch
TypeSingletons.if_then_else_expression_singleton_options
TypeSingletons.indexing_on_string_singletons
@@ -752,7 +647,6 @@ TypeSingletons.widening_happens_almost_everywhere
TypeSingletons.widening_happens_almost_everywhere_except_for_tables
UnionTypes.error_detailed_optional
UnionTypes.error_detailed_union_all
-UnionTypes.error_detailed_union_part
UnionTypes.error_takes_optional_arguments
UnionTypes.index_on_a_union_type_with_missing_property
UnionTypes.index_on_a_union_type_with_mixed_types
@@ -771,6 +665,4 @@ UnionTypes.optional_union_follow
UnionTypes.optional_union_functions
UnionTypes.optional_union_members
UnionTypes.optional_union_methods
-UnionTypes.return_types_can_be_disjoint
UnionTypes.table_union_write_indirect
-UnionTypes.union_equality_comparisons
diff --git a/tools/lvmexecute_split.py b/tools/lvmexecute_split.py
new file mode 100644
index 00000000..10e3ccbb
--- /dev/null
+++ b/tools/lvmexecute_split.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
+
+# This code can be used to split lvmexecute.cpp VM switch into separate functions for use as native code generation fallbacks
+import sys
+import re
+
+input = sys.stdin.readlines()
+
+inst = ""
+
+header = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
+// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
+#pragma once
+
+#include <stdint.h>
+
+struct lua_State;
+struct Closure;
+typedef uint32_t Instruction;
+typedef struct lua_TValue TValue;
+typedef TValue* StkId;
+
+"""
+
+source = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
+// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
+// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
+#include "Fallbacks.h"
+#include "FallbacksProlog.h"
+
+"""
+
+function = ""
+
+state = 0
+
+# parse with the state machine
+for line in input:
+    # find the start of an instruction
+    if state == 0:
+        match = re.match("\s+VM_CASE\((LOP_[A-Z_0-9]+)\)", line)
+
+        if match:
+            inst = match[1]
+            signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)"
+            header += signature + ";\n"
+            function = signature + "\n"
+            state = 1
+
+    # find the end of an instruction
+    elif state == 1:
+        # remove jumps back into the native code
+        if line == "#if LUA_CUSTOM_EXECUTION\n":
+            state = 2
+            continue
+
+        if line[0] == ' ':
+            finalline = line[12:-1] + "\n"
+        else:
+            finalline = line
+
+        finalline = finalline.replace("VM_NEXT();", "return pc;");
+        finalline = finalline.replace("goto exit;", "return NULL;");
+        finalline = finalline.replace("return;", "return NULL;");
+
+        function += finalline
+        match = re.match("            }", line)
+
+        if match:
+            # break is not supported
+            if inst == "LOP_BREAK":
+                function = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)\n"
+                function += "{\n    LUAU_ASSERT(!\"Unsupported deprecated opcode\");\n    LUAU_UNREACHABLE();\n}\n"
+            # handle fallthrough
+            elif inst == "LOP_NAMECALL":
+                function = function[:-len(finalline)]
+                function += "    return pc;\n}\n"
+
+            source += function + "\n"
+            state = 0
+
+    # skip LUA_CUSTOM_EXECUTION code blocks
+    elif state == 2:
+        if line == "#endif\n":
+            state = 3
+            continue
+
+    # skip extra line
+    elif state == 3:
+        state = 1
+
+# make sure we found the ending
+assert(state == 0)
+
+with open("Fallbacks.h", "w") as fp:
+    fp.writelines(header)
+
+with open("Fallbacks.cpp", "w") as fp:
+    fp.writelines(source)
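
For reference, the splitter added above reads lvmexecute.cpp on standard input and writes Fallbacks.h and Fallbacks.cpp into the current working directory, emitting one execute_LOP_* fallback per VM_CASE block, with LOP_BREAK stubbed out and LOP_NAMECALL treated as a fallthrough to the next handler. Below is a minimal driver sketch of how it might be invoked from the repository root; the VM/src/lvmexecute.cpp input path and the plain "python" interpreter name are assumptions, not something this diff specifies.

# Hypothetical driver for tools/lvmexecute_split.py; the input path is an assumption.
import subprocess

with open("VM/src/lvmexecute.cpp") as vm_source:
    # The splitter consumes the VM switch on stdin and writes Fallbacks.h and
    # Fallbacks.cpp into the current working directory.
    subprocess.run(["python", "tools/lvmexecute_split.py"], stdin=vm_source, check=True)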