diff --git a/Source/DafnyCore/AST/ExtremeCloner.cs b/Source/DafnyCore/AST/ExtremeCloner.cs index d4d725e82e3..c5d24c32921 100644 --- a/Source/DafnyCore/AST/ExtremeCloner.cs +++ b/Source/DafnyCore/AST/ExtremeCloner.cs @@ -17,7 +17,7 @@ protected ExtremeCloner(Expression k, ErrorReporter reporter) { Contract.Requires(reporter != null); this.k = k; this.reporter = reporter; - this.suffix = string.Format("#[{0}]", Printer.ExprToString(reporter.Options, k)); + this.suffix = $"#[{Printer.ExprToString(reporter.Options, k)}]"; } protected Expression CloneCallAndAddK(ApplySuffix e) { Contract.Requires(e != null); @@ -63,4 +63,16 @@ protected Expression CloneCallAndAddK(FunctionCallExpr e) { reporter.Info(MessageSource.Cloner, e.tok, e.Name + suffix); return fexp; } + + protected Expression CloneEqualityAndAddK(BinaryExpr binaryExpr) { + if (this.CloneResolvedFields) { + throw new NotImplementedException(); + } + + var eq = new TernaryExpr(Tok(binaryExpr.tok), + binaryExpr.ResolvedOp == BinaryExpr.ResolvedOpcode.EqCommon ? 
TernaryExpr.Opcode.PrefixEqOp : TernaryExpr.Opcode.PrefixNeqOp, + k, CloneExpr(binaryExpr.E0), CloneExpr(binaryExpr.E1)); + reporter.Info(MessageSource.Cloner, binaryExpr.tok, "==" + suffix); + return eq; + } } diff --git a/Source/DafnyCore/AST/ExtremeLemmaBodyCloner.cs b/Source/DafnyCore/AST/ExtremeLemmaBodyCloner.cs index f71835f6ce2..98a3033e6d6 100644 --- a/Source/DafnyCore/AST/ExtremeLemmaBodyCloner.cs +++ b/Source/DafnyCore/AST/ExtremeLemmaBodyCloner.cs @@ -11,42 +11,31 @@ namespace Microsoft.Dafny; class ExtremeLemmaBodyCloner : ExtremeCloner { readonly ExtremeLemma context; readonly ISet focalPredicates; - public ExtremeLemmaBodyCloner(ExtremeLemma context, Expression k, ISet focalPredicates, ErrorReporter reporter) + readonly ISet focalCodatatypeEquality; + public ExtremeLemmaBodyCloner(ExtremeLemma context, Expression k, + ISet focalPredicates, ISet focalCodatatypeEquality, ErrorReporter reporter) : base(k, reporter) { Contract.Requires(context != null); Contract.Requires(k != null); Contract.Requires(reporter != null); this.context = context; this.focalPredicates = focalPredicates; + this.focalCodatatypeEquality = focalCodatatypeEquality; } public override Expression CloneExpr(Expression expr) { if (reporter.Options.RewriteFocalPredicates) { - if (expr is FunctionCallExpr) { - var e = (FunctionCallExpr)expr; -#if DEBUG_PRINT - if (e.Function.Name.EndsWith("#") && Contract.Exists(focalPredicates, p => e.Function.Name == p.Name + "#")) { - Options.Writer.WriteLine("{0}({1},{2}): DEBUG: Possible opportunity to rely on new rewrite: {3}", e.tok.filename, e.tok.line, e.tok.col, Printer.ExprToString(e)); - } -#endif + if (expr is FunctionCallExpr functionCallExpr) { // Note, we don't actually ever get here, because all calls will have been parsed as ApplySuffix. 
// However, if something changes in the future (for example, some rewrite that changing an ApplySuffix // to its resolved FunctionCallExpr), then we do want this code, so with the hope of preventing // some error in the future, this case is included. (Of course, it is currently completely untested!) - var f = e.Function as ExtremePredicate; - if (f != null && focalPredicates.Contains(f)) { -#if DEBUG_PRINT - var r = CloneCallAndAddK(e); - Options.Writer.WriteLine("{0}({1},{2}): DEBUG: Rewrote extreme predicate into prefix predicate: {3}", e.tok.filename, e.tok.line, e.tok.col, Printer.ExprToString(r)); - return r; -#else - return CloneCallAndAddK(e); -#endif + if (functionCallExpr.Function is ExtremePredicate f && focalPredicates.Contains(f)) { + return CloneCallAndAddK(functionCallExpr); } } else if (expr is StaticReceiverExpr ee) { return new StaticReceiverExpr(Tok(ee.tok), ee.Type, ee.IsImplicit); - } else if (expr is ApplySuffix) { - var apply = (ApplySuffix)expr; + } else if (expr is ApplySuffix apply) { if (!apply.WasResolved()) { // Since we're assuming the enclosing statement to have been resolved, this ApplySuffix must // be part of an ExprRhs that actually designates a method call. 
Such an ApplySuffix does @@ -54,56 +43,46 @@ public override Expression CloneExpr(Expression expr) { var mse = (MemberSelectExpr)apply.Lhs.Resolved; Contract.Assume(mse.Member is Method); } else { - var fce = apply.Resolved as FunctionCallExpr; - if (fce != null) { -#if DEBUG_PRINT - if (fce.Function.Name.EndsWith("#") && Contract.Exists(focalPredicates, p => fce.Function.Name == p.Name + "#")) { - Options.Writer.WriteLine("{0}({1},{2}): DEBUG: Possible opportunity to rely on new rewrite: {3}", fce.tok.filename, fce.tok.line, fce.tok.col, Printer.ExprToString(fce)); - } -#endif - var f = fce.Function as ExtremePredicate; - if (f != null && focalPredicates.Contains(f)) { -#if DEBUG_PRINT - var r = CloneCallAndAddK(fce); - Options.Writer.WriteLine("{0}({1},{2}): DEBUG: Rewrote extreme predicate into prefix predicate: {3}", fce.tok.filename, fce.tok.line, fce.tok.col, Printer.ExprToString(r)); - return r; -#else + if (apply.Resolved is FunctionCallExpr fce) { + if (fce.Function is ExtremePredicate f && focalPredicates.Contains(f)) { return CloneCallAndAddK(fce); -#endif } } } + } else if (expr is BinaryExpr { ResolvedOp: BinaryExpr.ResolvedOpcode.EqCommon or BinaryExpr.ResolvedOpcode.NeqCommon } binaryExpr) { + if (binaryExpr.E0.Type.AsCoDatatype is { } coDatatypeDecl && focalCodatatypeEquality.Contains(coDatatypeDecl)) { + return CloneEqualityAndAddK(binaryExpr); + } } } return base.CloneExpr(expr); } public override AssignmentRhs CloneRHS(AssignmentRhs rhs) { - var r = rhs as ExprRhs; - if (r != null && r.Expr is ApplySuffix) { - var apply = (ApplySuffix)r.Expr; - var mse = apply.Lhs.Resolved as MemberSelectExpr; - if (mse != null && mse.Member is ExtremeLemma && ModuleDefinition.InSameSCC(context, (ExtremeLemma)mse.Member)) { + if (rhs is ExprRhs { Expr: ApplySuffix apply }) { + if (apply.Lhs.Resolved is MemberSelectExpr { Member: ExtremeLemma extremeLemma } && ModuleDefinition.InSameSCC(context, extremeLemma)) { // we're looking at a recursive call to an extreme 
lemma - Contract.Assert(apply.Lhs is NameSegment || apply.Lhs is ExprDotName); // this is the only way a call statement can have been parsed + Contract.Assert(apply.Lhs is NameSegment or ExprDotName); // this is the only way a call statement can have been parsed // clone "apply.Lhs", changing the least/greatest lemma to the prefix lemma; then clone "apply", adding in the extra argument Expression lhsClone; if (apply.Lhs is NameSegment) { var lhs = (NameSegment)apply.Lhs; - lhsClone = new NameSegment(Tok(lhs.tok), lhs.Name + "#", lhs.OptTypeArguments == null ? null : lhs.OptTypeArguments.ConvertAll(CloneType)); + lhsClone = new NameSegment(Tok(lhs.tok), lhs.Name + "#", lhs.OptTypeArguments?.ConvertAll(CloneType)); } else { var lhs = (ExprDotName)apply.Lhs; - lhsClone = new ExprDotName(Tok(lhs.tok), CloneExpr(lhs.Lhs), lhs.SuffixName + "#", lhs.OptTypeArguments == null ? null : lhs.OptTypeArguments.ConvertAll(CloneType)); + lhsClone = new ExprDotName(Tok(lhs.tok), CloneExpr(lhs.Lhs), lhs.SuffixName + "#", lhs.OptTypeArguments?.ConvertAll(CloneType)); } + var args = new List(); args.Add(new ActualBinding(null, k)); apply.Bindings.ArgumentBindings.ForEach(arg => args.Add(CloneActualBinding(arg))); var applyClone = new ApplySuffix(Tok(apply.tok), apply.AtTok == null ? 
null : Tok(apply.AtTok), lhsClone, args, Tok(apply.CloseParen)); var c = new ExprRhs(applyClone, CloneAttributes(rhs.Attributes)); - reporter.Info(MessageSource.Cloner, apply.Lhs.tok, mse.Member.Name + suffix); + reporter.Info(MessageSource.Cloner, apply.Lhs.tok, extremeLemma.Name + suffix); return c; } } + return base.CloneRHS(rhs); } } diff --git a/Source/DafnyCore/AST/Grammar/Printer/Printer.cs b/Source/DafnyCore/AST/Grammar/Printer/Printer.cs index 87afafd59a9..4d1d3c25d03 100644 --- a/Source/DafnyCore/AST/Grammar/Printer/Printer.cs +++ b/Source/DafnyCore/AST/Grammar/Printer/Printer.cs @@ -78,6 +78,14 @@ public static string ExprToString(DafnyOptions options, Expression expr, [CanBeN return wr.ToString(); } + public static string ExprListToString(DafnyOptions options, List expressions, [CanBeNull] PrintFlags printFlags = null) { + Contract.Requires(expressions != null); + using var wr = new StringWriter(); + var pr = new Printer(wr, options, printFlags: printFlags); + pr.PrintExpressionList(expressions, false); + return wr.ToString(); + } + public static string GuardToString(DafnyOptions options, bool isBindingGuard, Expression expr) { Contract.Requires(!isBindingGuard || (expr is ExistsExpr && ((ExistsExpr)expr).Range == null)); using (var wr = new System.IO.StringWriter()) { diff --git a/Source/DafnyCore/Backends/Rust/Dafny-compiler-rust-proofs.dfy b/Source/DafnyCore/Backends/Rust/Dafny-compiler-rust-proofs.dfy index 9b7e31813f0..a1110275254 100644 --- a/Source/DafnyCore/Backends/Rust/Dafny-compiler-rust-proofs.dfy +++ b/Source/DafnyCore/Backends/Rust/Dafny-compiler-rust-proofs.dfy @@ -136,6 +136,7 @@ module {:extern "DafnyToRustCompilerProofs"} {:compile false} DafnyToRustCompile if i[0] == '_' { assert |i| >= 2 && i[1] in "_qkh" && IsDafnyEncodedIdTail(i[2..]); + assert [i[0]] + replaceDots(i[1..]) == [i[0]] + [i[1]] + replaceDots(i[2..]); ReplaceDotsInvertible(i[2..]); assert ReverseReplaceDots(replaceDots(i)) == i; } else { diff --git 
a/Source/DafnyCore/Resolver/CollectFriendlyCallsInSpec_Visitor.cs b/Source/DafnyCore/Resolver/CollectFriendlyCallsInSpec_Visitor.cs index 76bccae2139..02b9ba438c1 100644 --- a/Source/DafnyCore/Resolver/CollectFriendlyCallsInSpec_Visitor.cs +++ b/Source/DafnyCore/Resolver/CollectFriendlyCallsInSpec_Visitor.cs @@ -19,9 +19,8 @@ protected override bool VisitOneExpr(Expression expr, ref CallingPosition cp) { // no friendly calls in "expr" return false; // don't recurse into subexpressions } - if (expr is FunctionCallExpr) { + if (expr is FunctionCallExpr fexp) { if (cp == CallingPosition.Positive) { - var fexp = (FunctionCallExpr)expr; if (IsCoContext ? fexp.Function is GreatestPredicate : fexp.Function is LeastPredicate) { if (Context.KNat != ((ExtremePredicate)fexp.Function).KNat) { KNatMismatchError(expr.tok, Context.Name, Context.TypeOfK, ((ExtremePredicate)fexp.Function).TypeOfK); @@ -31,8 +30,7 @@ protected override bool VisitOneExpr(Expression expr, ref CallingPosition cp) { } } return false; // don't explore subexpressions any further - } else if (expr is BinaryExpr && IsCoContext) { - var bin = (BinaryExpr)expr; + } else if (expr is BinaryExpr bin && IsCoContext) { if (cp == CallingPosition.Positive && bin.ResolvedOp == BinaryExpr.ResolvedOpcode.EqCommon && bin.E0.Type.IsCoDatatype) { friendlyCalls.Add(bin); return false; // don't explore subexpressions any further diff --git a/Source/DafnyCore/Resolver/ModuleResolver.cs b/Source/DafnyCore/Resolver/ModuleResolver.cs index ae9de04f6b2..91398372d1d 100644 --- a/Source/DafnyCore/Resolver/ModuleResolver.cs +++ b/Source/DafnyCore/Resolver/ModuleResolver.cs @@ -174,7 +174,7 @@ public void ComputeIsRecursiveBit(CompilationData compilation, ModuleDefinition } foreach (var rewriter in rewriters) { - rewriter.PostCyclicityResolve(module, Reporter); + rewriter.PostCyclicityResolve(module); } } @@ -1685,6 +1685,7 @@ private void FillInPostConditionsAndBodiesOfPrefixLemmas(List decl var k = prefixLemma.Ins[0]; var 
focalPredicates = new HashSet(); + var focalCodatatypeEquality = new HashSet(); if (com is GreatestLemma) { // compute the postconditions of the prefix lemma Contract.Assume(prefixLemma.Ens.Count == 0); // these are not supposed to have been filled in before @@ -1696,19 +1697,19 @@ private void FillInPostConditionsAndBodiesOfPrefixLemmas(List decl var post = subst.CloneExpr(p.E); prefixLemma.Ens.Add(new AttributedExpression(post)); foreach (var e in coConclusions) { - var fce = e as FunctionCallExpr; - if (fce != null) { - // the other possibility is that "e" is a BinaryExpr + if (e is FunctionCallExpr fce) { GreatestPredicate predicate = (GreatestPredicate)fce.Function; focalPredicates.Add(predicate); // For every focal predicate P in S, add to S all greatest predicates in the same strongly connected // component (in the call graph) as P - foreach (var node in predicate.EnclosingClass.EnclosingModuleDefinition.CallGraph.GetSCC( - predicate)) { - if (node is GreatestPredicate) { - focalPredicates.Add((GreatestPredicate)node); + foreach (var node in predicate.EnclosingClass.EnclosingModuleDefinition.CallGraph.GetSCC(predicate)) { + if (node is GreatestPredicate greatestPredicate) { + focalPredicates.Add(greatestPredicate); } } + } else { + var binExpr = (BinaryExpr)e; // each "coConclusion" is either a FunctionCallExpr or a BinaryExpr + focalCodatatypeEquality.Add(binExpr.E0.Type.AsCoDatatype ?? 
binExpr.E1.Type.AsCoDatatype); } } } @@ -1729,24 +1730,29 @@ private void FillInPostConditionsAndBodiesOfPrefixLemmas(List decl // For every focal predicate P in S, add to S all least predicates in the same strongly connected // component (in the call graph) as P foreach (var node in predicate.EnclosingClass.EnclosingModuleDefinition.CallGraph.GetSCC(predicate)) { - if (node is LeastPredicate) { - focalPredicates.Add((LeastPredicate)node); + if (node is LeastPredicate leastPredicate) { + focalPredicates.Add(leastPredicate); } } } } } - reporter.Info(MessageSource.Resolver, com.tok, - focalPredicates.Count == 0 - ? $"{com.PrefixLemma.Name} has no focal predicates" - : $"{com.PrefixLemma.Name} with focal predicate{Util.Plural(focalPredicates.Count)} {Util.Comma(focalPredicates, p => p.Name)}"); + var focalCount = focalPredicates.Count + focalCodatatypeEquality.Count; + if (focalCount == 0) { + reporter.Info(MessageSource.Resolver, com.tok, $"{com.PrefixLemma.Name} has no focal predicates"); + } else { + var predicates = Util.Comma(focalPredicates, p => p.Name); + var equalities = Util.Comma(focalCodatatypeEquality, decl => $"{decl.Name}.=="); + var focals = predicates + (predicates.Length != 0 && equalities.Length != 0 ? 
", " : "") + equalities; + reporter.Info(MessageSource.Resolver, com.tok, $"{com.PrefixLemma.Name} with focal predicate{Util.Plural(focalCount)} {focals}"); + } // Compute the statement body of the prefix lemma Contract.Assume(prefixLemma.Body == null); // this is not supposed to have been filled in before if (com.Body != null) { var kMinusOne = new BinaryExpr(com.tok, BinaryExpr.Opcode.Sub, new IdentifierExpr(k.tok, k.Name), new LiteralExpr(com.tok, 1)); - var subst = new ExtremeLemmaBodyCloner(com, kMinusOne, focalPredicates, this.reporter); + var subst = new ExtremeLemmaBodyCloner(com, kMinusOne, focalPredicates, focalCodatatypeEquality, this.reporter); var mainBody = subst.CloneBlockStmt(com.Body); Expression kk; Statement els; diff --git a/Source/DafnyCore/Rewriters/IRewriter.cs b/Source/DafnyCore/Rewriters/IRewriter.cs index 0a16b6fd953..8fd56e4bfef 100644 --- a/Source/DafnyCore/Rewriters/IRewriter.cs +++ b/Source/DafnyCore/Rewriters/IRewriter.cs @@ -76,7 +76,7 @@ internal virtual void PostResolveIntermediate(ModuleDefinition moduleDefinition) /// A module definition after it /// is resolved, type-checked and SCC/Cyclicity/Recursivity have been performed /// - internal virtual void PostCyclicityResolve(ModuleDefinition moduleDefinition, ErrorReporter errorReporter) { + internal virtual void PostCyclicityResolve(ModuleDefinition moduleDefinition) { Contract.Requires(moduleDefinition != null); } diff --git a/Source/DafnyCore/Rewriters/InductionHeuristic.cs b/Source/DafnyCore/Rewriters/InductionHeuristic.cs index 3787ab4303b..5afadabe5aa 100644 --- a/Source/DafnyCore/Rewriters/InductionHeuristic.cs +++ b/Source/DafnyCore/Rewriters/InductionHeuristic.cs @@ -1,3 +1,4 @@ +#nullable enable using System.Diagnostics.Contracts; namespace Microsoft.Dafny; @@ -6,6 +7,8 @@ public static class InductionHeuristic { /// /// Returns 'true' iff by looking at 'expr' the Induction Heuristic determines that induction should be applied to 'n'. 
+ /// Variable 'n' can be passed in as 'null', in which case it stands for 'this'. + /// /// More precisely: /// DafnyInductionHeuristic Return 'true' /// ----------------------- ------------- @@ -17,10 +20,10 @@ public static class InductionHeuristic { /// 5 if 'n' occurs as a prominent subexpression of any argument to a recursive function /// 6 if 'n' occurs as a prominent subexpression of any decreases-influencing argument to a recursive function /// - public static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Expression expr, IVariable n) { + public static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Expression expr, IVariable? n) { switch (options.InductionHeuristic) { case 0: return true; - case 1: return FreeVariablesUtil.ContainsFreeVariable(expr, false, n); + case 1: return FreeVariablesUtil.ContainsFreeVariable(expr, n == null, n); default: return VarOccursInArgumentToRecursiveFunction(options, expr, n, false); } } @@ -29,17 +32,20 @@ public static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, /// Worker routine for VarOccursInArgumentToRecursiveFunction(expr,n), where the additional parameter 'exprIsProminent' says whether or /// not 'expr' has prominent status in its context. /// DafnyInductionHeuristic cases 0 and 1 are assumed to be handled elsewhere (i.e., a precondition of this method is DafnyInductionHeuristic is at least 2). + /// Variable 'n' can be passed in as 'null', in which case it stands for 'this'. /// - static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Expression expr, IVariable n, bool exprIsProminent) { + static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Expression expr, IVariable? n, bool exprIsProminent) { Contract.Requires(expr != null); Contract.Requires(n != null); // The following variable is what gets passed down to recursive calls if the subexpression does not itself acquire prominent status. 
var subExprIsProminent = options.InductionHeuristic == 2 || options.InductionHeuristic == 4 ? /*once prominent, always prominent*/exprIsProminent : /*reset the prominent status*/false; - if (expr is IdentifierExpr) { + if (n != null && expr is IdentifierExpr) { var e = (IdentifierExpr)expr; return exprIsProminent && e.Var == n; + } else if (n == null && expr is ThisExpr) { + return exprIsProminent; } else if (expr is SeqSelectExpr) { var e = (SeqSelectExpr)expr; var q = options.InductionHeuristic < 4 || subExprIsProminent; @@ -108,7 +114,7 @@ static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Express } } else if (expr is DatatypeValue) { var e = (DatatypeValue)expr; - var q = n.Type.IsDatatype ? exprIsProminent : subExprIsProminent; // prominent status continues, if we're looking for a variable whose type is a datatype + var q = n != null && n.Type.IsDatatype ? exprIsProminent : subExprIsProminent; // prominent status continues, if we're looking for a variable whose type is a datatype return e.Arguments.Exists(exp => VarOccursInArgumentToRecursiveFunction(options, exp, n, q)); } else if (expr is UnaryExpr) { var e = (UnaryExpr)expr; @@ -145,7 +151,7 @@ static bool VarOccursInArgumentToRecursiveFunction(DafnyOptions options, Express } else if (expr is StmtExpr) { var e = (StmtExpr)expr; // ignore the statement - return VarOccursInArgumentToRecursiveFunction(options, e.E, n); + return VarOccursInArgumentToRecursiveFunction(options, e.E, n, exprIsProminent); } else if (expr is ITEExpr) { var e = (ITEExpr)expr; diff --git a/Source/DafnyCore/Rewriters/InductionRewriter.cs b/Source/DafnyCore/Rewriters/InductionRewriter.cs index 03b8e06f0ba..d5ad7866664 100644 --- a/Source/DafnyCore/Rewriters/InductionRewriter.cs +++ b/Source/DafnyCore/Rewriters/InductionRewriter.cs @@ -1,5 +1,7 @@ using System.Collections.Generic; using System.Diagnostics.Contracts; +using System.Linq; +using JetBrains.Annotations; using static Microsoft.Dafny.RewriterErrors; 
namespace Microsoft.Dafny; @@ -40,12 +42,9 @@ internal override void PostDecreasesResolve(ModuleDefinition m) { } } - if (decl is NewtypeDecl) { - var nt = (NewtypeDecl)decl; - if (nt.Constraint != null) { - var visitor = new Induction_Visitor(this); - visitor.Visit(nt.Constraint); - } + if (decl is NewtypeDecl { Constraint: { } constraint }) { + var visitor = new InductionVisitor(this); + visitor.Visit(constraint); } } } @@ -53,7 +52,7 @@ internal override void PostDecreasesResolve(ModuleDefinition m) { void ProcessMethodExpressions(Method method) { Contract.Requires(method != null); - var visitor = new Induction_Visitor(this); + var visitor = new InductionVisitor(this); method.Req.ForEach(mfe => visitor.Visit(mfe.E)); method.Ens.ForEach(mfe => visitor.Visit(mfe.E)); if (method.Body != null) { @@ -63,7 +62,7 @@ void ProcessMethodExpressions(Method method) { void ProcessFunctionExpressions(Function function) { Contract.Requires(function != null); - var visitor = new Induction_Visitor(this); + var visitor = new InductionVisitor(this); function.Req.ForEach(visitor.Visit); function.Ens.ForEach(visitor.Visit); if (function.Body != null) { @@ -73,20 +72,31 @@ void ProcessFunctionExpressions(Function function) { void ComputeLemmaInduction(Method method) { Contract.Requires(method != null); - if (method.Body != null && method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && - !(method is ExtremeLemma)) { - var specs = new List(); - method.Req.ForEach(mfe => specs.Add(mfe.E)); - method.Ens.ForEach(mfe => specs.Add(mfe.E)); - ComputeInductionVariables(method.tok, method.Ins, specs, method, ref method.Attributes); + if (method is { IsGhost: true, AllowsAllocation: false, Outs: { Count: 0 }, Body: not null } and not ExtremeLemma) { + Expression pre = Expression.CreateBoolLiteral(method.tok, true); + foreach (var req in method.Req) { + pre = Expression.CreateAnd(pre, req.E); + } + + Expression post = Expression.CreateBoolLiteral(method.tok, true); + 
foreach (var ens in method.Ens) { + post = Expression.CreateAnd(post, ens.E); + } + + ComputeInductionVariables(method.tok, method.Ins, Expression.CreateImplies(pre, post), method, ref method.Attributes); } } - void ComputeInductionVariables(IToken tok, List boundVars, List searchExprs, - Method lemma, ref Attributes attributes) where VarType : class, IVariable { + /// + /// Look at the command-line options and any {:induction} attribute to determine a good list of induction + /// variables. If there are any, then record them in an attribute {:_induction ...} added to "attributes". + /// "body" is the condition that the induction would support. + /// + void ComputeInductionVariables(IToken tok, List boundVars, Expression body, + [CanBeNull] Method lemma, ref Attributes attributes) where TVarType : class, IVariable { Contract.Requires(tok != null); Contract.Requires(boundVars != null); - Contract.Requires(searchExprs != null); + Contract.Requires(body != null); Contract.Requires(Reporter.Options.Induction != 0); var args = Attributes.FindExpressions(attributes, @@ -106,9 +116,9 @@ void ComputeInductionVariables(IToken tok, List boundVars, Lis } else if (args.Count == 0) { // {:induction} is treated the same as {:induction true}, which says to automatically infer induction variables // GO INFER below (all boundVars) - } else if (args.Count == 1 && args[0] is LiteralExpr && ((LiteralExpr)args[0]).Value is bool) { + } else if (args.Count == 1 && args[0] is LiteralExpr { Value: bool and var boolValue }) { // {:induction false} or {:induction true} - if (!(bool)((LiteralExpr)args[0]).Value) { + if (!boolValue) { // we're told not to infer anything return; } @@ -117,12 +127,11 @@ void ComputeInductionVariables(IToken tok, List boundVars, Lis // Here, we're expecting the arguments to {:induction args} to be a sublist of "this;boundVars", where "this" is allowed only // if "lemma" denotes an instance lemma. 
var goodArguments = new List(); - var i = lemma != null && !lemma.IsStatic + var i = lemma is { IsStatic: false } ? -1 : 0; // -1 says it's okay to see "this" or any other parameter; 0 <= i says it's okay to see parameter i or higher foreach (var arg in args) { - var ie = arg.Resolved as IdentifierExpr; - if (ie != null) { + if (arg.Resolved is IdentifierExpr ie) { var j = boundVars.FindIndex(v => v == ie.Var); if (0 <= j && i <= j) { goodArguments.Add(ie); @@ -156,27 +165,49 @@ void ComputeInductionVariables(IToken tok, List boundVars, Lis return; } - // The argument list was legal, so let's use it for the _induction attribute + // The argument list was legal, so let's use it for the _induction attribute. + // Next, look for matching patterns for the induction hypothesis. + if (lemma != null) { + var triggers = ComputeInductionTriggers(goodArguments, body, lemma.EnclosingClass.EnclosingModuleDefinition, tok, attributes); + ReportInductionTriggers(lemma, ref attributes, triggers); + } + attributes = new Attributes("_induction", goodArguments, attributes); + return; } // Okay, here we go, coming up with good induction setting for the given situation var inductionVariables = new List(); - if (lemma != null && !lemma.IsStatic) { - if (args != null || searchExprs.Exists(expr => FreeVariablesUtil.ContainsFreeVariable(expr, true, null))) { + if (lemma is { IsStatic: false }) { + if (args != null || InductionHeuristic.VarOccursInArgumentToRecursiveFunction(Reporter.Options, body, null)) { inductionVariables.Add(new ThisExpr(lemma)); } } foreach (IVariable n in boundVars) { - if (!(n.Type.IsTypeParameter || n.Type.IsAbstractType || n.Type.IsInternalTypeSynonym) && (args != null || - searchExprs.Exists(expr => InductionHeuristic.VarOccursInArgumentToRecursiveFunction(Reporter.Options, expr, n)))) { + if (!(n.Type.IsTypeParameter || n.Type.IsAbstractType || n.Type.IsInternalTypeSynonym || n.Type.IsArrowType) && + (args != null || 
InductionHeuristic.VarOccursInArgumentToRecursiveFunction(Reporter.Options, body, n))) { inductionVariables.Add(new IdentifierExpr(n.Tok, n)); } } if (inductionVariables.Count != 0) { + List> triggers = null; + if (lemma != null) { + // Compute the induction triggers, but don't report their patterns into attributes yet. Instead, + // call ReportInductionTriggers only after the "_induction" attribute has been added. This will cause the + // tooltips to appear in a logical order (showing the induction variables first, followed by the matching patterns). + triggers = ComputeInductionTriggers(inductionVariables, body, lemma.EnclosingClass.EnclosingModuleDefinition, + args != null ? tok : null, attributes); + if (triggers.Count == 0 && args == null) { + // The user didn't ask for induction. But since there were candidate induction variables, report an informational message. + var candidates = $"candidate{Util.Plural(inductionVariables.Count)} {Printer.ExprListToString(Reporter.Options, inductionVariables)}"; + Reporter.Info(MessageSource.Rewriter, tok, $"omitting automatic induction (for induction-variable {candidates}) because of lack of triggers"); + return; + } + } + // We found something usable, so let's record that in an attribute attributes = new Attributes("_induction", inductionVariables, attributes); // And since we're inferring something, let's also report that in a hover text. @@ -184,24 +215,106 @@ void ComputeInductionVariables(IToken tok, List boundVars, Lis if (lemma is PrefixLemma) { s = lemma.Name + " " + s; } - Reporter.Info(MessageSource.Rewriter, tok, s); + + if (triggers != null) { + ReportInductionTriggers(lemma, ref attributes, triggers); + } + } + } + + /// + /// Report as tooltips the matching patterns selected for the induction hypothesis. 
+ /// + private void ReportInductionTriggers(Method lemma, ref Attributes attributes, List> triggers) { + foreach (var trigger in triggers) { + attributes = new Attributes("_inductionPattern", trigger, attributes); +#if DEBUG + var ss = Printer.OneAttributeToString(Reporter.Options, attributes, "inductionPattern"); + if (lemma is PrefixLemma) { + ss = lemma.Name + " " + ss; + } + + Reporter.Info(MessageSource.Rewriter, lemma.tok, ss); +#endif + } + } + + /// + /// Obtain and return matching patterns for + /// (forall inductionVariables :: body) + /// If there aren't any, then return an empty list. + /// + /// If "errorToken" is non-null and there are no matching patterns, then a warning/info message is emitted. + /// The selection between warning vs info is done by looking for a {:nowarn} attribute among "attributes". + /// + List> ComputeInductionTriggers(List inductionVariables, Expression body, ModuleDefinition moduleDefinition, + [CanBeNull] IToken errorToken, Attributes attributes) { + Contract.Requires(inductionVariables.Count != 0); + + // Construct a quantifier, because that's what the trigger-generating machinery expects. + // We start by creating a new BoundVar for each ThisExpr-or-IdentifierExpr in "inductionVariables". 
+ var boundVars = new List(); + var substMap = new Dictionary(); + var reverseSubstMap = new Dictionary(); + Expression receiverReplacement = null; + foreach (var inductionVariableExpr in inductionVariables) { + var tok = inductionVariableExpr.tok; + BoundVar boundVar; + if (inductionVariableExpr is IdentifierExpr identifierExpr) { + boundVar = new BoundVar(tok, identifierExpr.Var.Name, identifierExpr.Var.Type); + substMap.Add(identifierExpr.Var, new IdentifierExpr(tok, boundVar)); + } else { + Contract.Assert(inductionVariableExpr is ThisExpr); + boundVar = new BoundVar(tok, "this", inductionVariableExpr.Type); + receiverReplacement = new IdentifierExpr(tok, boundVar); + } + boundVars.Add(boundVar); + reverseSubstMap.Add(boundVar, inductionVariableExpr); + } + + var substituter = new Substituter(receiverReplacement, substMap, new Dictionary()); + var quantifier = new ForallExpr(body.tok, body.RangeToken, boundVars, null, substituter.Substitute(body), null) { + Type = Type.Bool + }; + + var finder = new Triggers.QuantifierCollector(Reporter); + var triggersCollector = new Triggers.TriggersCollector(finder.exprsInOldContext, Reporter.Options, moduleDefinition); + var quantifierCollection = new Triggers.ComprehensionTriggerGenerator(quantifier, Enumerable.Repeat(quantifier, 1), Reporter); + quantifierCollection.ComputeTriggers(triggersCollector); + // Get the computed triggers, but only ask for those that do not require additional bound variables. (An alternative to this + // design would be to add {:matchinglooprewrite false} to "quantifier" above. However, that would cause certain matching loops + // to be ignored, so it is safer to not include triggers that require additional bound variables.) 
+ var triggers = quantifierCollection.GetTriggers(false); + var reverseSubstituter = new Substituter(null, reverseSubstMap, new Dictionary()); + var result = triggers.ConvertAll(trigger => trigger.ConvertAll(reverseSubstituter.Substitute)); + + if (result.Count == 0 && errorToken != null) { + // The user explicitly asked for induction (with {:induction}, {:induction true}, or {:induction }). + // Respect this choice, but generate a warning that no triggers were found. + var suppressWarnings = Attributes.Contains(attributes, "nowarn"); + var warningLevel = suppressWarnings ? ErrorLevel.Info : ErrorLevel.Warning; + + Reporter.Message(MessageSource.Rewriter, warningLevel, null, errorToken, + "Could not find a trigger for the induction hypothesis. Without a trigger, this may cause brittle verification. " + + $"Change or remove the {{:induction}} attribute to generate a different induction hypothesis, or add {{:nowarn}} to silence this warning. " + + "For more information, see the section quantifier instantiation rules in the reference manual."); } + + return result; } - class Induction_Visitor : BottomUpVisitor { + class InductionVisitor : BottomUpVisitor { readonly InductionRewriter IndRewriter; - public Induction_Visitor(InductionRewriter inductionRewriter) { + public InductionVisitor(InductionRewriter inductionRewriter) { Contract.Requires(inductionRewriter != null); IndRewriter = inductionRewriter; } protected override void VisitOneExpr(Expression expr) { - var q = expr as QuantifierExpr; - if (q != null && q.SplitQuantifier == null) { - IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List() { q.LogicalBody() }, null, - ref q.Attributes); + if (expr is QuantifierExpr { SplitQuantifier: null } q) { + IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, q.LogicalBody(), null, ref q.Attributes); } } } diff --git a/Source/DafnyCore/Rewriters/TriggerGeneratingRewriter.cs b/Source/DafnyCore/Rewriters/TriggerGeneratingRewriter.cs index 
e8283ee85e0..e676be5dfcb 100644 --- a/Source/DafnyCore/Rewriters/TriggerGeneratingRewriter.cs +++ b/Source/DafnyCore/Rewriters/TriggerGeneratingRewriter.cs @@ -9,8 +9,8 @@ internal TriggerGeneratingRewriter(ErrorReporter reporter, SystemModuleManager s this.systemModuleManager = systemModuleManager; } - internal override void PostCyclicityResolve(ModuleDefinition definition, ErrorReporter reporter) { - var finder = new Triggers.QuantifierCollector(reporter); + internal override void PostCyclicityResolve(ModuleDefinition definition) { + var finder = new Triggers.QuantifierCollector(Reporter); foreach (var decl in ModuleDefinition.AllCallablesIncludingPrefixDeclarations(definition.TopLevelDecls)) { finder.Visit(decl, null); diff --git a/Source/DafnyCore/Triggers/ComprehensionTriggerGenerator.cs b/Source/DafnyCore/Triggers/ComprehensionTriggerGenerator.cs index be69c3977c4..6c62a1fec9d 100644 --- a/Source/DafnyCore/Triggers/ComprehensionTriggerGenerator.cs +++ b/Source/DafnyCore/Triggers/ComprehensionTriggerGenerator.cs @@ -122,7 +122,7 @@ bool DetectAndFilterLoopingCandidates() { // In addition, we ignore cases where the only differences between a trigger // and a trigger match are places where a variable is replaced with an // expression whose free variables do not intersect that of the quantifier - // in which that expression is found. For examples of this behavious, see + // in which that expression is found. For examples of this behavior, see // triggers/literals-do-not-cause-loops. // This ignoring logic is implemented by the CouldCauseLoops method. bool foundLoop = false; @@ -222,7 +222,7 @@ internal void CommitTriggers(SystemModuleManager systemModuleManager) { reporter.Message(MessageSource.Rewriter, ErrorLevel.Info, null, comprehension.Tok, $"Quantifier was split into {partWriters.Count} parts. " + "Better verification performance and error reporting may be obtained by splitting the quantifier in source. 
" + - $"For more information, see the section quantifier instantiation rules in the reference manual."); + "For more information, see the section quantifier instantiation rules in the reference manual."); } for (var index = 0; index < partWriters.Count; index++) { @@ -230,5 +230,18 @@ internal void CommitTriggers(SystemModuleManager systemModuleManager) { triggerWriter.CommitTrigger(reporter, partWriters.Count > 1 ? index : null, systemModuleManager); } } + + public List> GetTriggers(bool includeTriggersThatRequireNamedExpressions) { + var triggers = new List>(); + foreach (var triggerWriter in partWriters) { + if (includeTriggersThatRequireNamedExpressions || triggerWriter.NamedExpressions.Count == 0) { + foreach (var triggerCandidate in triggerWriter.Candidates) { + var trigger = triggerCandidate.Terms.ConvertAll(t => t.Expr); + triggers.Add(trigger); + } + } + } + return triggers; + } } } diff --git a/Source/DafnyCore/Triggers/QuantifiersCollector.cs b/Source/DafnyCore/Triggers/QuantifiersCollector.cs index 1f169e161ba..869640da766 100644 --- a/Source/DafnyCore/Triggers/QuantifiersCollector.cs +++ b/Source/DafnyCore/Triggers/QuantifiersCollector.cs @@ -7,6 +7,7 @@ using System.Text; using System.Collections.ObjectModel; using System.Diagnostics.Contracts; +using JetBrains.Annotations; namespace Microsoft.Dafny.Triggers { internal class QuantifierCollector : TopDownVisitor { @@ -20,21 +21,24 @@ public QuantifierCollector(ErrorReporter reporter) { this.reporter = reporter; } + public void AddComprehension(ComprehensionExpr comprehensionExpr, [CanBeNull] List splitQuantifier) { + quantifiers.Add(comprehensionExpr); + if (splitQuantifier != null) { + var collection = splitQuantifier.OfType(); + quantifierCollections.Add(new ComprehensionTriggerGenerator(comprehensionExpr, collection, reporter)); + quantifiers.UnionWith(splitQuantifier); + } else { + quantifierCollections.Add(new ComprehensionTriggerGenerator(comprehensionExpr, Enumerable.Repeat(comprehensionExpr, 
1), reporter)); + } + } + protected override bool VisitOneExpr(Expression expr, ref OldExpr/*?*/ enclosingOldContext) { // only consider quantifiers that are not empty (Bound.Vars.Count > 0) if (expr is ComprehensionExpr e && e.BoundVars.Count > 0 && !quantifiers.Contains(e)) { if (e is SetComprehension or MapComprehension) { - quantifiers.Add(e); - quantifierCollections.Add(new ComprehensionTriggerGenerator(e, Enumerable.Repeat(e, 1), reporter)); + AddComprehension(e, null); } else if (e is QuantifierExpr quantifier) { - quantifiers.Add(quantifier); - if (quantifier.SplitQuantifier != null) { - var collection = quantifier.SplitQuantifier.Select(q => q as ComprehensionExpr).Where(q => q != null); - quantifierCollections.Add(new ComprehensionTriggerGenerator(e, collection, reporter)); - quantifiers.UnionWith(quantifier.SplitQuantifier); - } else { - quantifierCollections.Add(new ComprehensionTriggerGenerator(e, Enumerable.Repeat(quantifier, 1), reporter)); - } + AddComprehension(quantifier, quantifier.SplitQuantifier); } } diff --git a/Source/DafnyCore/Triggers/SplitPartTriggerWriter.cs b/Source/DafnyCore/Triggers/SplitPartTriggerWriter.cs index 4e5eedefb82..344742a2235 100644 --- a/Source/DafnyCore/Triggers/SplitPartTriggerWriter.cs +++ b/Source/DafnyCore/Triggers/SplitPartTriggerWriter.cs @@ -13,6 +13,7 @@ class SplitPartTriggerWriter { public List CandidateTerms { get; set; } public List Candidates { get; set; } private List RejectedCandidates { get; } + public List> NamedExpressions { get; } private List loopingMatches; private bool AllowsLoops { @@ -28,6 +29,7 @@ private bool AllowsLoops { internal SplitPartTriggerWriter(ComprehensionExpr comprehension) { this.Comprehension = comprehension; this.RejectedCandidates = new List(); + this.NamedExpressions = new(); } internal void TrimInvalidTriggers() { @@ -84,9 +86,7 @@ public bool RewriteMatchingLoop(ErrorReporter reporter, ModuleDefinition module) var entry = substMap.Find(x => ExprExtensions.ExpressionEq(sub, 
x.Item1)); if (entry == null) { var newBv = new BoundVar(sub.tok, "_t#" + substMap.Count, sub.Type); - var ie = new IdentifierExpr(sub.tok, newBv.Name); - ie.Var = newBv; - ie.Type = newBv.Type; + var ie = new IdentifierExpr(sub.tok, newBv.Name) { Var = newBv, Type = newBv.Type }; substMap.Add(new Tuple(sub, ie)); } } @@ -98,6 +98,7 @@ public bool RewriteMatchingLoop(ErrorReporter reporter, ModuleDefinition module) if (substMap.Count > 0) { var s = new ExprSubstituter(substMap); expr = s.Substitute(Comprehension) as QuantifierExpr; + NamedExpressions.AddRange(substMap); } else { // make a copy of the expr if (expr is ForallExpr) { @@ -187,14 +188,14 @@ string InfoFirstLineEnd(int count) { if (!CandidateTerms.Any() || !Candidates.Any()) { errorReporter.Message(MessageSource.Rewriter, warningLevel, null, reportingToken, - $"Could not find a trigger for this quantifier. Without a trigger, the quantifier may cause brittle verification. " + + "Could not find a trigger for this quantifier. Without a trigger, the quantifier may cause brittle verification. " + $"To silence this warning, add an explicit trigger using the {{:trigger}} attribute. " + - $"For more information, see the section quantifier instantiation rules in the reference manual."); + "For more information, see the section quantifier instantiation rules in the reference manual."); } else if (!CouldSuppressLoops && !AllowsLoops) { errorReporter.Message(MessageSource.Rewriter, warningLevel, null, reportingToken, - $"Triggers were added to this quantifier that may introduce matching loops, which may cause brittle verification. " + + "Triggers were added to this quantifier that may introduce matching loops, which may cause brittle verification. " + $"To silence this warning, add an explicit trigger using the {{:trigger}} attribute. 
" + - $"For more information, see the section quantifier instantiation rules in the reference manual."); + "For more information, see the section quantifier instantiation rules in the reference manual."); } } diff --git a/Source/DafnyCore/Triggers/TriggerExtensions.cs b/Source/DafnyCore/Triggers/TriggerExtensions.cs index e1417a6492a..6a79257962e 100644 --- a/Source/DafnyCore/Triggers/TriggerExtensions.cs +++ b/Source/DafnyCore/Triggers/TriggerExtensions.cs @@ -268,12 +268,7 @@ private static bool ShallowEq(WildcardExpr expr1, WildcardExpr expr2) { } private static bool ShallowEq(LambdaExpr expr1, LambdaExpr expr2) { -#if THROW_UNSUPPORTED_COMPARISONS - Contract.Assume(false); // This kind of expression never appears in a trigger - throw new NotImplementedException(); -#else - return false; -#endif + return true; } private static bool ShallowEq(MapComprehension expr1, MapComprehension expr2) { diff --git a/Source/DafnyCore/Triggers/TriggersCollector.cs b/Source/DafnyCore/Triggers/TriggersCollector.cs index a39c6ce38ed..a210052fcab 100644 --- a/Source/DafnyCore/Triggers/TriggersCollector.cs +++ b/Source/DafnyCore/Triggers/TriggersCollector.cs @@ -101,6 +101,7 @@ expr is ApplyExpr || expr is DisplayExpression || expr is MapDisplayExpr || expr is DatatypeValue || + expr is TernaryExpr || TranslateToFunctionCall(expr)) { return true; } else if (expr is BinaryExpr) { diff --git a/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs b/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs index 10d78a37498..4f4b707f9c9 100644 --- a/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs +++ b/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs @@ -1674,17 +1674,18 @@ public Boogie.Expr TrBoundVariables(List boundVars, List boundVars, List bvars, out Dictionary substMap, out Boogie.Trigger antitriggers) { + public Boogie.Expr TrBoundVariablesRename(List boundVars, List bvars, out Dictionary substMap) { 
Contract.Requires(boundVars != null); Contract.Requires(bvars != null); substMap = new Dictionary(); - antitriggers = null; Boogie.Expr typeAntecedent = Boogie.Expr.True; foreach (BoundVar bv in boundVars) { var newBoundVar = new BoundVar(bv.tok, bv.Name, bv.Type); - IdentifierExpr ie = new IdentifierExpr(newBoundVar.tok, newBoundVar.AssignUniqueName(BoogieGenerator.currentDeclaration.IdGenerator)); - ie.Var = newBoundVar; ie.Type = ie.Var.Type; // resolve ie here + IdentifierExpr ie = new IdentifierExpr(newBoundVar.tok, newBoundVar.AssignUniqueName(BoogieGenerator.currentDeclaration.IdGenerator)) { + Var = newBoundVar, + Type = newBoundVar.Type + }; substMap.Add(bv, ie); Boogie.Variable bvar = new Boogie.BoundVariable(newBoundVar.tok, new Boogie.TypedIdent(newBoundVar.tok, newBoundVar.AssignUniqueName(BoogieGenerator.currentDeclaration.IdGenerator), BoogieGenerator.TrType(newBoundVar.Type))); bvars.Add(bvar); diff --git a/Source/DafnyCore/Verifier/BoogieGenerator.Methods.cs b/Source/DafnyCore/Verifier/BoogieGenerator.Methods.cs index d6fc9b6adaf..7a893ade472 100644 --- a/Source/DafnyCore/Verifier/BoogieGenerator.Methods.cs +++ b/Source/DafnyCore/Verifier/BoogieGenerator.Methods.cs @@ -759,6 +759,7 @@ private StmtList TrMethodBody(Method m, BoogieStmtListBuilder builder, Variables foreach (var pre in m.Req) { parRange = Expression.CreateAnd(parRange, Substitute(pre.E, receiverSubst, substMap)); } + // construct an expression (generator) for: VF' << VF ExpressionConverter decrCheck = delegate (Dictionary decrSubstMap, ExpressionTranslator exprTran) { var decrToks = new List(); @@ -781,14 +782,17 @@ private StmtList TrMethodBody(Method m, BoogieStmtListBuilder builder, Variables null, null, false, true); }; + var triggers = Attributes.FindAllExpressions(m.Attributes, "_inductionPattern"); #if VERIFY_CORRECTNESS_OF_TRANSLATION_FORALL_STATEMENT_RANGE - var definedness = new BoogieStmtListBuilder(this, options); - var exporter = new BoogieStmtListBuilder(this, options); 
- TrForallStmtCall(m.tok, parBoundVars, parRange, decrCheck, null, recursiveCall, definedness, exporter, localVariables, etran); - // All done, so put the two pieces together - builder.Add(new Bpl.IfCmd(m.tok, null, definedness.Collect(m.tok), null, exporter.Collect(m.tok))); + var definedness = new BoogieStmtListBuilder(this, options, builder.Context); + var exporter = new BoogieStmtListBuilder(this, options, builder.Context); + TrForallStmtCall(m.tok, parBoundVars, parBounds, parRange, decrCheck, null, triggers, recursiveCall, definedness, + exporter, localVariables, etran); + // All done, so put the two pieces together + builder.Add(new Bpl.IfCmd(m.tok, null, definedness.Collect(m.tok), null, exporter.Collect(m.tok))); #else - TrForallStmtCall(m.tok, parBoundVars, parBounds, parRange, decrCheck, null, recursiveCall, null, builder, localVariables, etran); + TrForallStmtCall(m.tok, parBoundVars, parBounds, parRange, decrCheck, null, triggers, recursiveCall, null, + builder, localVariables, etran); #endif } // translate the body of the method diff --git a/Source/DafnyCore/Verifier/Statements/BoogieGenerator.TrForallStmt.cs b/Source/DafnyCore/Verifier/Statements/BoogieGenerator.TrForallStmt.cs index ab7e6ae4a58..20c4126c10d 100644 --- a/Source/DafnyCore/Verifier/Statements/BoogieGenerator.TrForallStmt.cs +++ b/Source/DafnyCore/Verifier/Statements/BoogieGenerator.TrForallStmt.cs @@ -40,12 +40,14 @@ private void TrForallStmt(BoogieStmtListBuilder builder, Variables locals, Expre } else { var s0 = (CallStmt)forallStmt.S0; if (Attributes.Contains(forallStmt.Attributes, "_trustWellformed")) { - TrForallStmtCall(forallStmt.Tok, forallStmt.BoundVars, forallStmt.Bounds, forallStmt.Range, null, forallStmt.EffectiveEnsuresClauses, s0, null, builder, locals, etran); + TrForallStmtCall(forallStmt.Tok, forallStmt.BoundVars, forallStmt.Bounds, forallStmt.Range, null, + forallStmt.EffectiveEnsuresClauses, null, s0, null, builder, locals, etran); } else { var definedness = new 
BoogieStmtListBuilder(this, options, builder.Context); DefineFuelConstant(forallStmt.Tok, forallStmt.Attributes, definedness, etran); var exporter = new BoogieStmtListBuilder(this, options, builder.Context); - TrForallStmtCall(forallStmt.Tok, forallStmt.BoundVars, forallStmt.Bounds, forallStmt.Range, null, forallStmt.EffectiveEnsuresClauses, s0, definedness, exporter, locals, etran); + TrForallStmtCall(forallStmt.Tok, forallStmt.BoundVars, forallStmt.Bounds, forallStmt.Range, null, + forallStmt.EffectiveEnsuresClauses, null, s0, definedness, exporter, locals, etran); // All done, so put the two pieces together builder.Add(new Bpl.IfCmd(forallStmt.Tok, null, definedness.Collect(forallStmt.Tok), null, exporter.Collect(forallStmt.Tok))); } @@ -71,13 +73,14 @@ private void TrForallStmt(BoogieStmtListBuilder builder, Variables locals, Expre void TrForallStmtCall(IToken tok, List boundVars, List bounds, - Expression range, ExpressionConverter additionalRange, List forallExpressions, CallStmt s0, + Expression range, ExpressionConverter additionalRange, List forallExpressions, List> triggers, CallStmt s0, BoogieStmtListBuilder definedness, BoogieStmtListBuilder exporter, Variables locals, ExpressionTranslator etran) { Contract.Requires(tok != null); Contract.Requires(boundVars != null); Contract.Requires(bounds != null); Contract.Requires(range != null); // additionalRange is allowed to be null + Contract.Requires(forallExpressions == null || triggers == null || triggers.Count == 0); Contract.Requires(s0 != null); // definedness is allowed to be null Contract.Requires(exporter != null); @@ -158,15 +161,14 @@ void TrForallStmtCall(IToken tok, List boundVars, List bo // Note, in the following, we need to do a bit of a song and dance. The actual arguments of the // call should be translated using "initEtran", whereas the method postcondition should be translated - // using "callEtran". 
To accomplish this, we translate the argument and then tuck the resulting + // using "callEtran". To accomplish this, we translate the arguments and then tuck the resulting // Boogie expressions into BoogieExprWrappers that are used in the DafnyExpr-to-DafnyExpr substitution. var bvars = new List(); Dictionary substMap; - Bpl.Trigger antitriggerBoundVarTypes; - Bpl.Expr ante; var argsSubstMap = new Dictionary(); // maps formal arguments to actuals Contract.Assert(s0.Method.Ins.Count == s0.Args.Count); var callEtran = new ExpressionTranslator(this, predef, etran.HeapExpr, initHeap, etran.scope); + Bpl.Expr ante; Bpl.Expr post = Bpl.Expr.True; Bpl.Trigger tr; if (forallExpressions != null) { @@ -176,29 +178,46 @@ void TrForallStmtCall(IToken tok, List boundVars, List bo expr = (QuantifierExpr)expr.SplitQuantifierExpression; } boundVars = expr.BoundVars; - ante = initEtran.TrBoundVariablesRename(boundVars, bvars, out substMap, out antitriggerBoundVarTypes); - ante = BplAnd(ante, initEtran.TrExpr(Substitute(expr.Range, null, substMap))); + ante = initEtran.TrBoundVariablesRename(boundVars, bvars, out substMap); + tr = TrTrigger(callEtran, expr.Attributes, expr.tok, bvars, substMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); + + var p = Substitute(expr.Range, null, substMap); + ante = BplAnd(ante, initEtran.TrExpr(p)); if (additionalRange != null) { ante = BplAnd(ante, additionalRange(substMap, initEtran)); } tr = TrTrigger(callEtran, expr.Attributes, expr.tok, bvars, substMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); post = callEtran.TrExpr(Substitute(expr.Term, null, substMap)); } else { - ante = initEtran.TrBoundVariablesRename(boundVars, bvars, out substMap, out antitriggerBoundVarTypes); - for (int i = 0; i < s0.Method.Ins.Count; i++) { - var arg = Substitute(s0.Args[i], null, substMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); // substitute the renamed bound variables for the declared ones - 
argsSubstMap.Add(s0.Method.Ins[i], new BoogieWrapper(initEtran.TrExpr(arg), s0.Args[i].Type)); - } - ante = BplAnd(ante, initEtran.TrExpr(Substitute(range, null, substMap))); + ante = initEtran.TrBoundVariablesRename(boundVars, bvars, out substMap); + + var p = Substitute(range, null, substMap); + ante = BplAnd(ante, initEtran.TrExpr(p)); if (additionalRange != null) { ante = BplAnd(ante, additionalRange(substMap, initEtran)); } + var receiver = new BoogieWrapper(initEtran.TrExpr(Substitute(s0.Receiver, null, substMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents())), s0.Receiver.Type); + for (int i = 0; i < s0.Method.Ins.Count; i++) { + var arg = Substitute(s0.Args[i], null, substMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); // substitute the renamed bound variables for the declared ones + argsSubstMap.Add(s0.Method.Ins[i], new BoogieWrapper(initEtran.TrExpr(arg), s0.Args[i].Type)); + } foreach (var ens in s0.Method.Ens) { - var p = Substitute(ens.E, receiver, argsSubstMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); // substitute the call's actuals for the method's formals + p = Substitute(ens.E, receiver, argsSubstMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); // substitute the call's actuals for the method's formals post = BplAnd(post, callEtran.TrExpr(p)); } - tr = antitriggerBoundVarTypes; + + tr = null; + if (triggers != null) { + foreach (var trigger in triggers) { + Contract.Assert(trigger.Count != 0); + var terms = trigger.ConvertAll(expr => { + expr = Substitute(expr, receiver, argsSubstMap, s0.MethodSelect.TypeArgumentSubstitutionsWithParents()); + return callEtran.TrExpr(expr); + }); + tr = new Trigger(trigger[0].tok, true, terms, tr); + } + } } // TRIG (forall $ih#s0#0: Seq :: $Is($ih#s0#0, TSeq(TChar)) && $IsAlloc($ih#s0#0, TSeq(TChar), $initHeapForallStmt#0) && Seq#Length($ih#s0#0) != 0 && Seq#Rank($ih#s0#0) < Seq#Rank(s#0) ==> (forall i#2: int :: true ==> LitInt(0) <= i#2 && i#2 < 
Seq#Length($ih#s0#0) ==> char#ToInt(_module.CharChar.MinChar($LS($LZ), $Heap, this, $ih#s0#0)) <= char#ToInt($Unbox(Seq#Index($ih#s0#0, i#2)): char))) @@ -532,4 +551,4 @@ void TrForallProof(ForallStmt forallStmt, BoogieStmtListBuilder definedness, Boo exporter.Add(TrAssumeCmd(forallStmt.Tok, BplAnd(se, ((Bpl.ForallExpr)qq).Body))); } } -} \ No newline at end of file +} diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo index 472917014fc..a945bf26f7c 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo index e42777eee01..82f1dd9c04c 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo index 4c1f17a2c99..c10fa53897d 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo index a7e521f03bd..0833926024b 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo index 
a1da8699385..fdf2bb7c73d 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo index e823114e535..17a311f71f7 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo index 66b99929968..6b731da1c49 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo differ diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy index 79da13c75d1..4d8a84b6aae 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy @@ -122,8 +122,16 @@ abstract module {:disableNonlinearArithmetic} Std.Arithmetic.LittleEndianNat { requires |xs| == 2 ensures ToNatRight(xs) == First(xs) + xs[1] * BASE() { - reveal ToNatRight(); - LemmaSeqLen1(DropLast(xs)); + var xs1 := DropFirst(xs); + calc { + ToNatRight(xs); + { reveal ToNatRight(); } + ToNatRight(xs1) * BASE() + First(xs); + { reveal ToNatRight(); } + (ToNatRight([]) * BASE() + First(xs1)) * BASE() + First(xs); + { reveal ToNatRight(); } + (0 * BASE() + First(xs1)) * BASE() + First(xs); + } } /* Appending a zero does not change the nat representation of the sequence. 
*/ diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Mul.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Mul.dfy index 8731e740e23..d6dc6d9b40a 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Mul.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Mul.dfy @@ -41,8 +41,21 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.Mul { requires x >= 0 ensures x * y == MulPos(x, y) { - reveal MulPos(); - LemmaMulInductionAuto(x, u => u >= 0 ==> u * y == MulPos(u, y)); + if x == 0 { + assert MulPos(x, y) == 0 by { + reveal MulPos(); + } + } else { + calc { + MulPos(x, y); + { reveal MulPos(x, y); } + y + MulPos(x - 1, y); + { LemmaMulIsMulPos(x - 1, y); } + y + (x - 1) * y; + { LemmaMulDistributes(); } + x * y; + } + } } /* ensures that the basic properties of multiplication, including the identity and zero properties */ diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Power.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Power.dfy index e8c99e338a6..62419d5ea8d 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Power.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Power.dfy @@ -417,9 +417,8 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.Power { ensures Pow(b, e1) <= Pow(b, e2) { reveal Pow(); - LemmaPowAuto(); var f := e => 0 <= e ==> Pow(b, e1) <= Pow(b, e1 + e); - forall i {:trigger IsLe(0, i)} | IsLe(0, i) && f(i) + forall i | IsLe(0, i) && f(i) ensures f(i + 1) { calc { diff --git a/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy b/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy index eaddebc3a3c..7dbfe3c42c6 100644 --- a/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy @@ -781,10 +781,10 @@ module Std.Collections.Seq { /* Filtering a sequence is distributive over concatenation. 
That is, concatenating two sequences and then using "Filter" is the same as using "Filter" on each sequence separately, and then concatenating the two resulting sequences. */ - lemma {:isolate_assertions} - LemmaFilterDistributesOverConcat(f: (T ~> bool), xs: seq, ys: seq) - requires forall i {:trigger xs[i]}:: 0 <= i < |xs| ==> f.requires(xs[i]) - requires forall j {:trigger ys[j]}:: 0 <= j < |ys| ==> f.requires(ys[j]) + lemma {:isolate_assertions} {:induction false} + LemmaFilterDistributesOverConcat(f: T ~> bool, xs: seq, ys: seq) + requires forall i :: 0 <= i < |xs| ==> f.requires(xs[i]) + requires forall j :: 0 <= j < |ys| ==> f.requires(ys[j]) ensures Filter(f, xs + ys) == Filter(f, xs) + Filter(f, ys) { reveal Filter(); @@ -793,10 +793,11 @@ module Std.Collections.Seq { } else { calc { Filter(f, xs + ys); - { assert {:split_here} (xs + ys)[0] == xs[0]; assert (xs + ys)[1..] == xs[1..] + ys; } + { assert (xs + ys)[0] == xs[0]; assert (xs + ys)[1..] == xs[1..] + ys; } Filter(f, [xs[0]]) + Filter(f, xs[1..] + ys); + { LemmaFilterDistributesOverConcat(f, xs[1..], ys); } Filter(f, [xs[0]]) + (Filter(f, xs[1..]) + Filter(f, ys)); - { assert {:split_here} [(xs + ys)[0]] + (xs[1..] + ys) == xs + ys; } + { assert [(xs + ys)[0]] + (xs[1..] 
+ ys) == xs + ys; } Filter(f, xs) + Filter(f, ys); } } diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/AutoContracts.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/AutoContracts.dfy.expect index 50ce7f167c5..934b0a287e3 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/AutoContracts.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/AutoContracts.dfy.expect @@ -248,13 +248,13 @@ module OneModule { data < 20 } - lemma /*{:_induction this}*/ L() + lemma L() requires Valid() ensures data < 100 { } - twostate lemma /*{:_induction this}*/ TL() + twostate lemma TL() requires old(Valid()) ensures old(data) <= data { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy index 708f0291219..f23c066103c 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy @@ -135,7 +135,7 @@ greatest lemma {:induction false} Compare(h: T) Compare(Next(h)); if { case true => - assert FF(h).tail == GG(h).tail; // error: full equality is not known here + assert FF(h).tail == GG(h).tail; // yes, this full equality is a focal predicate, so it's rewritten into ==#[_k - 1] case true => assert FF(h) ==#[_k] GG(h); // yes, this is the postcondition to be proved, and it is known to hold case true => diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy.expect index dd2100b12db..d5f27399a5a 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoPrefix.dfy.expect @@ -3,7 +3,6 @@ CoPrefix.dfy(76,55): Error: cannot prove termination; try supplying a decreases CoPrefix.dfy(114,0): Error: a 
postcondition could not be proved on this return path CoPrefix.dfy(113,10): Related location: this is the postcondition that could not be proved CoPrefix.dfy(101,16): Related location: this proposition could not be proved -CoPrefix.dfy(138,24): Error: assertion might not hold CoPrefix.dfy(142,24): Error: assertion might not hold CoPrefix.dfy(117,22): Related location: this proposition could not be proved CoPrefix.dfy(151,0): Error: a postcondition could not be proved on this return path @@ -17,4 +16,4 @@ CoPrefix.dfy(205,6): Error: the calculation step between the previous line and t CoPrefix.dfy(207,6): Error: the calculation step between the previous line and this line could not be proved CoPrefix.dfy(220,12): Error: ORDINAL subtraction might underflow a limit ordinal (that is, RHS might be too large) -Dafny program verifier finished with 13 verified, 12 errors +Dafny program verifier finished with 13 verified, 11 errors diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoinductiveProofs.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoinductiveProofs.dfy.expect index d56b584921a..2730c4fe541 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoinductiveProofs.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/CoinductiveProofs.dfy.expect @@ -31,5 +31,5 @@ CoinductiveProofs.dfy(208,21): Related location: this is the postcondition that CoinductiveProofs.dfy(4,23): Related location: this proposition could not be proved Dafny program verifier finished with 23 verified, 12 errors -Total resources used is 770796 +Total resources used is 770576 Max resources used by VC is 60436 diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/LetExpr.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/LetExpr.dfy index 9cdc06d5040..8b3300900aa 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/LetExpr.dfy +++ 
b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/LetExpr.dfy @@ -127,11 +127,11 @@ method Theorem0(n: int) } } -ghost method Theorem1(n: int) +lemma Theorem1(n: int) requires 1 <= n; ensures 1 <= Fib(n); { - // in a ghost method, the induction tactic takes care of it + // in a lemma, the induction tactic takes care of it } ghost function Theorem2(n: int): int diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/PrefixTypeSubst.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/PrefixTypeSubst.dfy.expect index 71102103951..7f1f51f43c1 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/PrefixTypeSubst.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/PrefixTypeSubst.dfy.expect @@ -231,7 +231,7 @@ greatest lemma N(o: MyClass) N(o); } /*** -lemma {:axiom} /*{:_induction _k}*/ N#[_k: ORDINAL](o: MyClass) +lemma {:axiom} /*{:_induction _k}*/ /*{:_inductionPattern o.R#[_k]()}*/ N#[_k: ORDINAL](o: MyClass) ensures o.R#[_k]() decreases _k, o { @@ -253,7 +253,7 @@ greatest lemma O() O(); } /*** -lemma {:axiom} /*{:_induction _k}*/ O#[_k: ORDINAL]() +lemma {:axiom} /*{:_induction _k}*/ /*{:_inductionPattern MyClass.S#[_k]()}*/ O#[_k: ORDINAL]() ensures MyClass.S#[_k]() decreases _k { @@ -448,7 +448,7 @@ greatest lemma RstRst7() } } /*** -lemma {:axiom} /*{:_induction _k}*/ RstRst7#[_k: ORDINAL]() +lemma {:axiom} /*{:_induction _k}*/ /*{:_inductionPattern MyClass.RST#[_k]()}*/ RstRst7#[_k: ORDINAL]() ensures MyClass.RST#[_k]() decreases _k { @@ -512,7 +512,7 @@ greatest lemma RstRst10[nat]() { } /*** -lemma {:axiom} /*{:_induction _k}*/ RstRst10#[_k: nat]() +lemma {:axiom} /*{:_induction _k}*/ /*{:_inductionPattern MyClass.RST_Nat#[_k]()}*/ RstRst10#[_k: nat]() ensures MyClass.RST_Nat#[_k]() decreases _k { @@ -588,7 +588,7 @@ class MyClass { L(u, v); } /*** - lemma {:axiom} /*{:_induction this, _k}*/ L#[_k: ORDINAL](u: U, v: V) + lemma {:axiom} /*{:_induction _k}*/ 
/*{:_inductionPattern P#[_k](u, v)}*/ L#[_k: ORDINAL](u: U, v: V) ensures P#[_k](u, v) decreases _k { @@ -611,7 +611,7 @@ class MyClass { assert R#[_k - 1](); } /*** - lemma {:axiom} /*{:_induction this, _k}*/ M#[_k: ORDINAL]() + lemma {:axiom} /*{:_induction _k}*/ /*{:_inductionPattern R#[_k]()}*/ M#[_k: ORDINAL]() ensures R#[_k]() decreases _k { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy index 8ac6f9a37ae..26376f13b2a 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy @@ -392,7 +392,7 @@ module MultisetTests { if n == 0 then 0 else F'(a, n-1) } - ghost method M(n: nat, b: multiset) + lemma M(n: nat, b: multiset) ensures F(b, n) == 0 // proved via automatic induction { } @@ -410,7 +410,7 @@ module MapTests { if n == 0 then 0 else F'(a, n-1) } - ghost method M(n: nat, b: map) + lemma M(n: nat, b: map) ensures F(b, n) == 0 // proved via automatic induction { } diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/Calculations.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/Calculations.dfy index 14926010e4e..ecf9db84e66 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/Calculations.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/Calculations.dfy @@ -49,6 +49,14 @@ lemma Lemma_ConcatNil(xs : List) lemma Lemma_RevCatCommute(xs : List) ensures forall ys, zs :: revacc(xs, concat(ys, zs)) == concat(revacc(xs, ys), zs); { + forall ys, zs + ensures revacc(xs, concat(ys, zs)) == concat(revacc(xs, ys), zs) + { + match xs + case Nil => + case Cons(_, xs') => + Lemma_RevCatCommute(xs'); + } } // Here is a theorem that says "qreverse" and "reverse" calculate the same result. 
The proof diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/Abstemious.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/Abstemious.dfy index 9645b0cf2f3..258e7be2e6f 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/Abstemious.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/Abstemious.dfy @@ -143,9 +143,10 @@ greatest lemma OhOnesTail_Correct() { } -greatest lemma OhOnes_Correct() +lemma OhOnes_Correct() ensures OhOnes() == Cons(0, ones()) { + OhOnesTail_Correct(); } lemma OhOnes_Correct'(n: nat) @@ -205,3 +206,25 @@ lemma Fib_Correct(n: nat) } } } + +// --------------- ternary expression is a trigger --------------- + +lemma OrdinalLemma(k: ORDINAL) + ensures OhOnes().tail ==#[k] ones() +{ + // automatic induction on k +} + +lemma NaturalLemma(k: nat) + ensures OhOnes().tail ==#[k] ones() +{ + // automatic induction on k +} + +lemma Quantifier() + // the following quantifiers use the entire body as a trigger (previously, ternary expressions + // had not been considered as trigger candidates) + requires forall k: nat :: OhOnes().tail ==#[k] ones() + requires forall k: ORDINAL :: OhOnes().tail ==#[k] ones() +{ +} diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/SimpleCoinduction.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/SimpleCoinduction.dfy index 352efe03eab..5ac106cae58 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/SimpleCoinduction.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny3/SimpleCoinduction.dfy @@ -1,4 +1,4 @@ -// RUN: %testDafnyForEachResolver "%s" -- --allow-deprecation +// RUN: %testDafnyForEachResolver "%s" codatatype Stream = Cons(head: T, tail: Stream) @@ -22,7 +22,7 @@ ghost function Inc(s: Stream): Stream } lemma {:induction false} UpLemma(k: nat, n: int) - ensures Inc(Up(n)) ==#[k] Up(n+1); + ensures Inc(Up(n)) ==#[k] Up(n+1) { if (k != 0) { UpLemma(k-1, n+1); @@ -30,18 +30,18 @@ lemma 
{:induction false} UpLemma(k: nat, n: int) } greatest lemma {:induction false} CoUpLemma(n: int) - ensures Inc(Up(n)) == Up(n+1); + ensures Inc(Up(n)) == Up(n+1) { CoUpLemma(n+1); } lemma UpLemma_Auto(k: nat, n: int, nn: int) - ensures nn == n+1 ==> Inc(Up(n)) ==#[k] Up(nn); // note: it would be nice to do an automatic rewrite (from "ensures Inc(Up(n)) ==#[k] Up(n+1)") to obtain the good trigger here + ensures nn == n+1 ==> Inc(Up(n)) ==#[k] Up(nn) // note: it would be nice to do an automatic rewrite (from "ensures Inc(Up(n)) ==#[k] Up(n+1)") to obtain the good trigger here { } greatest lemma CoUpLemma_Auto(n: int, nn: int) - ensures nn == n+1 ==> Inc(Up(n)) == Up(nn); // see comment above + ensures nn == n+1 ==> Inc(Up(n)) == Up(nn) // see comment above { } @@ -53,8 +53,9 @@ ghost function Repeat(n: int): Stream } greatest lemma RepeatLemma(n: int) - ensures Inc(Repeat(n)) == Repeat(n+1); + ensures Inc(Repeat(n)) == Repeat(n+1) { + RepeatLemma(n); } // ----------------------------------------------------------------------- @@ -65,7 +66,7 @@ greatest predicate True(s: Stream) } greatest lemma AlwaysTrue(s: Stream) - ensures True(s); + ensures True(s) { } @@ -75,7 +76,7 @@ greatest predicate AlsoTrue(s: Stream) } greatest lemma AlsoAlwaysTrue(s: Stream) - ensures AlsoTrue(s); + ensures AlsoTrue(s) { } @@ -85,7 +86,7 @@ greatest predicate TT(y: int) } greatest lemma AlwaysTT(y: int) - ensures TT(y); + ensures TT(y) { } @@ -116,11 +117,11 @@ greatest predicate AtMost(a: IList, b: IList) } greatest lemma ZerosAndOnes_Theorem0() - ensures AtMost(zeros(), ones()); + ensures AtMost(zeros(), ones()) { } greatest lemma ZerosAndOnes_Theorem1() - ensures Append(zeros(), ones()) == zeros(); + ensures Append(zeros(), ones()) == zeros() { } diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Bug170.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Bug170.dfy.expect index 9b8aeb71c83..976b1a237f1 100644 --- 
a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Bug170.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Bug170.dfy.expect @@ -14,8 +14,6 @@ Bug170.dfy(10,12): Info: B#[_k - 1] Bug170.dfy(15,12): Info: A#[_k - 1] Bug170.dfy(18,14): Info: AA# decreases _k, x Bug170.dfy(26,14): Info: BB# decreases _k, x -Bug170.dfy(18,14): Info: AA# {:induction _k, x} -Bug170.dfy(26,14): Info: BB# {:induction _k, x} Bug170.dfy(36,21): Info: _k: ORDINAL Bug170.dfy(41,21): Info: _k: ORDINAL Bug170.dfy(46,17): Info: _k: ORDINAL @@ -33,7 +31,9 @@ Bug170.dfy(43,4): Info: A#[_k - 1] Bug170.dfy(46,17): Info: AA# decreases _k, x Bug170.dfy(53,17): Info: BB# decreases _k, x Bug170.dfy(46,17): Info: AA# {:induction _k, x} +Bug170.dfy(46,17): Info: AA# {:inductionPattern A#[_k](x)} Bug170.dfy(53,17): Info: BB# {:induction _k, x} +Bug170.dfy(53,17): Info: BB# {:inductionPattern B#[_k](x)} Bug170.dfy(64,18): Info: _k: ORDINAL Bug170.dfy(69,14): Info: _k: ORDINAL Bug170.dfy(70,14): Info: A#[_k] @@ -42,7 +42,6 @@ Bug170.dfy(72,7): Info: A#[_k - 1] Bug170.dfy(73,6): Info: AA#[_k - 1] Bug170.dfy(66,12): Info: A#[_k - 1] Bug170.dfy(69,14): Info: AA# decreases _k, x -Bug170.dfy(69,14): Info: AA# {:induction _k, x} Bug170.dfy(50,11): Info: Some instances of this call are not inlined. Bug170.dfy(57,11): Info: Some instances of this call are not inlined. diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/GHC-MergeSort.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/GHC-MergeSort.dfy index a4022dc988a..bd80b089d5c 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/GHC-MergeSort.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/GHC-MergeSort.dfy @@ -472,6 +472,17 @@ lemma stable_sequences(g: G, xs: List) case Cons(a, ys) => match ys { case Nil => + calc { + flatten(sequences(xs)); + // def. sequences, since xs == Cons(a, Nil) + flatten(Cons(xs, Nil)); + // def. 
flatten + append(xs, flatten(Nil)); + // def. flatten + append(xs, Nil); + { append_Nil(xs); } + xs; + } case Cons(b, zs) => if !Below(a, b) { calc { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Lucas-up.legacy.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Lucas-up.legacy.dfy index 38413c3be06..5ba9023a9a7 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Lucas-up.legacy.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/Lucas-up.legacy.dfy @@ -66,7 +66,7 @@ ghost function binom(a: nat, b: nat): nat // div-2 is applied to both arguments, except in the case where // the first argument to "binom" is even and the second argument // is odd, in which case "binom" is always even. -lemma {:resource_limit "8e6"} Lucas_Binary(a: nat, b: nat) +lemma {:resource_limit "8e6"} {:induction a, b} {:nowarn} Lucas_Binary(a: nat, b: nat) ensures EVEN(binom(2*a, 2*b + 1)) ensures EVEN(binom(2*a, 2*b)) <==> EVEN(binom(a, b)) ensures EVEN(binom(2*a + 1, 2*b + 1)) <==> EVEN(binom(a, b)) @@ -83,7 +83,7 @@ lemma {:resource_limit "8e6"} Lucas_Binary(a: nat, b: nat) } // Here is an alternative way to phrase the previous lemma. 
-lemma {:resource_limit "200e6"} Lucas_Binary'(a: nat, b: nat) +lemma {:resource_limit "200e6"} {:induction a, b} {:nowarn} Lucas_Binary'(a: nat, b: nat) ensures binom(2*a, 2*b) % 2 == binom(a, b) % 2 ensures binom(2*a, 2*b + 1) % 2 == 0 ensures binom(2*a + 1, 2*b) % 2 == binom(a, b) % 2 diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/UnionFind.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/UnionFind.dfy index d80ac5cac0c..3831148a1cc 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/UnionFind.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/UnionFind.dfy @@ -264,7 +264,7 @@ module M3 refines M2 { } } - lemma {:autocontracts false} ReachUnaffectedByChangeFromRoot'(d: nat, e: Element, r: Element, C: CMap, td: nat, r0: Element, r1: Element, C': CMap) + lemma {:autocontracts false} {:induction d, e, r, C} {:nowarn} ReachUnaffectedByChangeFromRoot'(d: nat, e: Element, r: Element, C: CMap, td: nat, r0: Element, r1: Element, C': CMap) requires GoodCMap(C) requires e in C && Reaches(d, e, r, C) requires r0 in C && r1 in C && C[r0].Root? && C[r1].Root? 
&& C[r0].depth == C[r1].depth && r0 != r1 diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-977.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-977.dfy.expect index a88c1419457..db3dee30b5b 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-977.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-977.dfy.expect @@ -28,8 +28,17 @@ git-issue-977.dfy(84,16): Info: decreases num git-issue-977.dfy(129,16): Info: decreases k, num git-issue-977.dfy(146,16): Info: decreases k, num git-issue-977.dfy(54,6): Info: {:induction num} +git-issue-977.dfy(54,6): Info: {:inductionPattern GreatestPredNat(num)} +git-issue-977.dfy(54,6): Info: {:inductionPattern GreatestPredOrd(num)} +git-issue-977.dfy(54,6): Info: {:inductionPattern Pred(num)} git-issue-977.dfy(61,6): Info: {:induction k, num} +git-issue-977.dfy(61,6): Info: {:inductionPattern RicochetOrd(k, num)} +git-issue-977.dfy(61,6): Info: {:inductionPattern GreatestManualOrd(k, num)} +git-issue-977.dfy(61,6): Info: {:inductionPattern GreatestPredOrd#[k](num)} git-issue-977.dfy(77,6): Info: {:induction k, num} +git-issue-977.dfy(77,6): Info: {:inductionPattern RicochetNat(k, num)} +git-issue-977.dfy(77,6): Info: {:inductionPattern GreatestManualNat(k, num)} +git-issue-977.dfy(77,6): Info: {:inductionPattern GreatestPredNat#[k](num)} git-issue-977.dfy(71,4): Info: ensures GreatestPredOrd#[m](num) git-issue-977.dfy(71,4): Info: ensures GreatestManualOrd(m, num) git-issue-977.dfy(71,4): Info: ensures RicochetOrd(m, num) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/github-issue-4804.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/github-issue-4804.dfy index 35dc939d8eb..6516b3be1a8 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/github-issue-4804.dfy +++ 
b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/github-issue-4804.dfy @@ -1,4 +1,4 @@ -// RUN: %exits-with 4 %verify --allow-axioms --resource-limit 1000000 %s > %t +// RUN: %exits-with 4 %verify --allow-axioms --resource-limit 1200 %s > %t // RUN: %diff "%s.expect" "%t" module Power { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/hofs/SumSum.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/hofs/SumSum.dfy index 4a7f8894e41..bda1eb00196 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/hofs/SumSum.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/hofs/SumSum.dfy @@ -20,6 +20,7 @@ lemma ExchangeEta(n: nat, f: int -> int, g: int -> int) requires forall i :: 0 <= i < n ==> f(i) == g(i) ensures Sum(n, x => f(x)) == Sum(n, x => g(x)) { + if n != 0 { ExchangeEta(n - 1, f, g); } } lemma NestedAlphaRenaming(n: nat, g: (int,int) -> int) @@ -37,10 +38,10 @@ lemma Distribute(n: nat, f: int -> int, g: int -> int) { } -lemma {:induction false} PrettyBasicBetaReduction(n: nat, g: (int,int) -> int, i: int) +lemma PrettyBasicBetaReduction(n: nat, g: (int,int) -> int, i: int) ensures (x => Sum(n, y => g(x,y)))(i) == Sum(n, y => g(i,y)) { - // NOTE: This proof is by induction on n (it can be done automatically) + // NOTE: This proof is by induction on n if n == 0 { calc { (x => Sum(n, y => g(x,y)))(i); @@ -62,7 +63,6 @@ lemma {:induction false} PrettyBasicBetaReduction(n: nat, g: (int,int) -> int, i lemma BetaReduction0(n: nat, g: (int,int) -> int, i: int) ensures (x => Sum(n, y => g(x,y)))(i) == Sum(n, y => g(i,y)) { - // automatic proof by induction on n } lemma BetaReduction1(n': nat, g: (int,int) -> int, i: int) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy index 0c777579f63..9a528b39724 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy +++ 
b/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy @@ -73,7 +73,7 @@ lemma distr_mult(f: Index -> int, x: int) /** Σ_k (Σ_l f(k,l)) == Σ_l (Σ_k f(k,l)) */ /** proof by induction */ -lemma sum_assoc_n(m: Matrix, n1: nat, n2: nat) +lemma {:induction m, n1, n2} {:nowarn} sum_assoc_n(m: Matrix, n1: nat, n2: nat) requires n1 <= N && n2 <= N ensures Sum_n((k: Index) => Sum_n((l: Index) => m(k)(l), n1), n2) == diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy new file mode 100644 index 00000000000..5e35dcdf2ef --- /dev/null +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy @@ -0,0 +1,125 @@ +// RUN: %testDafnyForEachResolver "%s" --expect-exit-code=4 + +ghost function Sum(n: nat, f: int -> int): int +{ + if n == 0 then 0 else f(n-1) + Sum(n-1, f) +} + +lemma TestTriggerWithLambdaExpression(n: nat, f: int -> int, g: int -> int) +{ + // Once, trigger selection would crash on the following quantifier, which uses a LambdaExpr. + assert forall n: nat, f: int -> int :: Sum(n, x => 500 + f(x)) == n + Sum(n, f); // error: this does not hold +} + +// With an explicit :induction attribute, the induction hypothesis emitted lets the proof of the lemma go through. +// However, there is no good matching pattern for the induction hypothesis, so a warning is generated. +// For a manual proof of this lemma, see lemma ExchangeEta in hofs/SumSum.dfy. +lemma {:induction n} ExchangeEtaWithInductionAttribute(n: nat, f: int -> int, g: int -> int) // warning: no trigger + requires forall i :: 0 <= i < n ==> f(i) == g(i) + ensures Sum(n, x => f(x)) == Sum(n, x => g(x)) +{ +} + +// ------------------------------------------- + +type OpaqueType + +// Recursive predicates. 
Note that the heuristics for finding candidate induction variables pay attention +// to whether or not the predicate is recursive. + +predicate P(n: nat) { + if n <= 0 then true else P(n - 1) +} + +predicate Q(n: nat, b: bool) { + if n <= 0 then true else Q(n - 1, !b) +} + +// -------------------- + +lemma {:induction n, b} GivenListNoTrigger0(n: nat, o: OpaqueType, b: bool) // warning: no trigger + requires 0 <= n < 100 + ensures P(if b then n else n) +{ +} + +lemma {:induction n, b} GivenListNoTrigger1(n: nat, o: OpaqueType, b: bool) // warning: no trigger + requires 0 <= n < 100 + ensures P(n) +{ +} + +lemma {:induction n} GivenList(n: nat, o: OpaqueType, b: bool) // matching pattern for IH: P(n) + requires 0 <= n < 100 + ensures P(n) +{ +} + +// -------------------- + +lemma {:induction} YesToIH(n: nat, o: OpaqueType, b: bool) // induction: n, b; warning: no trigger + requires 0 <= n < 100 + ensures P(if b then n else n) +{ // cannot prove postcondition +} + +lemma {:induction} YesToIH1(n: nat, o: OpaqueType, b: bool) // induction: n, b; warning: no trigger + requires 0 <= n < 100 + ensures P(n) +{ // cannot prove postcondition +} + +lemma {:induction} YesToIH2(n: nat, o: OpaqueType) // induction: n + requires 0 <= n < 100 + ensures P(n) +{ +} + +// -------------------- + +lemma {:induction false} NoIH(n: nat, o: OpaqueType, b: bool) + requires 0 <= n < 100 + ensures P(if b then n else n) +{ // cannot prove postcondition +} + +lemma {:induction false} NoIHButManualProof(n: nat, o: OpaqueType, b: bool) + requires 0 <= n < 100 + ensures P(if b then n else n) +{ + if n != 0 { + NoIHButManualProof(n - 1, o, b); + } +} + +// -------------------- + +lemma AutomaticInduction(n: nat, o: OpaqueType, b: bool) // no induction, because no triggers (candidates: n) + requires 0 <= n < 100 + ensures P(if b then n else n) +{ // cannot prove postcondition +} + +lemma AutomaticInduction1(n: nat, o: OpaqueType, b: bool) // induction: n; trigger: P(n) + requires 0 <= n < 100 + 
ensures P(n) +{ +} + +lemma AutomaticInduction2(n: nat, o: OpaqueType, b: bool) // induction: n, b; trigger: Q(n, b) + requires 0 <= n < 100 + ensures Q(n, b) +{ +} + +lemma AutomaticInduction3(n: nat, o: OpaqueType, b: bool) // induction: n; trigger: Q(n, true) + requires 0 <= n < 100 + ensures Q(n, true) +{ +} + +lemma AutomaticInduction4(n: nat, o: OpaqueType, b: bool) // no induction, because no triggers (candidates: n, b) + requires 0 <= n < 100 + ensures P(n) && Q(n + 12, b) +{ // cannot prove postcondition +} diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy.expect new file mode 100644 index 00000000000..815d9943bf2 --- /dev/null +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/triggers/InductionWithoutTriggers.dfy.expect @@ -0,0 +1,4 @@ +InductionWithoutTriggers.dfy(17,21): Warning: Could not find a trigger for the induction hypothesis. Without a trigger, this may cause brittle verification. Change or remove the {:induction} attribute to generate a different induction hypothesis, or add {:nowarn} to silence this warning. For more information, see the section quantifier instantiation rules in the reference manual. +InductionWithoutTriggers.dfy(11,9): Error: assertion might not hold + +Dafny program verifier finished with 2 verified, 1 error diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy index e76d541dd1c..83a50b795a4 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy @@ -1,5 +1,5 @@ -// RUN: ! %verify "%s" --disable-nonlinear-arithmetic --allow-axioms --resource-limit 1e6 > "%t" -// RUN: ! 
%verify "%s" --allow-axioms --resource-limit 1e6 >> "%t" +// RUN: %verify "%s" --disable-nonlinear-arithmetic --allow-axioms --resource-limit 1e6 > "%t" +// RUN: %verify "%s" --allow-axioms --resource-limit 1e6 >> "%t" // RUN: %diff "%s.expect" "%t" module Power0 { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy.expect index 0337e36f8f0..9efe4297819 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy.expect +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/nonLinearArithmetic.dfy.expect @@ -1,7 +1,4 @@ -nonLinearArithmetic.dfy(97,8): Error: Verification out of resource (Power3.LemmaPowSubAddCancel) -Dafny program verifier finished with 15 verified, 0 errors, 1 out of resource -nonLinearArithmetic.dfy(21,8): Error: Verification out of resource (Power0.LemmaPowSubAddCancel) -nonLinearArithmetic.dfy(97,8): Error: Verification out of resource (Power3.LemmaPowSubAddCancel) +Dafny program verifier finished with 16 verified, 0 errors -Dafny program verifier finished with 14 verified, 0 errors, 2 out of resource +Dafny program verifier finished with 16 verified, 0 errors diff --git a/docs/dev/news/5835.feat b/docs/dev/news/5835.feat new file mode 100644 index 00000000000..5858b08bbe0 --- /dev/null +++ b/docs/dev/news/5835.feat @@ -0,0 +1,41 @@ +Fill in matching patterns for the quantifiers introduced by automatic induction to represent the induction hypothesis. Suppress the generation of the induction hypothesis if no such matching patterns are found. Enhance tooltips accordingly. This feature is added to stabilize verification, but by sometimes not generating induction hypotheses, some automatic proofs may no longer go through. 
For backward compatibility, use an explicit `{:induction ...}` where `...` is the list of variables to use for the induction-hypothesis quantifier. Additionally, use a `{:nowarn}` attribute to suppress any warning about lack of matching patterns. + +Improve the selection of induction variables. + +Allow codatatype equality in matching patterns and as a focal predicate for extreme predicates. + +More specifically: + +* If a lemma bears `{:induction x, y, z}`, where `x, y, z` is a subset of the lemma's parameters (in the same order + that the lemma gives them), then an induction hypothesis (IH) is generated. The IH quantifies over the + given variables. + + For an instance-member lemma, the variables may include the implicit `this` parameter. + For an extreme lemma, the IH generated is the one for the corresponding prefix lemma, and the given variables may + include the implicit parameter `_k`. + + If good matching patterns are found for the quantifier, then these are indicated in tooltips. + If no patterns are found, then a warning is generated; except, if the lemma bears `{:nowarn}`, then only + an informational message is given. + +* If a lemma bears `{:induction}` or `{:induction true}`, then a list of induction variables is determined heuristically. + If the list is empty, then a warning message is generated and no IH is generated. If the list is nonempty, + an IH is generated and the list of variables is indicated in a tooltip. + + If good matching patterns are found for the quantifier, then these are indicated in tooltips. + If no patterns are found, then a warning is generated; except, if the lemma bears `{:nowarn}`, then only + an informational message is given. + +* If a lemma bears `{:induction false}`, then no IH is generated. + +* If a lemma bears an `:induction` attribute other than those listed above, then an error is generated. + +* If a lemma bears no `:induction` attribute, then a list of induction variables is determined heuristically. 
+ If this list is empty, then no IH is applied and no warning/info is given. + + If the list is nonempty, then the machinery looks for matching patterns for the IH quantifier. If none are + found, then an informational message is generated, saying which candidate variables were used and saying that + no matching patterns were found. + + If patterns are found, then the list of variables and the patterns are indicated in tooltips, and the + patterns are used with the IH quantifier.