path
stringlengths
11
71
content
stringlengths
75
124k
Tactic\Rify.lean
/-
Copyright (c) 2023 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Moritz Doll, Mario Carneiro, Robert Y. Lewis, Patrick Massot
-/
import Mathlib.Data.Rat.Cast.Order
import Mathlib.Data.Real.Basic
import Mathlib.Tactic.Qify

/-!
# `rify` tactic

The `rify` tactic is used to shift propositions from `ℕ`, `ℤ` or `ℚ` to `ℝ`.
Although less useful than its cousins `zify` and `qify`, it can be useful when your
goal or context already involves real numbers.

In the example below, assumption `hn` is about natural numbers, `hk` is about
integers and involves casting a natural number to `ℤ`, and the conclusion is
about real numbers. The proof uses `rify` to lift both assumptions to `ℝ`
before calling `linarith`.
```
import Mathlib.Tactic.Linarith
import Mathlib.Tactic.Rify

example {n : ℕ} {k : ℤ} (hn : 8 ≤ n) (hk : 2 * k ≤ n + 2) : (0 : ℝ) < n - k - 1 := by
  rify at hn hk
  linarith
```

TODO: Investigate whether we should generalize this to other fields.
-/

namespace Mathlib.Tactic.Rify

open Lean
open Lean.Meta
open Lean.Parser.Tactic
open Lean.Elab.Tactic

/--
The `rify` tactic is used to shift propositions from `ℕ`, `ℤ` or `ℚ` to `ℝ`.
Although less useful than its cousins `zify` and `qify`, it can be useful when your
goal or context already involves real numbers.

In the example below, assumption `hn` is about natural numbers, `hk` is about
integers and involves casting a natural number to `ℤ`, and the conclusion is
about real numbers. The proof uses `rify` to lift both assumptions to `ℝ`
before calling `linarith`.
```
example {n : ℕ} {k : ℤ} (hn : 8 ≤ n) (hk : 2 * k ≤ n + 2) : (0 : ℝ) < n - k - 1 := by
  rify at hn hk
  /- Now have
     hn : 8 ≤ (n : ℝ)
     hk : 2 * (k : ℝ) ≤ (n : ℝ) + 2 -/
  linarith
```
`rify` makes use of the `@[zify_simps]`, `@[qify_simps]` and `@[rify_simps]` attributes to move
propositions, and the `push_cast` tactic to simplify the `ℝ`-valued expressions.

`rify` can be given extra lemmas to use in simplification. This is especially useful in the
presence of nat subtraction: passing `≤` arguments will allow `push_cast` to do more work.
```
example (a b c : ℕ) (h : a - b < c) (hab : b ≤ a) : a < b + c := by
  rify [hab] at h ⊢
  linarith
```
Note that `zify` or `qify` would work just as well in the above example (and `zify` is the natural
choice since it is enough to get rid of the pathological `ℕ` subtraction).
-/
syntax (name := rify) "rify" (simpArgs)? (location)? : tactic

macro_rules
| `(tactic| rify $[[$simpArgs,*]]? $[at $location]?) =>
  -- Collect any user-supplied extra simp lemmas (defaulting to none), then run a single
  -- `simp only` chaining the `zify`/`qify`/`rify` simp sets with `push_cast`.
  let args := simpArgs.map (·.getElems) |>.getD #[]
  `(tactic| simp (config := {decide := false}) only [zify_simps, qify_simps, rify_simps,
      push_cast, $args,*] $[at $location]?)

-- Simp lemmas tagged `@[rify_simps]`: they move propositions about `ℚ` to propositions about `ℝ`
-- (the `ℕ`/`ℤ` steps are handled by the `zify_simps`/`qify_simps` sets above).
@[rify_simps] lemma ratCast_eq (a b : ℚ) : a = b ↔ (a : ℝ) = (b : ℝ) := by simp
@[rify_simps] lemma ratCast_le (a b : ℚ) : a ≤ b ↔ (a : ℝ) ≤ (b : ℝ) := by simp
@[rify_simps] lemma ratCast_lt (a b : ℚ) : a < b ↔ (a : ℝ) < (b : ℝ) := by simp
@[rify_simps] lemma ratCast_ne (a b : ℚ) : a ≠ b ↔ (a : ℝ) ≠ (b : ℝ) := by simp

@[deprecated (since := "2024-04-17")] alias rat_cast_ne := ratCast_ne

-- See note [no_index around OfNat.ofNat]
@[rify_simps] lemma ofNat_rat_real (a : ℕ) [a.AtLeastTwo] :
    ((no_index (OfNat.ofNat a : ℚ)) : ℝ) = (OfNat.ofNat a : ℝ) := rfl

end Rify

end Tactic

end Mathlib
Tactic\Ring.lean
import Mathlib.Tactic.Ring.Basic import Mathlib.Tactic.Ring.RingNF import Mathlib.Tactic.Ring.PNat
Tactic\RSuffices.lean
/-
Copyright (c) 2022 Moritz Doll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Moritz Doll
-/
import Mathlib.Tactic.Basic

/-!
# `rsuffices` tactic

The `rsuffices` tactic is an alternative version of `suffices`, that allows the usage
of any syntax that would be valid in an `obtain` block. This tactic just calls `obtain`
on the expression, and then `rotate_left`.
-/

/--
The `rsuffices` tactic is an alternative version of `suffices`, that allows the usage
of any syntax that would be valid in an `obtain` block. This tactic just calls `obtain`
on the expression, and then `rotate_left`.
-/
syntax (name := rsuffices) "rsuffices"
  (ppSpace Lean.Parser.Tactic.rcasesPatMed)? (" : " term)? (" := " term,+)? : tactic

macro_rules
| `(tactic| rsuffices $[$pred]? $[: $foo]? $[:= $bar]?) =>
  -- Destructure the stated goal with `obtain`, then `rotate_left` so the side goal
  -- (proving the sufficient statement) comes last, matching the `suffices` workflow.
  `(tactic | (obtain $[$pred]? $[: $foo]? $[:= $bar]?; rotate_left))
Tactic\Says.lean
/-
Copyright (c) 2023 Kim Liesinger. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Liesinger
-/
import Batteries.Data.String.Basic
import Lean.Meta.Tactic.TryThis
import Batteries.Linter.UnreachableTactic
import Qq.Match

/-!
# The `says` tactic combinator.

If you write `X says`, where `X` is a tactic that produces a "Try this: Y" message,
then you will get a message "Try this: X says Y".
Once you've clicked to replace `X says` with `X says Y`,
afterwards `X says Y` will only run `Y`.

The typical usage case is:

```
simp? [X] says simp only [X, Y, Z]
```

If you use `set_option says.verify true` (set automatically during CI) then `X says Y`
runs `X` and verifies that it still prints "Try this: Y".
-/

open Lean Elab Tactic
open Lean.Meta.Tactic.TryThis

namespace Mathlib.Tactic.Says

register_option says.verify : Bool :=
  { defValue := false
    group := "says"
    descr := "For every appearance of the `X says Y` combinator, \
      re-verify that running `X` produces `Try this: Y`." }

register_option says.no_verify_in_CI : Bool :=
  { defValue := false
    group := "says"
    -- Fix: the description previously read "even if `the `CI` environment variable",
    -- with a stray backtick garbling the rendered option documentation.
    descr := "Disable reverification, even if the `CI` environment variable is set." }

open Parser Tactic

/-- This is a slight modification of `Parser.runParserCategory`. -/
def parseAsTacticSeq (env : Environment) (input : String) (fileName := "<input>") :
    Except String (TSyntax ``tacticSeq) :=
  let p := andthenFn whitespace Tactic.tacticSeq.fn
  let ictx := mkInputContext input fileName
  let s := p.run ictx { env, options := {} } (getTokenTable env) (mkParserState input)
  if s.hasError then
    Except.error (s.toErrorMsg ictx)
  else if input.atEnd s.pos then
    Except.ok ⟨s.stxStack.back⟩
  else
    Except.error ((s.mkError "end of input").toErrorMsg ictx)

/--
Run `evalTactic`, capturing any new messages.
The optional `only` argument allows selecting which messages should be captured,
or left in the message log.
-/
def evalTacticCapturingMessages (tac : TSyntax `tactic) (only : Message → Bool := fun _ => true) :
    TacticM (List Message) := do
  -- Stash the pre-existing log and start from an empty one, so we only see new messages.
  let mut msgs ← modifyGetThe Core.State fun st => (st.messages, { st with messages := {} })
  try
    evalTactic tac
    -- Messages selected by `only` are returned; the rest are folded back into the log.
    let (capture, leave) := (← getThe Core.State).messages.toList.partition only
    msgs := leave.foldl (·.add) msgs
    return capture
  catch e =>
    -- On failure, keep everything the tactic logged so the user still sees it.
    msgs := msgs ++ (← getThe Core.State).messages
    throw e
  finally
    modifyThe Core.State fun st => { st with messages := msgs }

/-- Run `evalTactic`, capturing any new info messages. -/
def evalTacticCapturingInfo (tac : TSyntax `tactic) : TacticM (List Message) :=
  evalTacticCapturingMessages tac fun m => match m.severity with
  | .information => true
  | _ => false

/-- Run `evalTactic`, capturing a "Try this:" message and converting it back to syntax. -/
def evalTacticCapturingTryThis (tac : TSyntax `tactic) : TacticM (TSyntax ``tacticSeq) := do
  -- Exactly one info message is expected; anything else is an error.
  let msg ← match ← evalTacticCapturingInfo tac with
  | [] => throwError m!"Tactic `{tac}` did not produce any messages."
  | [msg] => msg.toString
  | _ => throwError m!"Tactic `{tac}` produced multiple messages."
  let tryThis ← match msg.dropPrefix? "Try this:" with
  | none => throwError m!"Tactic output did not begin with 'Try this:': {msg}"
  | some S => pure S.toString.removeLeadingSpaces
  match parseAsTacticSeq (← getEnv) tryThis with
  | .ok stx => return stx
  | .error err => throwError m!"Failed to parse tactic output: {tryThis}\n{err}"

/--
If you write `X says`, where `X` is a tactic that produces a "Try this: Y" message,
then you will get a message "Try this: X says Y".
Once you've clicked to replace `X says` with `X says Y`,
afterwards `X says Y` will only run `Y`.

The typical usage case is:

```
simp? [X] says simp only [X, Y, Z]
```

If you use `set_option says.verify true` (set automatically during CI) then `X says Y`
runs `X` and verifies that it still prints "Try this: Y".
-/
syntax (name := says) tactic " says" (colGt tacticSeq)? : tactic

elab_rules : tactic
| `(tactic| $tac:tactic says%$tk $[$result:tacticSeq]?) => do
  -- Re-run `tac` when explicitly requested, or in CI (unless opted out via
  -- `says.no_verify_in_CI`).
  let verify := says.verify.get (← getOptions) ||
    !says.no_verify_in_CI.get (← getOptions) && (← IO.getEnv "CI").isSome
  match result, verify with
  | some _, true
  | none, _ =>
    let stx ← evalTacticCapturingTryThis tac
    match result with
    | some r =>
      -- Compare pretty-printed, position-stripped forms so formatting differences are ignored.
      let stx' := (← Lean.PrettyPrinter.ppTactic ⟨Syntax.stripPos stx⟩).pretty
      let r' := (← Lean.PrettyPrinter.ppTactic ⟨Syntax.stripPos r⟩).pretty
      if stx' != r' then
        throwError m!"Tactic `{tac}` produced `{stx'}`,\nbut was expecting it to produce `{r'}`!" ++
          m!"\n\nYou can reproduce this error locally using `set_option says.verify true`."
    | none =>
      -- No recorded result yet: offer "Try this: X says Y" as a code-action suggestion.
      addSuggestion tk (← `(tactic| $tac says $stx)) (origSpan? := (← `(tactic| $tac says)))
  | some result, false => evalTactic result

initialize Batteries.Linter.UnreachableTactic.addIgnoreTacticKind `Mathlib.Tactic.Says.says
Tactic\ScopedNS.lean
/-
Copyright (c) 2021 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Mario Carneiro
-/
import Mathlib.Util.WithWeakNamespace

/-!
# `scoped[NS]` syntax

This is a replacement for the `localized` command in mathlib.
It is similar to `scoped`, but it scopes the syntax in the specified namespace instead
of the current namespace.
-/

namespace Mathlib.Tactic

open Lean

/--
`scoped[NS]` is similar to the `scoped` modifier on attributes and notations,
but it scopes the syntax in the specified namespace instead of the current namespace.
```
scoped[Matrix] infixl:72 " ⬝ᵥ " => Matrix.dotProduct
-- declares `⬝ᵥ` as a notation for vector dot products
-- which is only accessible if you `open Matrix` or `open scoped Matrix`.

namespace Nat

scoped[Nat.Count] attribute [instance] CountSet.fintype
-- make the definition Nat.CountSet.fintype an instance,
-- but only if `Nat.Count` is open
```
-/
syntax (name := scopedNS) (docComment)? (Parser.Term.attributes)?
  "scoped" "[" ident "] " command : command

-- Each rule rewrites `scoped[NS] <decl>` into the same declaration wrapped in
-- `with_weak_namespace NS`, so the `scoped` modifier attaches to namespace `NS`
-- rather than the namespace the command appears in. One rule per notation kind,
-- plus a final rule for `attribute` commands.
macro_rules
  | `($[$doc]? $(attr)? scoped[$ns] notation $(prec)? $(n)? $(prio)? $sym* => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped notation $(prec)? $(n)? $(prio)? $sym* => $t)
  | `($[$doc]? $(attr)? scoped[$ns] $mk:prefix $prec $(n)? $(prio)? $sym => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped $mk:prefix $prec $(n)? $(prio)? $sym => $t)
  | `($[$doc]? $(attr)? scoped[$ns] $mk:infix $prec $(n)? $(prio)? $sym => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped $mk:infix $prec $(n)? $(prio)? $sym => $t)
  | `($[$doc]? $(attr)? scoped[$ns] $mk:infixl $prec $(n)? $(prio)? $sym => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped $mk:infixl $prec $(n)? $(prio)? $sym => $t)
  | `($[$doc]? $(attr)? scoped[$ns] $mk:infixr $prec $(n)? $(prio)? $sym => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped $mk:infixr $prec $(n)? $(prio)? $sym => $t)
  | `($[$doc]? $(attr)? scoped[$ns] $mk:postfix $prec $(n)? $(prio)? $sym => $t) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      $[$doc]? $(attr)? scoped $mk:postfix $prec $(n)? $(prio)? $sym => $t)
  | `(scoped[$ns] attribute [$[$attr:attr],*] $ids*) =>
    `(with_weak_namespace $(mkIdentFrom ns <| rootNamespace ++ ns.getId)
      attribute [$[scoped $attr:attr],*] $ids*)
Tactic\Set.lean
/-
Copyright (c) 2022 Ian Benway. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ian Benway
-/
import Lean

/-!
# The `set` tactic

This file defines the `set` tactic and its variant `set!`.

`set a := t with h` is a variant of `let a := t`.
It adds the hypothesis `h : a = t` to the local context and replaces `t` with `a`
everywhere it can.

`set a := t with ← h` will add `h : t = a` instead.

`set! a := t with h` does not do any replacing.
-/

namespace Mathlib.Tactic

open Lean Elab Elab.Tactic Meta

syntax setArgsRest := ppSpace ident (" : " term)? " := " term (" with " "← "? ident)?

-- This is called `setTactic` rather than `set`
-- as we sometimes refer to `MonadStateOf.set` from inside `Mathlib.Tactic`.
syntax (name := setTactic) "set" "!"? setArgsRest : tactic

macro "set!" rest:setArgsRest : tactic => `(tactic| set ! $rest:setArgsRest)

/--
`set a := t with h` is a variant of `let a := t`.
It adds the hypothesis `h : a = t` to the local context and replaces `t` with `a`
everywhere it can.

`set a := t with ← h` will add `h : t = a` instead.

`set! a := t with h` does not do any replacing.

```lean
example (x : Nat) (h : x + x - x = 3) : x + x - x = 3 := by
  set y := x with ← h2
  sorry
/-
x : Nat
y : Nat := x
h : y + y - y = 3
h2 : x = y
⊢ y + y - y = 3
-/
```
-/
elab_rules : tactic
| `(tactic| set%$tk $[!%$rw]? $a:ident $[: $ty:term]? := $val:term $[with $[←%$rev]? $h:ident]?) =>
  withMainContext do
    -- Elaborate the value, using the ascribed type when one was given.
    let (ty, vale) ← match ty with
    | some ty =>
      let ty ← Term.elabType ty
      pure (ty, ← elabTermEnsuringType val ty)
    | none =>
      let val ← elabTerm val none
      pure (← inferType val, val)
    -- Introduce `a` as a local definition (`a : ty := vale`) in the goal.
    let fvar ← liftMetaTacticAux fun goal ↦ do
      let (fvar, goal) ← (← goal.define a.getId ty vale).intro1P
      pure (fvar, [goal])
    withMainContext <|
      Term.addTermInfo' (isBinder := true) a (mkFVar fvar)
    -- Unless `set!` was used, replace occurrences of the value by `a` everywhere.
    if rw.isNone then
      evalTactic (← `(tactic| try rewrite [show $(← Term.exprToSyntax vale) = $a from rfl] at *))
    -- Record the equation hypothesis, oriented according to the presence of `←`.
    match h, rev with
    | some h, some none =>
      evalTactic (← `(tactic| have%$tk
        $h : $a = ($(← Term.exprToSyntax vale) : $(← Term.exprToSyntax ty)) := rfl))
    | some h, some (some _) =>
      evalTactic (← `(tactic| have%$tk
        $h : ($(← Term.exprToSyntax vale) : $(← Term.exprToSyntax ty)) = $a := rfl))
    | _, _ => pure ()
Tactic\SetLike.lean
/-
Copyright (c) 2023 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
-/
import Aesop

/-!
# SetLike Rule Set

This module defines the `SetLike` Aesop rule set which is used by the
`SetLike` tactic. Aesop rule sets only become visible once the file in which
they're declared is imported, so we must put this declaration into its own file.
-/

-- Declare the rule set (enabled by default) in a leaf file so that any file
-- importing it can tag rules with `@[aesop (rule_sets := [SetLike]) ...]`.
declare_aesop_rule_sets [SetLike] (default := true)
Tactic\SimpIntro.lean
/-
Copyright (c) 2022 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Lean

/-!
# `simp_intro` tactic
-/

namespace Mathlib.Tactic

open Lean Meta Elab Tactic

/--
Main loop of the `simp_intro` tactic.
* `g`: the original goal
* `ctx`: the simp context, which is extended with local variables as we enter the binders
* `discharge?`: the discharger
* `more`: if true, we will keep introducing binders as long as we can
* `ids`: the list of binder identifiers
-/
partial def simpIntroCore (g : MVarId) (ctx : Simp.Context)
    (simprocs : Simp.SimprocsArray := #[]) (discharge? : Option Simp.Discharge)
    (more : Bool) (ids : List (TSyntax ``binderIdent)) : TermElabM (Option MVarId) := do
  -- `done`: no more binders to introduce; simp the target one last time.
  let done := return (← simpTargetCore g ctx simprocs discharge?).1
  -- Pop the next user-supplied name, or (in `..` mode) synthesize a hole.
  let (transp, var, ids') ← match ids with
    | [] => if more then pure (.reducible, mkHole (← getRef), []) else return ← done
    | v::ids => pure (.default, v.raw[0], ids)
  let t ← withTransparency transp g.getType'
  let n := if var.isIdent then var.getId else `_
  -- After introducing `fvar`, record hover info, add it as a simp lemma, and recurse.
  let withFVar := fun (fvar, g) ↦ g.withContext do
    Term.addLocalVarInfo var (mkFVar fvar)
    let simpTheorems ← ctx.simpTheorems.addTheorem (.fvar fvar) (.fvar fvar)
    simpIntroCore g { ctx with simpTheorems } simprocs discharge? more ids'
  match t with
  | .letE .. => withFVar (← g.intro n)
  | .forallE (body := body) .. =>
    let (fvar, g) ← g.intro n
    if body.hasLooseBVars then
      -- Dependent binder: the hypothesis appears in the body, so don't simp it away.
      withFVar (fvar, g)
    else
      -- Non-dependent hypothesis: simplify it; `none` means the goal was closed.
      match (← simpLocalDecl g fvar ctx simprocs discharge?).1 with
      | none =>
        g.withContext <| Term.addLocalVarInfo var (mkFVar fvar)
        return none
      | some g' => withFVar g'
  | _ =>
    if more && ids.isEmpty then done
    else throwErrorAt var "simp_intro failed to introduce {var}\n{g}"

open Parser.Tactic

/--
The `simp_intro` tactic is a combination of `simp` and `intro`: it will simplify the types of
variables as it introduces them and uses the new variables to simplify later arguments
and the goal.
* `simp_intro x y z` introduces variables named `x y z`
* `simp_intro x y z ..` introduces variables named `x y z` and then keeps introducing `_` binders
* `simp_intro (config := cfg) (discharger := tac) x y .. only [h₁, h₂]`:
  `simp_intro` takes the same options as `simp` (see `simp`)
```
example : x + 0 = y → x = z := by
  simp_intro h
  -- h: x = y ⊢ y = z
  sorry
```
-/
elab "simp_intro" cfg:(config)? disch:(discharger)? ids:(ppSpace colGt binderIdent)*
    more:" .."? only:(&" only")? args:(simpArgs)? : tactic => do
  let args := args.map fun args ↦ ⟨args.raw[1].getArgs⟩
  -- Re-assemble a `simp` call from our arguments to reuse `mkSimpContext`.
  let stx ← `(tactic| simp $(cfg)? $(disch)? $[only%$only]? $[[$args,*]]?)
  let { ctx, simprocs, dischargeWrapper } ← withMainContext <|
    mkSimpContext stx (eraseLocal := false)
  dischargeWrapper.with fun discharge? ↦ do
    let g ← getMainGoal
    g.checkNotAssigned `simp_intro
    g.withContext do
      let g? ← simpIntroCore g ctx (simprocs := simprocs) discharge? more.isSome ids.toList
      replaceMainGoal <| if let some g := g? then [g] else []
Tactic\SimpRw.lean
/-
Copyright (c) 2020 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen, Mario Carneiro, Alex J. Best
-/
import Lean

/-!
# The `simp_rw` tactic

This file defines the `simp_rw` tactic: it functions as a mix of `simp` and `rw`.
Like `rw`, it applies each rewrite rule in the given order, but like `simp` it repeatedly
applies these rules and also under binders like `∀ x, ...`, `∃ x, ...` and `fun x ↦ ...`.
-/

namespace Mathlib.Tactic

open Lean Parser.Tactic Elab.Tactic

/--
A version of `withRWRulesSeq` (in core) that doesn't attempt to find equation lemmas, and simply
passes the rw rules on to `x`.
-/
def withSimpRWRulesSeq (token : Syntax) (rwRulesSeqStx : Syntax)
    (x : (symm : Bool) → (term : Syntax) → TacticM Unit) : TacticM Unit := do
  let lbrak := rwRulesSeqStx[0]
  let rules := rwRulesSeqStx[1].getArgs
  -- show initial state up to (incl.) `[`
  withTacticInfoContext (mkNullNode #[token, lbrak]) (pure ())
  -- `rules` alternates rule, separator, rule, separator, …
  let numRules := (rules.size + 1) / 2
  for i in [:numRules] do
    let rule := rules[i * 2]!
    let sep := rules.getD (i * 2 + 1) Syntax.missing
    -- show rule state up to (incl.) next `,`
    withTacticInfoContext (mkNullNode #[rule, sep]) do
      -- show errors on rule
      withRef rule do
        -- A leading `←` marks a reverse rewrite.
        let symm := !rule[0].isNone
        let term := rule[1]
        -- let processId (id : Syntax) : TacticM Unit := do
        x symm term

/--
`simp_rw` functions as a mix of `simp` and `rw`. Like `rw`, it applies each
rewrite rule in the given order, but like `simp` it repeatedly applies these
rules and also under binders like `∀ x, ...`, `∃ x, ...` and `fun x ↦...`.
Usage:

- `simp_rw [lemma_1, ..., lemma_n]` will rewrite the goal by applying the
  lemmas in that order. A lemma preceded by `←` is applied in the reverse direction.
- `simp_rw [lemma_1, ..., lemma_n] at h₁ ... hₙ` will rewrite the given hypotheses.
- `simp_rw [...] at *` rewrites in the whole context: all hypotheses and the goal.

Lemmas passed to `simp_rw` must be expressions that are valid arguments to `simp`.
For example, neither `simp` nor `rw` can solve the following, but `simp_rw` can:

```lean
example {a : ℕ}
    (h1 : ∀ a b : ℕ, a - 1 ≤ b ↔ a ≤ b + 1)
    (h2 : ∀ a b : ℕ, a ≤ b ↔ ∀ c, c < a → c < b) :
    (∀ b, a - 1 ≤ b) = ∀ b c : ℕ, c < a → c < b + 1 := by
  simp_rw [h1, h2]
```
-/
elab s:"simp_rw " cfg:(config)? rws:rwRuleSeq g:(location)? : tactic => focus do
  -- Force `failIfUnchanged := false` into the user's simp config (or create one),
  -- since individual rewrite steps are allowed to be no-ops.
  let cfg' : TSyntax `Lean.Parser.Tactic.config ← do
    match cfg with
    | Option.none =>
      `(config| (config := ({ failIfUnchanged := false } : Lean.Meta.Simp.Config)))
    | Option.some c => match c with
      | `(config| (config := $cfg)) =>
        `(config| (config := ({ ($cfg : Lean.Meta.Simp.Config) with
          failIfUnchanged := false })))
      | _ => throwError "malformed cfg"
  -- Initial no-op simp so the location is validated before any rewriting.
  evalTactic (← `(tactic| simp%$s $cfg' only $g ?))
  -- One `simp only` call per rule, in order, respecting `←`.
  withSimpRWRulesSeq s rws fun symm term => do
    evalTactic (← match term with
    | `(term| $e:term) =>
      if symm then
        `(tactic| simp%$e $[$cfg]? only [← $e:term] $g ?)
      else
        `(tactic| simp%$e $[$cfg]? only [$e:term] $g ?))
Tactic\SlimCheck.lean
/-
Copyright (c) 2020 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon, Scott Morrison
-/
import Mathlib.Testing.SlimCheck.Testable
import Qq

/-!
## Finding counterexamples automatically using `slim_check`

A proposition can be tested by writing it out as:

```lean
example (xs : List ℕ) (w : ∃ x ∈ xs, x < 3) : ∀ y ∈ xs, y < 5 := by slim_check
-- ===================
-- Found problems!
-- xs := [0, 5]
-- x := 0
-- y := 5
-- -------------------

example (x : ℕ) (h : 2 ∣ x) : x < 100 := by slim_check
-- ===================
-- Found problems!
-- x := 258
-- -------------------

example (α : Type) (xs ys : List α) : xs ++ ys = ys ++ xs := by slim_check
-- ===================
-- Found problems!
-- α := ℤ
-- xs := [-4]
-- ys := [1]
-- -------------------

example : ∀ x ∈ [1,2,3], x < 4 := by slim_check
-- Success
```

In the first example, `slim_check` is called on the following goal:

```lean
xs : List ℕ,
h : ∃ (x : ℕ) (H : x ∈ xs), x < 3
⊢ ∀ (y : ℕ), y ∈ xs → y < 5
```

The local constants are reverted and an instance is found for
`Testable (∀ (xs : List ℕ), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))`.
The `Testable` instance is supported by instances of `Sampleable (List ℕ)`,
`Decidable (x < 3)` and `Decidable (y < 5)`.
`slim_check` builds a `Testable` instance step by step with:

```
- Testable (∀ (xs : List ℕ), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))
                                     -: Sampleable (List xs)
- Testable ((∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))
- Testable (∀ x ∈ xs, x < 3 → (∀ y ∈ xs, y < 5))
- Testable (x < 3 → (∀ y ∈ xs, y < 5))
                                     -: Decidable (x < 3)
- Testable (∀ y ∈ xs, y < 5)
                                     -: Decidable (y < 5)
```

`Sampleable (List ℕ)` lets us create random data of type `List ℕ` in a way that
helps find small counter-examples. Next, the test of the proposition hinges on
`x < 3` and `y < 5` to both be decidable. The implication between the two could
be tested as a whole but it would be less informative.
Indeed, if we generate lists that only contain numbers greater than `3`, the
implication will always trivially hold but we should conclude that we haven't
found meaningful examples. Instead, when `x < 3` does not hold, we reject the
example (i.e. we do not count it toward the 100 required positive examples)
and we start over. Therefore, when `slim_check` prints `Success`, it means that
a hundred suitable lists were found and successfully tested.

If no counter-examples are found, `slim_check` behaves like `admit`.

`slim_check` can also be invoked using `#eval`:

```lean
#eval SlimCheck.Testable.check (∀ (α : Type) (xs ys : List α), xs ++ ys = ys ++ xs)
-- ===================
-- Found problems!
-- α := ℤ
-- xs := [-4]
-- ys := [1]
-- -------------------
```

For more information on writing your own `Sampleable` and `Testable`
instances, see `Testing.SlimCheck.Testable`.
-/

open Lean Elab Meta Tactic
open Parser.Tactic
open Qq

open SlimCheck Decorations

-- Trace classes controlling how verbose a `slim_check` run is.
initialize registerTraceClass `slim_check.instance
initialize registerTraceClass `slim_check.decoration
initialize registerTraceClass `slim_check.discarded
initialize registerTraceClass `slim_check.success
initialize registerTraceClass `slim_check.shrink.steps
initialize registerTraceClass `slim_check.shrink.candidates

/--
`slim_check` considers a proof goal and tries to generate examples
that would contradict the statement.

Let's consider the following proof goal.

```lean
xs : List ℕ,
h : ∃ (x : ℕ) (H : x ∈ xs), x < 3
⊢ ∀ (y : ℕ), y ∈ xs → y < 5
```

The local constants will be reverted and an instance will be found for
`Testable (∀ (xs : List ℕ), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))`.
The `Testable` instance is supported by an instance of `Sampleable (List ℕ)`,
`Decidable (x < 3)` and `Decidable (y < 5)`.

Examples will be created in ascending order of size (more or less)

The first counter-examples found will be printed and will result in an error:

```
===================
Found problems!
xs := [1, 28]
x := 1
y := 28
-------------------
```

If `slim_check` successfully tests 100 examples, it acts like
admit. If it gives up or finds a counter-example, it reports an error.

For more information on writing your own `Sampleable` and `Testable`
instances, see `Testing.SlimCheck.Testable`.

Optional arguments given with `slim_check (config : { ... })`
* `numInst` (default 100): number of examples to test properties with
* `maxSize` (default 100): final size argument

Options:
* `set_option trace.slim_check.decoration true`: print the proposition with quantifier annotations
* `set_option trace.slim_check.discarded true`: print the examples discarded because they do not
  satisfy assumptions
* `set_option trace.slim_check.shrink.steps true`: trace the shrinking of counter-example
* `set_option trace.slim_check.shrink.candidates true`: print the lists of candidates considered
  when shrinking each variable
* `set_option trace.slim_check.instance true`: print the instances of `testable` being used to test
  the proposition
* `set_option trace.slim_check.success true`: print the tested samples that satisfy a property
-/
syntax (name := slimCheckSyntax) "slim_check" (config)? : tactic

elab_rules : tactic | `(tactic| slim_check $[$cfg]?) => withMainContext do
  let cfg ← elabConfig (mkOptionalNode cfg)
  -- Revert all hypotheses so the whole statement becomes one closed proposition.
  let (_, g) ← (← getMainGoal).revert ((← getLocalHyps).map (Expr.fvarId!))
  g.withContext do
  let tgt ← g.getType
  -- Annotate binders so counter-example values can be reported per variable.
  let tgt' ← addDecorations tgt
  -- Fold the trace options into the user-supplied configuration.
  let cfg := { cfg with
    traceDiscarded := cfg.traceDiscarded || (← isTracingEnabledFor `slim_check.discarded),
    traceSuccesses := cfg.traceSuccesses || (← isTracingEnabledFor `slim_check.success),
    traceShrink := cfg.traceShrink || (← isTracingEnabledFor `slim_check.shrink.steps),
    traceShrinkCandidates := cfg.traceShrinkCandidates ||
      (← isTracingEnabledFor `slim_check.shrink.candidates) }
  let inst ← try
    synthInstance (← mkAppM ``Testable #[tgt'])
  catch _ => throwError "\
    Failed to create a `testable` instance for `{tgt}`.\
    \nWhat to do:\
    \n1. make sure that the types you are using have `SlimCheck.SampleableExt` instances\
    \n  (you can use `#sample my_type` if you are unsure);\
    \n2. make sure that the relations and predicates that your proposition use are decidable;\
    \n3. make sure that instances of `SlimCheck.Testable` exist that, when combined,\
    \n  apply to your decorated proposition:\
    \n```\
    \n{tgt'}\
    \n```\
    \n\
    \nUse `set_option trace.Meta.synthInstance true` to understand what instances are missing.\
    \n\
    \nTry this:\
    \nset_option trace.Meta.synthInstance true\
    \n#synth SlimCheck.Testable ({tgt'})"
  let e ← mkAppOptM ``Testable.check #[tgt, toExpr cfg, tgt', inst]
  trace[slim_check.decoration] "[testable decoration]\n  {tgt'}"
  -- Porting note: I have not ported support for `trace.slim_check.instance`.
  -- See the commented out code below from mathlib3 if you would like to implement this.
  -- when_tracing `slim_check.instance <| do
  -- { inst ← summarize_instance inst >>= pp,
  --   trace!"\n[testable instance]{format.indent inst 2}" },
  -- Compile and run the `Testable.check` call; it raises on counter-examples.
  let code ← unsafe evalExpr (CoreM PUnit) q(CoreM PUnit) e
  _ ← code
  -- Passing the test run does not prove the goal; close it like `admit` would.
  admitGoal g

-- Porting note: below is the remaining code from mathlib3 which supports the
-- `trace.slim_check.instance` trace option, and which has not been ported.

-- namespace tactic.interactive
-- open tactic slim_check
-- open expr

-- /-- Tree structure representing a `testable` instance. -/
-- meta inductive instance_tree
-- | node : name → expr → list instance_tree → instance_tree

-- /-- Gather information about a `testable` instance. Given
-- an expression of type `testable ?p`, gather the
-- name of the `testable` instances that it is built from
-- and the proposition that they test. -/
-- meta def summarize_instance : expr → tactic instance_tree
-- | (lam n bi d b) := do
--   v ← mk_local' n bi d,
--   summarize_instance <| b.instantiate_var v
-- | e@(app f x) := do
--   `(testable %%p) ← infer_type e,
--   xs ← e.get_app_args.mmap_filter (try_core ∘ summarize_instance),
--   pure <| instance_tree.node e.get_app_fn.const_name p xs
-- | e := do
--   failed

-- /-- format an `instance_tree` -/
-- meta def instance_tree.to_format : instance_tree → tactic format
-- | (instance_tree.node n p xs) := do
--   xs ← format.join <$> (xs.mmap <| λ t, flip format.indent 2 <$> instance_tree.to_format t),
--   ys ← pformat!"testable ({p})",
--   pformat!"+ {n} :{format.indent ys 2}\n{xs}"

-- meta instance instance_tree.has_to_tactic_format : has_to_tactic_format instance_tree :=
-- ⟨ instance_tree.to_format ⟩
Tactic\SplitIfs.lean
/-
Copyright (c) 2018 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, David Renshaw
-/
import Lean
import Mathlib.Tactic.Core

/-!
Tactic to split if-then-else expressions.
-/

namespace Mathlib.Tactic

open Lean Elab.Tactic Parser.Tactic Lean.Meta

/-- A position where a split may apply. -/
private inductive SplitPosition
| target
| hyp (fvarId: FVarId)

/-- Collects a list of positions pointed to by `loc` and their types. -/
private def getSplitCandidates (loc : Location) : TacticM (List (SplitPosition × Expr)) :=
match loc with
| Location.wildcard => do
  let candidates ← (← getLCtx).getFVarIds.mapM
    (fun fvarId ↦ do
      let typ ← instantiateMVars (← inferType (mkFVar fvarId))
      return (SplitPosition.hyp fvarId, typ))
  pure ((SplitPosition.target, ← getMainTarget) :: candidates.toList)
| Location.targets hyps tgt => do
  let candidates ← (← hyps.mapM getFVarId).mapM
    (fun fvarId ↦ do
      let typ ← instantiateMVars (← inferType (mkFVar fvarId))
      return (SplitPosition.hyp fvarId, typ))
  if tgt
  then return (SplitPosition.target, ← getMainTarget) :: candidates.toList
  else return candidates.toList

/-- Return the condition and decidable instance of an `if` expression to case split. -/
private partial def findIfToSplit? (e : Expr) : Option (Expr × Expr) :=
  -- Only consider `ite`/`dite` applications whose condition has no loose bound
  -- variables, i.e. conditions not under a binder, so `by_cases` can state them.
  match e.find? fun e => (e.isIte || e.isDIte) && !(e.getArg! 1 5).hasLooseBVars with
  | some iteApp =>
    let cond := iteApp.getArg! 1 5
    let dec := iteApp.getArg! 2 5
    -- Try to find a nested `if` in `cond`
    findIfToSplit? cond |>.getD (cond, dec)
  | none => none

/-- Finds an if condition to split. If successful, returns the position and the condition. -/
private def findIfCondAt (loc : Location) : TacticM (Option (SplitPosition × Expr)) := do
  for (pos, e) in (← getSplitCandidates loc) do
    if let some (cond, _) := findIfToSplit? e
    then return some (pos, cond)
  return none

/-- `Simp.Discharge` strategy to use in `reduceIfsAt`. Delegates to
`SplitIf.discharge?`, and additionally supports discharging `True`, to
better match the behavior of mathlib3's `split_ifs`. -/
private def discharge? (e : Expr) : SimpM (Option Expr) := do
  let e ← instantiateMVars e
  if let some e1 ← (← SplitIf.mkDischarge? false) e
  then return some e1
  if e.isConstOf `True
  then return some (mkConst `True.intro)
  return none

/-- Simplifies if-then-else expressions after cases have been split out. -/
private def reduceIfsAt (loc : Location) : TacticM Unit := do
  let ctx ← SplitIf.getSimpContext
  -- The reduction step may legitimately change nothing, so don't fail on that.
  let ctx := { ctx with config := { ctx.config with failIfUnchanged := false } }
  let _ ← simpLocation ctx {} discharge? loc
  pure ()

/-- Splits a single if-then-else expression and then reduces the resulting goals.
Has a similar effect as `SplitIf.splitIfTarget?` or `SplitIf.splitIfLocalDecl?` from
core Lean 4. We opt not to use those library functions so that we can better mimic
the behavior of mathlib3's `split_ifs`. -/
private def splitIf1 (cond : Expr) (hName : Name) (loc : Location) : TacticM Unit := do
  let splitCases :=
    evalTactic (← `(tactic| by_cases $(mkIdent hName) : $(← Elab.Term.exprToSyntax cond)))
  andThenOnSubgoals splitCases (reduceIfsAt loc)

/-- Pops off the front of the list of names, or generates a fresh name if the
list is empty. -/
private def getNextName (hNames: IO.Ref (List (TSyntax `Lean.binderIdent))) : MetaM Name := do
  match ← hNames.get with
  | [] => mkFreshUserName `h
  | n::ns => do
    hNames.set ns
    if let `(binderIdent| $x:ident) := n
    then pure x.getId
    else pure `_

/-- Returns `true` if the condition or its negation already appears as a hypothesis. -/
private def valueKnown (cond : Expr) : TacticM Bool := do
  let not_cond := mkApp (mkConst `Not) cond
  for h in ← getLocalHyps do
    let ty ← instantiateMVars (← inferType h)
    if cond == ty then return true
    if not_cond == ty then return true
  return false

/-- Main loop of split_ifs. Pulls names for new hypotheses from `hNames`.
Stops if it encounters a condition in the passed-in `List Expr`. -/
private partial def splitIfsCore
    (loc : Location)
    (hNames : IO.Ref (List (TSyntax `Lean.binderIdent))) :
    List Expr → TacticM Unit := fun done ↦ withMainContext do
  let some (_,cond) ← findIfCondAt loc
      | Meta.throwTacticEx `split_ifs (← getMainGoal) "no if-then-else conditions to split"

  -- If `cond` is `¬p` then use `p` instead.
  let cond := if cond.isAppOf `Not then cond.getAppArgs[0]! else cond

  -- `done` tracks conditions already split, to avoid looping on the same condition.
  if done.contains cond then return ()
  let no_split ← valueKnown cond
  if no_split then
    -- The truth value is already hypothesized: just reduce, no case split needed.
    andThenOnSubgoals (reduceIfsAt loc) (splitIfsCore loc hNames (cond::done) <|> pure ())
  else do
    let hName ← getNextName hNames
    andThenOnSubgoals (splitIf1 cond hName loc)
      ((splitIfsCore loc hNames (cond::done)) <|> pure ())

/-- Splits all if-then-else-expressions into multiple goals.
Given a goal of the form `g (if p then x else y)`, `split_ifs` will produce
two goals: `p ⊢ g x` and `¬p ⊢ g y`.
If there are multiple ite-expressions, then `split_ifs` will split them all,
starting with a top-most one whose condition does not contain another
ite-expression.
`split_ifs at *` splits all ite-expressions in all hypotheses as well as the goal.
`split_ifs with h₁ h₂ h₃` overrides the default names for the hypotheses. -/
syntax (name := splitIfs) "split_ifs" (location)? (" with" (ppSpace colGt binderIdent)+)? : tactic

elab_rules : tactic
| `(tactic| split_ifs $[$loc:location]? $[with $withArg*]?) =>
  let loc := match loc with
  | none => Location.targets #[] true
  | some loc => expandLocation loc
  let names := match withArg with
  | none => []
  | some args => args.toList
  withMainContext do
    let names ← IO.mkRef names
    splitIfsCore loc names []
    -- Any names the user supplied but the loop never consumed are reported.
    for name in ← names.get do
      logWarningAt name m!"unused name: {name}"
Tactic\Spread.lean
/-
Copyright (c) 2021 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner
-/
import Lean.Elab.Binders

/-!
# Macro for spread syntax (`__ := instSomething`) in structures.
-/

open Lean Parser.Term Macro

/-
This adds support for structure instance spread syntax.

```lean
instance : Foo α where
  __ := instSomething -- include fields from `instSomething`

example : Foo α := {
  __ := instSomething -- include fields from `instSomething`
}
```
-/

/-- Mathlib extension to preserve old behavior of structure instances.
We need to be able to `let` some implementation details that are still local instances.
Normally implementation detail fvars are not local instances, but we need them to be
implementation details so that `simp` will see them as "reducible" fvars. -/
syntax (name := letImplDetailStx) "let_impl_detail " ident " := " term "; " term : term

open Lean Elab Term Meta

@[term_elab letImplDetailStx, inherit_doc letImplDetailStx]
def elabLetImplDetail : TermElab := fun stx expectedType? =>
  match stx with
  | `(let_impl_detail $id := $valStx; $body) => do
    let val ← elabTerm valStx none
    let type ← inferType val
    trace[Elab.let.decl] "{id.getId} : {type} := {val}"
    let result ← withLetDecl id.getId (kind := .default) type val fun x => do
      addLocalVarInfo id x
      -- Re-mark the let-bound fvar as an implementation detail *after* declaring it,
      -- so that it is still registered as a local instance.
      let lctx ← getLCtx
      let lctx := lctx.modifyLocalDecl x.fvarId! fun decl => decl.setKind .implDetail
      withLCtx lctx (← getLocalInstances) do
        let body ← elabTermEnsuringType body expectedType?
        let body ← instantiateMVars body
        -- `usedLetOnly := false` keeps the let binding even if `body` never mentions it.
        mkLetFVars #[x] body (usedLetOnly := false)
    pure result
  | _ => throwUnsupportedSyntax

macro_rules
  | `({ $[$srcs,* with]? $[$fields],* $[: $ty?]? }) => show MacroM Term from do
    let mut spreads := #[]
    let mut newFields := #[]
    -- Separate `__ := e` fields (the spreads) from ordinary fields.
    for field in fields do
      match field.1 with
      | `(structInstField| $name:ident := $arg) =>
        if name.getId.eraseMacroScopes == `__ then do
          spreads := spreads.push arg
        else
          newFields := newFields.push field
      | `(structInstFieldAbbrev| $_:ident) =>
        newFields := newFields.push field
      | _ =>
        throwUnsupported
    -- With no spread fields present, defer to the built-in structure instance elaborator.
    if spreads.isEmpty then throwUnsupported
    -- Bind each spread value to a fresh hygienic name `__spread.i` …
    let spreadData ← withFreshMacroScope <| spreads.mapIdxM fun i spread => do
      let n := Name.num `__spread i
      return (mkIdent <| ← Macro.addMacroScope n, spread)
    -- … add those names as structure instance sources …
    let srcs := (srcs.map (·.getElems)).getD {} ++ spreadData.map Prod.fst
    let body ← `({ $srcs,* with $[$newFields],* $[: $ty?]? })
    -- … and wrap the result in `let_impl_detail` bindings for each spread value.
    spreadData.foldrM (init := body) fun (id, val) body =>
      `(let_impl_detail $id := $val; $body)
Tactic\Subsingleton.lean
/-
Copyright (c) 2024 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import Mathlib.Logic.Basic

/-!
# `subsingleton` tactic

The `subsingleton` tactic closes `Eq` or `HEq` goals using an argument
that the types involved are subsingletons.
To first approximation, it does `apply Subsingleton.elim` but it also will try `proof_irrel_heq`,
and it is careful not to accidentally specialize `Sort _` to `Prop`.
-/

open Lean Meta

/-- Returns the expression `Subsingleton ty`. -/
def Lean.Meta.mkSubsingleton (ty : Expr) : MetaM Expr := do
  let u ← getLevel ty
  return Expr.app (.const ``Subsingleton [u]) ty

/-- Synthesizes a `Subsingleton ty` instance with the additional local instances `insts`
made available. Each entry pairs the user-written term (for error reporting) with its
abstracted elaboration result. -/
def Lean.Meta.synthSubsingletonInst (ty : Expr)
    (insts : Array (Term × AbstractMVarsResult) := #[]) : MetaM Expr := do
  -- Synthesize a subsingleton instance. The new metacontext depth ensures that universe
  -- level metavariables are not specialized.
  withNewMCtxDepth do
    -- We need to process the local instances *under* `withNewMCtxDepth` since they might
    -- have universe parameters, which we need to let `synthInstance` assign to.
    let (insts', uss) ← Array.unzip <$> insts.mapM fun inst => do
      let us ← inst.2.paramNames.mapM fun _ => mkFreshLevelMVar
      pure <| (inst.2.expr.instantiateLevelParamsArray inst.2.paramNames us, us)
    withLocalDeclsD (insts'.map fun e => (`inst, fun _ => inferType e)) fun fvars => do
      withNewLocalInstances fvars 0 do
        let res ← instantiateMVars <| ← synthInstance <| ← mkSubsingleton ty
        let res' := res.abstract fvars
        for i in [0 : fvars.size] do
          -- A loose bvar means the `i`-th provided instance was actually used.
          if res'.hasLooseBVar (fvars.size - i - 1) then
            uss[i]!.forM fun u => do
              let u ← instantiateLevelMVars u
              if u.isMVar then
                -- This shouldn't happen, `synthInstance` should solve for all level metavariables
                throwErrorAt insts[i]!.1 "\
                  Instance provided to 'subsingleton' has unassigned universe level metavariable\
                  {indentD insts'[i]!}"
          else
            -- Unused local instance.
            -- Not logging a warning since this might be `... <;> subsingleton [...]`
            pure ()
        instantiateMVars <| res'.instantiateRev insts'

/-- Closes the goal `g` whose target is an `Eq` or `HEq`
by appealing to the fact that the types are subsingletons.
Fails if it cannot find a way to do this.

Has support for showing `BEq` instances are equal if they have `LawfulBEq` instances. -/
def Lean.MVarId.subsingleton (g : MVarId)
    (insts : Array (Term × AbstractMVarsResult) := #[]) : MetaM Unit := commitIfNoEx do
  let g ← g.heqOfEq
  g.withContext do
    let tgt ← whnfR (← g.getType)
    if let some (ty, x, y) := tgt.eq? then
      -- Proof irrelevance. This is not necessary since `rfl` suffices,
      -- but propositions are subsingletons so we may as well.
      if ← Meta.isProp ty then
        g.assign <| mkApp3 (.const ``proof_irrel []) ty x y
        return
      -- Try `Subsingleton.elim`
      let u ← getLevel ty
      try
        let inst ← synthSubsingletonInst ty insts
        g.assign <| mkApp4 (.const ``Subsingleton.elim [u]) ty inst x y
        return
      catch _ => pure ()
      -- Try `lawful_beq_subsingleton`
      let ty' ← whnfR ty
      if ty'.isAppOfArity ``BEq 1 then
        let α := ty'.appArg!
        try
          -- `BEq α` lives one universe below `ty`'s level; fail if it cannot be decremented.
          let some u' := u.dec | failure
          let xInst ← withNewMCtxDepth <| Meta.synthInstance <|
            mkApp2 (.const ``LawfulBEq [u']) α x
          let yInst ← withNewMCtxDepth <| Meta.synthInstance <|
            mkApp2 (.const ``LawfulBEq [u']) α y
          g.assign <| mkApp5 (.const ``lawful_beq_subsingleton [u']) α x y xInst yInst
          return
        catch _ => pure ()
      throwError "\
        tactic 'subsingleton' could not prove equality since it could not synthesize\
        {indentD (← mkSubsingleton ty)}"
    else if let some (xTy, x, yTy, y) := tgt.heq? then
      -- The HEq version of proof irrelevance.
      if ← (Meta.isProp xTy <&&> Meta.isProp yTy) then
        g.assign <| mkApp4 (.const ``proof_irrel_heq []) xTy yTy x y
        return
      throwError "tactic 'subsingleton' could not prove heterogenous equality"
    throwError "tactic 'subsingleton' failed, goal is neither an equality nor \
      heterogenous equality"

namespace Mathlib.Tactic

/-- The `subsingleton` tactic tries to prove a goal of the form `x = y` or `HEq x y`
using the fact that the types involved are *subsingletons*
(a type with exactly zero or one terms).
To a first approximation, it does `apply Subsingleton.elim`.
As a nicety, `subsingleton` first runs the `intros` tactic.

- If the goal is an equality, it either closes the goal or fails.
- `subsingleton [inst1, inst2, ...]` can be used to add additional `Subsingleton` instances
  to the local context. This can be more flexible than
  `have := inst1; have := inst2; ...; subsingleton` since the tactic does not require that
  all placeholders be solved for.

Techniques the `subsingleton` tactic can apply:
- proof irrelevance
- heterogenous proof irrelevance (via `proof_irrel_heq`)
- using `Subsingleton` (via `Subsingleton.elim`)
- proving `BEq` instances are equal if they are both lawful (via `lawful_beq_subsingleton`)

### Properties

The tactic is careful not to accidentally specialize `Sort _` to `Prop`,
avoiding the following surprising behavior of `apply Subsingleton.elim`:
```lean
example (α : Sort _) (x y : α) : x = y := by apply Subsingleton.elim
```
The reason this `example` goes through is that it applies the
`∀ (p : Prop), Subsingleton p` instance,
specializing the universe level metavariable in `Sort _` to `0`. -/
syntax (name := subsingletonStx) "subsingleton" (ppSpace "[" term,* "]")? : tactic

open Elab Tactic

/-- Elaborates the terms like how `Lean.Elab.Tactic.addSimpTheorem` does,
abstracting their metavariables. -/
def elabSubsingletonInsts
    (instTerms? : Option (Array Term)) : TermElabM (Array (Term × AbstractMVarsResult)) := do
  if let some instTerms := instTerms? then
    go instTerms.toList #[]
  else
    return #[]
where
  /-- Main loop for `elabSubsingletonInsts`. -/
  go (instTerms : List Term) (insts : Array (Term × AbstractMVarsResult)) :
      TermElabM (Array (Term × AbstractMVarsResult)) := do
    match instTerms with
    | [] => return insts
    | instTerm :: instTerms =>
      let inst ← withNewMCtxDepth <| Term.withoutModifyingElabMetaStateWithInfo do
        withRef instTerm <| Term.withoutErrToSorry do
          let e ← Term.elabTerm instTerm none
          Term.synthesizeSyntheticMVars (postpone := .no) (ignoreStuckTC := true)
          let e ← instantiateMVars e
          unless (← isClass? (← inferType e)).isSome do
            throwError "Not an instance. Term has type{indentD <| ← inferType e}"
          if e.hasMVar then
            let r ← abstractMVars e
            -- Change all instance arguments corresponding to the mvars to be inst implicit.
            let e' ← forallBoundedTelescope (← inferType r.expr) r.numMVars fun args _ => do
              let newBIs ← args.filterMapM fun arg => do
                if (← isClass? (← inferType arg)).isSome then
                  return some (arg.fvarId!, .instImplicit)
                else
                  return none
              withNewBinderInfos newBIs do
                mkLambdaFVars args (r.expr.beta args)
            pure { r with expr := e' }
          else
            pure { paramNames := #[], numMVars := 0, expr := e }
      go instTerms (insts.push (instTerm, inst))

elab_rules : tactic
  | `(tactic| subsingleton $[[$[$instTerms?],*]]?) => withMainContext do
    let recover := (← read).recover
    let insts ← elabSubsingletonInsts instTerms?
    Elab.Tactic.liftMetaTactic1 fun g => do
      let (fvars, g) ← g.intros
      -- note: `insts` are still valid after `intros`
      try
        g.subsingleton (insts := insts)
        return none
      catch e =>
        -- Try `refl` when all else fails, to give a hint to the user
        if recover then
          try
            g.refl <|> g.hrefl
            let tac ← if !fvars.isEmpty then `(tactic| (intros; rfl)) else `(tactic| rfl)
            Meta.Tactic.TryThis.addSuggestion (← getRef) tac (origSpan? := ← getRef)
            return none
          catch _ => pure ()
        throw e

end Mathlib.Tactic
Tactic\Substs.lean
/-
Copyright (c) 2022 Evan Lohn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Evan Lohn, Mario Carneiro
-/
import Lean

/-!
# The `substs` macro

The `substs` macro applies the `subst` tactic to a list of hypotheses, in left-to-right order.
-/

namespace Mathlib.Tactic.Substs

/-- `substs h₁ h₂ ⋯` substitutes each of the given hypotheses in turn, from left to right;
it expands to the tactic sequence `subst h₁; subst h₂; ⋯`. -/
syntax (name := substs) "substs" (colGt ppSpace ident)* : tactic

macro_rules
  | `(tactic| substs $hyps:ident*) => `(tactic| ($[subst $hyps]*))
Tactic\SuccessIfFailWithMsg.lean
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Simon Hudon, Sébastien Gouëzel, Scott Morrison, Thomas Murrills
-/
import Lean

/-!
# Success If Fail With Message

This file implements a tactic that succeeds only if its argument fails with a specified message.

It's mostly useful in tests, where we want to make sure that tactics fail in certain ways
under certain circumstances.
-/

open Lean Elab Meta Tactic Syntax

namespace Mathlib.Tactic

/-- `success_if_fail_with_msg msg tacs` runs `tacs` and succeeds only if they fail with the
message `msg`.

`msg` can be any term that evaluates to an explicit `String`. -/
syntax (name := successIfFailWithMsg) "success_if_fail_with_msg " term:max tacticSeq : tactic

/-- Evaluates `tacs` and succeeds only if `tacs` both fails and throws an error equal (as a
string) to `msg`. Messages are compared after `String.trim`-ming both sides.
`ref`, when provided, is the syntax used for error positions and is interpolated into the
error messages. Any state changes made by `tacs` are rolled back. -/
def successIfFailWithMessage {s α : Type} {m : Type → Type} [Monad m] [MonadLiftT IO m]
    [MonadBacktrack s m] [MonadError m] (msg : String) (tacs : m α)
    (ref : Option Syntax := none) : m Unit := do
  let s ← saveState
  -- Run the tactics, capturing the thrown error message (if any) as a string.
  let err ←
    try _ ← tacs; pure none
    catch err => pure (some (← err.toMessageData.toString))
  restoreState s
  if let some err := err then
    unless msg.trim == err.trim do
      if let some ref := ref then
        throwErrorAt ref "tactic '{ref}' failed, but got different error message:\n\n{err}"
      else
        throwError "tactic failed, but got different error message:\n\n{err}"
  else
    if let some ref := ref then
      throwErrorAt ref "tactic '{ref}' succeeded, but was expected to fail"
    else
      throwError "tactic succeeded, but was expected to fail"

elab_rules : tactic
  | `(tactic| success_if_fail_with_msg $msg:term $tacs:tacticSeq) =>
    Term.withoutErrToSorry <| withoutRecover do
      -- Evaluate the message term to a concrete `String` at elaboration time.
      let msg ← unsafe Term.evalTerm String (.const ``String []) msg
      successIfFailWithMessage msg (evalTacticSeq tacs) tacs
Tactic\SudoSetOption.lean
/-
Copyright (c) 2021 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner
-/
import Lean.Elab.ElabRules

/-!
# Defines the `sudo set_option` command.

Allows setting undeclared options.
-/

open Lean Elab

/-- Interprets `val` as a boolean identifier (`true`/`false`), a natural number literal,
or a string literal, and inserts the resulting value into `opts` under the name `name`.
Throws an error for any other kind of value. The option name is not required to be declared. -/
private def setOption {m : Type → Type} [Monad m] [MonadError m] (name val : Syntax)
    (opts : Options) : m Options := do
  let val ← match val with
    | Syntax.ident _ _ `true _ => pure <| DataValue.ofBool true
    | Syntax.ident _ _ `false _ => pure <| DataValue.ofBool false
    | _ => match val.isNatLit? with
      | some num => pure <| DataValue.ofNat num
      | none => match val.isStrLit? with
        | some str => pure <| DataValue.ofString str
        | none => throwError "unsupported option value {val}"
  pure <| opts.insert name.getId val

open Elab.Command in
/-- The command `sudo set_option name val` is similar to `set_option name val`,
but it also allows to set undeclared options. -/
elab "sudo " "set_option " n:ident ppSpace val:term : command => do
  let options ← setOption n val (← getOptions)
  -- Keep the cached `maxRecDepth` in the command state in sync with the new options.
  modify fun s ↦ { s with maxRecDepth := maxRecDepth.get options }
  modifyScope fun scope ↦ { scope with opts := options }

open Elab.Term in
/-- The command `sudo set_option name val in term` is similar to `set_option name val in term`,
but it also allows to set undeclared options. -/
elab "sudo " "set_option " n:ident ppSpace val:term " in " body:term : term <= expectedType => do
  let options ← setOption n val (← getOptions)
  withTheReader Core.Context
      (fun ctx ↦ { ctx with maxRecDepth := maxRecDepth.get options, options := options }) do
    elabTerm body expectedType

/-
sudo set_option trace.Elab.resuming true in
#check 4

#check sudo set_option trace.Elab.resuming true in
by exact 4
-/
Tactic\SuppressCompilation.lean
/-
Copyright (c) 2023 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alex J. Best, Mac Malone
-/
import Lean.Elab.Declaration
import Lean.Elab.Notation

/-!
# Suppressing compilation to executable code in a file or in a section

Currently, the compiler may spend a lot of time trying to produce executable code for
complicated definitions. This is a waste of resources for definitions in areas of mathematics
that will never lead to executable code. The command `suppress_compilation` is a hack to disable
code generation on all definitions (in a section or in a whole file). See the issue mathlib4#7103

To compile a definition even when `suppress_compilation` is active, use
`unsuppress_compilation in def foo : ...`. This is activated by default on notations to make
sure that they work properly.

Note that `suppress_compilation` does not work with `notation3`. You need to prefix such a
notation declaration with `unsuppress_compilation` if `suppress_compilation` is active.
-/

open Lean Parser Elab Command

/-- Replacing `def` and `instance` by `noncomputable def` and `noncomputable instance`, designed
to disable the compiler in a given file or a given section.
This is a hack to work around mathlib4#7103.

Each case re-emits the declaration with `noncomputable` inserted; the final catch-all defers
to the built-in declaration elaborator. -/
def elabSuppressCompilationDecl : CommandElab := fun
  | `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)? $(recKind?)?
      def $id $sig:optDeclSig $val:declVal) => do
    elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
      $(recKind?)? def $id $sig:optDeclSig $val:declVal)
  | `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)? $(recKind?)?
      def $id $sig:optDeclSig $val:declVal deriving $derivs,*) => do
    elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
      $(recKind?)? def $id $sig:optDeclSig $val:declVal deriving $derivs,*)
  | `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)? $(recKind?)?
      $(attrKind?)? instance $(prio?)? $(id?)? $sig:declSig $val:declVal) => do
    elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
      $(recKind?)? $(attrKind?)? instance $(prio?)? $(id?)? $sig:declSig $val:declVal)
  | `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)? $(recKind?)?
      example $sig:optDeclSig $val:declVal) => do
    elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
      $(recKind?)? example $sig:optDeclSig $val:declVal)
  | `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)? $(recKind?)?
      abbrev $id $sig:optDeclSig $val:declVal) => do
    elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
      $(recKind?)? abbrev $id $sig:optDeclSig $val:declVal)
  | _ => throwUnsupportedSyntax

/-- The command `unsuppress_compilation in def foo : ...` makes sure that the definition is
compiled to executable code, even if `suppress_compilation` is active. -/
syntax "unsuppress_compilation" (" in " command)? : command

/-- Make sure that notations are compiled, even if `suppress_compilation` is active, by
prepending them with `unsuppress_compilation`. -/
def expandSuppressCompilationNotation : Macro := fun
  | `($[$doc?:docComment]? $(attrs?)? $(attrKind)? notation $(prec?)? $(name?)? $(prio?)?
      $items* => $v) => do
    -- Expand the notation with the built-in expander, then wrap the result.
    let defn ← expandNotation <| ← `($[$doc?:docComment]? $(attrs?)? $(attrKind)?
      notation $(prec?)? $(name?)? $(prio?)? $items* => $v)
    `(unsuppress_compilation in $(⟨defn⟩):command)
  | _ => Macro.throwUnsupported

/-- Replacing `def` and `instance` by `noncomputable def` and `noncomputable instance`, designed
to disable the compiler in a given file or a given section.
This is a hack to work around mathlib4#7103.

Note that it does not work with `notation3`. You need to prefix such a notation declaration with
`unsuppress_compilation` if `suppress_compilation` is active. -/
macro "suppress_compilation" : command => do
  let declKind := mkIdent ``declaration
  let notaKind := mkIdent ``«notation»
  let declElab := mkCIdent ``elabSuppressCompilationDecl
  let notaMacro := mkCIdent ``expandSuppressCompilationNotation
  -- Install the noncomputable-inserting elaborator and notation wrapper as *local* overrides.
  `(attribute [local command_elab $declKind] $declElab
    attribute [local macro $notaKind] $notaMacro)

/-- The command `unsuppress_compilation in def foo : ...` makes sure that the definition is
compiled to executable code, even if `suppress_compilation` is active. -/
macro_rules
  | `(unsuppress_compilation $[in $cmd?]?) => do
    let declElab := mkCIdent ``elabSuppressCompilationDecl
    let notaMacro := mkCIdent ``expandSuppressCompilationNotation
    -- Remove the local overrides installed by `suppress_compilation` …
    let attrCmds ← `(attribute [-command_elab] $declElab
      attribute [-macro] $notaMacro)
    -- … and, in the `in` form, re-enable suppression after the wrapped command.
    if let some cmd := cmd? then
      `($attrCmds:command
        $cmd:command
        suppress_compilation)
    else
      return attrCmds
Tactic\SwapVar.lean
/-
Copyright (c) 2022 Arthur Paulino. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Arthur Paulino
-/
import Lean.Elab.ElabRules
import Mathlib.Util.Tactic

/-!
# Defines the `swap_var` tactic

Swap the names of two hypotheses.
-/

open Lean Meta Elab.Tactic

namespace Mathlib.Tactic

/-- Syntax for a single swap rule: two hypothesis names, optionally separated by `↔`. -/
syntax swapRule := ident " ↔"? ppSpace ident

/-- `swap_var swap_rule₁, swap_rule₂, ⋯` applies `swap_rule₁` then `swap_rule₂` then `⋯`.

A *swap_rule* is of the form `x y` or `x ↔ y`, and "applying it" means swapping the variable name
`x` by `y` and vice-versa on all hypotheses and the goal.

```lean
example {P Q : Prop} (q : P) (p : Q) : P ∧ Q := by
  swap_var p ↔ q
  exact ⟨p, q⟩
```
-/
elab "swap_var " swapRules:(colGt swapRule),+ : tactic => do
  let goal ← getMainGoal
  let goalDecl ← goal.getDecl
  let insts := goalDecl.localInstances
  -- Thread the local context through the swap rules, renaming two fvars at each step.
  let newLCtx ← swapRules.getElems.foldlM (init := goalDecl.lctx) fun ctx rule ↦
    withLCtx ctx insts do
      let `(swapRule| $x₁:ident $[↔]? $x₂:ident) := rule
        | unreachable!
      let name₁ := x₁.getId
      let name₂ := x₂.getId
      let fvar₁ := (← getLocalDeclFromUserName name₁).fvarId
      let fvar₂ := (← getLocalDeclFromUserName name₂).fvarId
      return (ctx.setUserName fvar₁ name₂).setUserName fvar₂ name₁
  -- Store the goal's metavariable declaration back with the renamed context.
  modifyMCtx fun mctx ↦
    { mctx with decls := mctx.decls.insert goal { goalDecl with lctx := newLCtx } }
Tactic\Tauto.lean
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon, David Renshaw
-/
import Mathlib.Tactic.CasesM
import Mathlib.Tactic.Core
import Mathlib.Lean.Elab.Tactic.Basic
import Mathlib.Logic.Basic
import Qq
import Batteries.Tactic.Classical

/-!
The `tauto` tactic.
-/

namespace Mathlib.Tactic.Tauto

open Lean Elab.Tactic Parser.Tactic Lean.Meta MVarId Batteries.Tactic
open Qq

initialize registerTraceClass `tauto

/-- Tries to apply de-Morgan-like rules on a hypothesis.

The hypothesis (which must be a proposition) is matched against a list of shapes such as
`¬(a ∧ b)`, `¬(a ∨ b)`, `¬¬a`, `a ↔ b`, …; on a match it is replaced by an equivalent form
that the rest of `tauto` can break down further. Several rewrites require `Decidable`
instances, which are synthesized on the fly. -/
def distribNotOnceAt (hypFVar : Expr) (g : MVarId) : MetaM AssertAfterResult :=
  g.withContext do
    let .fvar fvarId := hypFVar | throwError "not fvar {hypFVar}"
    let h ← fvarId.getDecl
    -- Only propositional hypotheses are candidates.
    let e : Q(Prop) ← (do guard <| ← Meta.isProp h.type; pure h.type)
    -- Replace the hypothesis by the proof `p`, clearing the original.
    let replace (p : Expr) : MetaM AssertAfterResult := do
      commitIfNoEx do
        let result ← g.assertAfter fvarId h.userName (← inferType p) p
        /- We attempt to clear the old hypothesis. Doing so is crucial
        for avoiding infinite loops. On failure, we roll back the MetaM state and
        ignore this hypothesis.
        See https://github.com/leanprover-community/mathlib4/issues/10590. -/
        let newGoal ← result.mvarId.clear fvarId
        return { result with mvarId := newGoal }
    match e with
    | ~q(¬ ($a : Prop) = $b) => do
      let h' : Q(¬$a = $b) := h.toExpr
      replace q(mt propext $h')
    | ~q(($a : Prop) = $b) => do
      let h' : Q($a = $b) := h.toExpr
      replace q(Eq.to_iff $h')
    | ~q(¬ (($a : Prop) ∧ $b)) => do
      let h' : Q(¬($a ∧ $b)) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $b) : Q(Type))
      replace q(Decidable.not_and_iff_or_not_not'.mp $h')
    | ~q(¬ (($a : Prop) ∨ $b)) => do
      let h' : Q(¬($a ∨ $b)) := h.toExpr
      replace q(not_or.mp $h')
    | ~q(¬ (($a : Prop) ≠ $b)) => do
      let h' : Q(¬($a ≠ $b)) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable ($a = $b)) : Q(Type))
      replace q(Decidable.of_not_not $h')
    | ~q(¬¬ ($a : Prop)) => do
      let h' : Q(¬¬$a) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $a) : Q(Type))
      replace q(Decidable.of_not_not $h')
    | ~q(¬ ((($a : Prop)) → $b)) => do
      let h' : Q(¬($a → $b)) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $a) : Q(Type))
      replace q(Decidable.not_imp_iff_and_not.mp $h')
    | ~q(¬ (($a : Prop) ↔ $b)) => do
      let h' : Q(¬($a ↔ $b)) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $b) : Q(Type))
      replace q(Decidable.not_iff.mp $h')
    | ~q(($a : Prop) ↔ $b) => do
      let h' : Q($a ↔ $b) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $b) : Q(Type))
      replace q(Decidable.iff_iff_and_or_not_and_not.mp $h')
    | ~q((((($a : Prop)) → False) : Prop)) =>
      -- `a → False` is definitionally `¬a`; there is nothing to distribute.
      throwError "distribNot found nothing to work on with negation"
    | ~q((((($a : Prop)) → $b) : Prop)) => do
      let h' : Q($a → $b) := h.toExpr
      let _inst ← synthInstanceQ (q(Decidable $a) : Q(Type))
      replace q(Decidable.not_or_of_imp $h')
    | _ => throwError "distribNot found nothing to work on"

/-- State of the `distribNotAt` function. We need to carry around the list of
remaining hypotheses as fvars so that we can incrementally apply the
`AssertAfterResult.subst` from each step to each of them.
Otherwise, they could end up referring to old hypotheses. -/
structure DistribNotState where
  /-- The list of hypotheses left to work on, renamed to be up-to-date with the current goal. -/
  fvars : List Expr
  /-- The current goal. -/
  currentGoal : MVarId

/-- Calls `distribNotOnceAt` on the head of `state.fvars` up to `nIters` times,
returning early on failure. -/
partial def distribNotAt (nIters : Nat) (state : DistribNotState) : MetaM DistribNotState :=
  match nIters, state.fvars with
  | 0, _ | _, [] => pure state
  | n + 1, fv::fvs => do
    try
      let result ← distribNotOnceAt fv state.currentGoal
      -- Rename the remaining fvars so they refer to hypotheses of the new goal.
      let newFVars := mkFVar result.fvarId :: fvs.map (fun x ↦ result.subst.apply x)
      distribNotAt n ⟨newFVars, result.mvarId⟩
    catch _ => pure state

/-- For each fvar in `fvars`, calls `distribNotAt` and carries along the resulting renamings. -/
partial def distribNotAux (fvars : List Expr) (g : MVarId) : MetaM MVarId :=
  match fvars with
  | [] => pure g
  | _ => do
    let result ← distribNotAt 3 ⟨fvars, g⟩
    distribNotAux result.fvars.tail! result.currentGoal

/-- Tries to apply de-Morgan-like rules on all hypotheses.
Always succeeds, regardless of whether any progress was actually made. -/
def distribNot : TacticM Unit := withMainContext do
  let mut fvars := []
  for h in ← getLCtx do
    if !h.isImplementationDetail then
      fvars := mkFVar h.fvarId :: fvars
  liftMetaTactic' (distribNotAux fvars)

/-- Config for the `tauto` tactic. Currently empty.
TODO: add `closer` option. -/
structure Config

/-- Function elaborating `Config`. -/
declare_config_elab elabConfig Config

/-- Matches propositions where we want to apply the `constructor` tactic
in the core loop of `tauto`. -/
def coreConstructorMatcher (e : Q(Prop)) : MetaM Bool :=
  match e with
  | ~q(_ ∧ _) => pure true
  | ~q(_ ↔ _) => pure true
  | ~q(True) => pure true
  | _ => pure false

/-- Matches propositions where we want to apply the `cases` tactic
in the core loop of `tauto`. -/
def casesMatcher (e : Q(Prop)) : MetaM Bool :=
  match e with
  | ~q(_ ∧ _) => pure true
  | ~q(_ ∨ _) => pure true
  | ~q(Exists _) => pure true
  | ~q(False) => pure true
  | _ => pure false

@[inherit_doc] local infixl: 50 " <;> " => andThenOnSubgoals

/-- The core loop of the `tauto` tactic. Repeatedly tries to break down propositions
until no more progress can be made. Tries `assumption` and `contradiction` at every
step, to discharge goals as soon as possible. Does not do anything that requires
backtracking.

TODO: The Lean 3 version uses more-powerful versions of `contradiction` and `assumption`
that additionally apply `symm` and use a fancy union-find data structure to avoid
duplicated work. -/
def tautoCore : TacticM Unit := do
  _ ← tryTactic (evalTactic (← `(tactic| contradiction)))
  _ ← tryTactic (evalTactic (← `(tactic| assumption)))
  iterateUntilFailure do
    let gs ← getUnsolvedGoals
    allGoals (
      liftMetaTactic (fun m => do pure [(← m.intros!).2]) <;>
      distribNot <;>
      liftMetaTactic (casesMatching casesMatcher (recursive := true)
        (throwOnNoMatch := false)) <;>
      (do _ ← tryTactic (evalTactic (← `(tactic| contradiction)))) <;>
      (do _ ← tryTactic (evalTactic (← `(tactic| refine or_iff_not_imp_left.mpr ?_)))) <;>
      liftMetaTactic (fun m => do pure [(← m.intros!).2]) <;>
      liftMetaTactic (constructorMatching · coreConstructorMatcher
        (recursive := true) (throwOnNoMatch := false)) <;>
      do _ ← tryTactic (evalTactic (← `(tactic| assumption))))
    let gs' ← getUnsolvedGoals
    if gs == gs' then failure -- no progress
    pure ()

/-- Matches propositions where we want to apply the `constructor` tactic in the
finishing stage of `tauto`. -/
def finishingConstructorMatcher (e : Q(Prop)) : MetaM Bool :=
  match e with
  | ~q(_ ∧ _) => pure true
  | ~q(_ ↔ _) => pure true
  | ~q(Exists _) => pure true
  | ~q(True) => pure true
  | _ => pure false

/-- Implementation of the `tauto` tactic. -/
def tautology : TacticM Unit := focusAndDoneWithScope "tauto" do
  classical do
    tautoCore
    -- Finishing stage: close (or split) every remaining goal until no step applies.
    allGoals (iterateUntilFailure
      (evalTactic (← `(tactic| rfl)) <|>
       evalTactic (← `(tactic| solve_by_elim)) <|>
       liftMetaTactic (constructorMatching · finishingConstructorMatcher)))

/--
`tauto` breaks down assumptions of the form `_ ∧ _`, `_ ∨ _`, `_ ↔ _` and `∃ _, _`
and splits a goal of the form `_ ∧ _`, `_ ↔ _` or `∃ _, _` until it can be discharged
using `reflexivity` or `solve_by_elim`.
This is a finishing tactic: it either closes the goal or raises an error.

The Lean 3 version of this tactic by default attempted to avoid classical reasoning
where possible. This Lean 4 version makes no such attempt. The `itauto` tactic
is designed for that purpose.
-/
syntax (name := tauto) "tauto" (config)? : tactic

elab_rules : tactic | `(tactic| tauto $[$cfg:config]?) => do
  let _cfg ← elabConfig (mkOptionalNode cfg)
  tautology
Tactic\TermCongr.lean
/-
Copyright (c) 2023 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import Mathlib.Lean.Expr.Basic
import Mathlib.Lean.Meta.CongrTheorems
import Mathlib.Logic.Basic
import Mathlib.Tactic.CongrExclamation

/-!
# `congr(...)` congruence quotations

This module defines a term elaborator for generating congruence lemmas from patterns written
using quotation syntax. One can write `congr($hf $hx)` with `hf : f = f'` and `hx : x = x'`
to get `f x = f' x'`. While in simple cases it might be possible to use `congr_arg` or
`congr_fun`, congruence quotations are more general, since for example `f` could have implicit
arguments, complicated dependent types, and subsingleton instance arguments such as `Decidable`
or `Fintype`.

The implementation strategy is the following:

1. The pattern is elaborated twice, once with each hole replaced by the LHS and again with each
   hole replaced by the RHS. We do not force the hole to have any particular type while
   elaborating, but if the hole has a type with an obvious LHS or RHS, then we propagate this
   information outward. We use `Mathlib.Tactic.TermCongr.cHole` with metadata for these
   replacements to hold onto the hole itself.

2. Once the pattern has been elaborated twice, we unify them against the respective LHS and RHS
   of the target type if the target has a type with an obvious LHS and RHS. This can fill in
   some metavariables and help typeclass inference make progress.

3. Then we simultaneously walk along the elaborated LHS and RHS expressions to generate a
   congruence. When we reach `cHole`s, we make sure they elaborated in a compatible way.
   Each `Expr` type has some logic to come up with a suitable congruence. For applications
   we use a version of `Lean.Meta.mkHCongrWithArity` that tries to fill in some of the
   equality proofs using subsingleton lemmas.

The point of elaborating the expression twice is that we let the elaborator handle activities
like synthesizing instances, etc., specialized to LHS or RHS, without trying to derive one side
from the other. During development there was a version using `simp` transformations, but there
was no way to inform `simp` about the expected RHS, which could cause `simp` to fail because
it eagerly wants to solve for instance arguments. The current version is able to use the
expected LHS and RHS to fill in arguments before solving for instance arguments.
-/

universe u

namespace Mathlib.Tactic.TermCongr

open Lean Elab Meta

initialize registerTraceClass `Elab.congr

/-- `congr(expr)` generates a congruence from an expression containing
congruence holes of the form `$h` or `$(h)`.
In these congruence holes, `h : a = b` indicates that, in the generated congruence,
on the left-hand side `a` is substituted for `$h`
and on the right-hand side `b` is substituted for `$h`.

For example, if `h : a = b` then `congr(1 + $h) : 1 + a = 1 + b`.

This is able to make use of the expected type, for example `(congr(_ + $h) : 1 + _ = _)`
with `h : x = y` gives `1 + x = 1 + y`.
The expected type can be an `Iff`, `Eq`, or `HEq`.
If there is no expected type, then it generates an equality.

Note: the process of generating a congruence lemma involves elaborating the pattern using
terms with attached metadata and a reducible wrapper. We try to avoid doing so, but these
terms can leak into the local context through unification. This can potentially break
tactics that are sensitive to metadata or reducible functions.
Please report anything that goes wrong with `congr(...)` lemmas on Zulip.

For debugging, you can set `set_option trace.Elab.congr true`. -/
syntax (name := termCongr) "congr(" withoutForbidden(ppDedentIfGrouped(term)) ")" : term

/-! ### Congruence holes

This section sets up the way congruence holes are elaborated for `congr(...)` quotations.
The basic problem is that if we have `$h` with `h : x = y`, we need to elaborate it once as `x` and once as `y`, and in both cases the term needs to remember that it's associated to `h`. -/ /-- Key for congruence hole metadata. For a `Bool` recording whether this hole is for the LHS elaboration. -/ private def congrHoleForLhsKey : Name := decl_name% /-- Key for congruence hole metadata. For a `Nat` recording how old this congruence hole is, to prevent reprocessing them if they leak into the local context. -/ private def congrHoleIndex : Name := decl_name% /-- For holding onto the hole's value along with the value of either the LHS or RHS of the hole. These occur wrapped in metadata so that they always appear as function application with exactly four arguments. Note that there is no relation between `val` and the proof. We need to decouple these to support letting the proof's elaboration be deferred until we know whether we want an iff, eq, or heq, while also allowing it to choose to elaborate as an iff, eq, or heq. Later, the congruence generator handles any discrepencies. See `Mathlib.Tactic.TermCongr.CongrResult`. -/ @[reducible, nolint unusedArguments] def cHole {α : Sort u} (val : α) {p : Prop} (_pf : p) : α := val /-- For error reporting purposes, make the hole pretty print as its value. We can still see that it is a hole in the info view on mouseover. -/ @[app_unexpander cHole] def unexpandCHole : Lean.PrettyPrinter.Unexpander | `($_ $val $_) => pure val | _ => throw () /-- Create the congruence hole. Used by `elabCHole`. Saves the current mvarCounter as a proxy for age. We use this to avoid reprocessing old congruence holes that happened to leak into the local context. -/ def mkCHole (forLhs : Bool) (val pf : Expr) : MetaM Expr := do -- Create a metavariable to bump the mvarCounter. 
discard <| mkFreshTypeMVar let d : MData := KVMap.empty |>.insert congrHoleForLhsKey forLhs |>.insert congrHoleIndex (← getMCtx).mvarCounter return Expr.mdata d <| ← mkAppM ``cHole #[val, pf] /-- If the expression is a congruence hole, returns `(forLhs, sideVal, pf)`. If `mvarCounterSaved?` is not none, then only returns the hole if it is at least as recent. -/ def cHole? (e : Expr) (mvarCounterSaved? : Option Nat := none) : Option (Bool × Expr × Expr) := do match e with | .mdata d e' => let forLhs : Bool ← d.get? congrHoleForLhsKey let mvarCounter : Nat ← d.get? congrHoleIndex if let some mvarCounterSaved := mvarCounterSaved? then guard <| mvarCounterSaved ≤ mvarCounter let #[_, val, _, pf] := e'.getAppArgs | failure return (forLhs, val, pf) | _ => none /-- Returns any subexpression that is a recent congruence hole. -/ def hasCHole (mvarCounterSaved : Nat) (e : Expr) : Option Expr := e.find? fun e' => (cHole? e' mvarCounterSaved).isSome /-- Eliminate all congruence holes from an expression by replacing them with their values. -/ def removeCHoles (e : Expr) : Expr := e.replace fun e' => if let some (_, val, _) := cHole? e' then val else none /-- Elaborates a congruence hole and returns either the left-hand side or the right-hand side, annotated with information necessary to generate a congruence lemma. -/ def elabCHole (h : Syntax) (forLhs : Bool) (expectedType? : Option Expr) : Term.TermElabM Expr := do let pf ← Term.elabTerm h none let pfTy ← inferType pf -- Ensure that `pfTy` is a proposition unless ← isDefEq (← inferType pfTy) (.sort .zero) do throwError "Hole has type{indentD pfTy}\nbut is expected to be a Prop" if let some (_, lhs, _, rhs) := (← whnf pfTy).sides? then let val := if forLhs then lhs else rhs if let some expectedType := expectedType? then -- Propagate type hint: discard <| isDefEq expectedType (← inferType val) mkCHole forLhs val pf else -- Since `pf` doesn't yet have sides, we resort to the value and the proof being decoupled. 
-- These will be unified during congruence generation. mkCHole forLhs (← mkFreshExprMVar expectedType?) pf /-- (Internal for `congr(...)`) Elaborates to an expression satisfying `cHole?` that equals the LHS or RHS of `h`, if the LHS or RHS is available after elaborating `h`. Uses the expected type as a hint. -/ syntax (name := cHoleExpand) "cHole% " (&"lhs" <|> &"rhs") term : term @[term_elab cHoleExpand, inherit_doc cHoleExpand] def elabCHoleExpand : Term.TermElab := fun stx expectedType? => match stx with | `(cHole% lhs $h) => elabCHole h true expectedType? | `(cHole% rhs $h) => elabCHole h false expectedType? | _ => throwUnsupportedSyntax /-- Replace all `term` antiquotations in a term using the given `expand` function. -/ def processAntiquot (t : Term) (expand : Term → Term.TermElabM Term) : Term.TermElabM Term := do let t' ← t.raw.replaceM fun s => do if s.isAntiquots then let ks := s.antiquotKinds unless ks.any (fun (k, _) => k == `term) do throwErrorAt s "Expecting term" let h : Term := ⟨s.getCanonicalAntiquot.getAntiquotTerm⟩ expand h else pure none return ⟨t'⟩ /-- Given the pattern `t` in `congr(t)`, elaborate it for the given side by replacing antiquotations with `cHole%` terms, and ensure the elaborated term is of the expected type. -/ def elaboratePattern (t : Term) (expectedType? : Option Expr) (forLhs : Bool) : Term.TermElabM Expr := Term.withoutErrToSorry do let t' ← processAntiquot t (fun h => if forLhs then `(cHole% lhs $h) else `(cHole% rhs $h)) Term.elabTermEnsuringType t' expectedType? /-! ### Congruence generation -/ /-- Ensures the expected type is an equality. Returns the equality. The returned expression satisfies `Lean.Expr.eq?`. -/ def mkEqForExpectedType (expectedType? : Option Expr) : MetaM Expr := do let u ← mkFreshLevelMVar let ty ← mkFreshExprMVar (mkSort u) let eq := mkApp3 (mkConst ``Eq [u]) ty (← mkFreshExprMVar ty) (← mkFreshExprMVar ty) if let some expectedType := expectedType? 
then unless ← isDefEq expectedType eq do throwError m!"Type{indentD expectedType}\nis expected to be an equality." return eq /-- Ensures the expected type is a HEq. Returns the HEq. This expression satisfies `Lean.Expr.heq?`. -/ def mkHEqForExpectedType (expectedType? : Option Expr) : MetaM Expr := do let u ← mkFreshLevelMVar let tya ← mkFreshExprMVar (mkSort u) let tyb ← mkFreshExprMVar (mkSort u) let heq := mkApp4 (mkConst ``HEq [u]) tya (← mkFreshExprMVar tya) tyb (← mkFreshExprMVar tyb) if let some expectedType := expectedType? then unless ← isDefEq expectedType heq do throwError m!"Type{indentD expectedType}\nis expected to be a `HEq`." return heq /-- Ensures the expected type is an iff. Returns the iff. This expression satisfies `Lean.Expr.iff?`. -/ def mkIffForExpectedType (expectedType? : Option Expr) : MetaM Expr := do let a ← mkFreshExprMVar (Expr.sort .zero) let b ← mkFreshExprMVar (Expr.sort .zero) let iff := mkApp2 (Expr.const `Iff []) a b if let some expectedType := expectedType? then unless ← isDefEq expectedType iff do throwError m!"Type{indentD expectedType}\nis expected to be an `Iff`." return iff /-- Make sure that the expected type of `pf` is an iff by unification. -/ def ensureIff (pf : Expr) : MetaM Expr := do discard <| mkIffForExpectedType (← inferType pf) return pf /-- A request for a type of congruence lemma from a `CongrResult`. -/ inductive CongrType | eq | heq /-- A congruence lemma between two expressions. The proof is generated dynamically, depending on whether the resulting lemma should be an `Eq` or a `HEq`. If generating a proof impossible, then the generator can throw an error. This can be due to either an `Eq` proof being impossible or due to the lhs/rhs not being defeq to the lhs/rhs of the generated proof, which can happen for user-supplied congruence holes. This complexity is to support two features: 1. 
The user is free to supply Iff, Eq, and HEq lemmas in congurence holes, and we're able to transform them into whatever is appropriate for a given congruence lemma. 2. If the congrence hole is a metavariable, then we can specialize that hole to an Iff, Eq, or HEq depending on what's necessary at that site. -/ structure CongrResult where /-- The left-hand side of the congruence result. -/ lhs : Expr /-- The right-hand side of the congruence result. -/ rhs : Expr /-- A generator for an `Eq lhs rhs` or `HEq lhs rhs` proof. If such a proof is impossible, the generator can throw an error. The inferred type of the generated proof needs only be defeq to `Eq lhs rhs` or `HEq lhs rhs`. This function can assign metavariables when constructing the proof. If `pf? = none`, then `lhs` and `rhs` are defeq, and the proof is by reflexivity. -/ (pf? : Option (CongrType → MetaM Expr)) /-- Returns whether the proof is by reflexivity. Such congruence proofs are trivial. -/ def CongrResult.isRfl (res : CongrResult) : Bool := res.pf?.isNone /-- Returns the proof that `lhs = rhs`. Fails if the `CongrResult` is inapplicable. Throws an error if the `lhs` and `rhs` have non-defeq types. If `pf? = none`, this returns the `rfl` proof. -/ def CongrResult.eq (res : CongrResult) : MetaM Expr := do unless ← isDefEq (← inferType res.lhs) (← inferType res.rhs) do throwError "Expecting{indentD res.lhs}\nand{indentD res.rhs}\n\ to have definitionally equal types." match res.pf? with | some pf => pf .eq | none => mkEqRefl res.lhs /-- Returns the proof that `HEq lhs rhs`. Fails if the `CongrResult` is inapplicable. If `pf? = none`, this returns the `rfl` proof. -/ def CongrResult.heq (res : CongrResult) : MetaM Expr := do match res.pf? with | some pf => pf .heq | none => mkHEqRefl res.lhs /-- Returns a proof of `lhs ↔ rhs`. Uses `CongrResult.eq`. -/ def CongrResult.iff (res : CongrResult) : MetaM Expr := do unless ← Meta.isProp res.lhs do throwError "Expecting{indentD res.lhs}\nto be a proposition." 
return mkApp3 (.const ``iff_of_eq []) res.lhs res.rhs (← res.eq) /-- Combine two congruence proofs using transitivity. Does not check that `res1.rhs` is defeq to `res2.lhs`. If both `res1` and `res2` are trivial then the result is trivial. -/ def CongrResult.trans (res1 res2 : CongrResult) : CongrResult where lhs := res1.lhs rhs := res2.rhs pf? := if res1.isRfl then res2.pf? else if res2.isRfl then res1.pf? else some fun | .eq => do mkEqTrans (← res1.eq) (← res2.eq) | .heq => do mkHEqTrans (← res1.heq) (← res2.heq) /-- Make a `CongrResult` from a LHS, a RHS, and a proof of an Iff, Eq, or HEq. The proof is allowed to have a metavariable for its type. Validates the inputs and throws errors in the `pf?` function. The `pf?` function is responsible for finally unifying the type of `pf` with `lhs` and `rhs`. -/ def CongrResult.mk' (lhs rhs : Expr) (pf : Expr) : CongrResult where lhs := lhs rhs := rhs pf? := some fun | .eq => do ensureSidesDefeq (← toEqPf) | .heq => do ensureSidesDefeq (← toHEqPf) where /-- Given a `pf` of an `Iff`, `Eq`, or `HEq`, return a proof of `Eq`. If `pf` is not obviously any of these, weakly try inserting `propext` to make an `Iff` and otherwise unify the type with `Eq`. -/ toEqPf : MetaM Expr := do let ty ← whnf (← inferType pf) if let some .. := ty.iff? then mkPropExt pf else if let some .. := ty.eq? then return pf else if let some (lhsTy, _, rhsTy, _) := ty.heq? then unless ← isDefEq lhsTy rhsTy do throwError "Cannot turn HEq proof into an equality proof. Has type{indentD ty}" mkAppM ``eq_of_heq #[pf] else if ← Meta.isProp lhs then mkPropExt (← ensureIff pf) else discard <| mkEqForExpectedType (← inferType pf) return pf /-- Given a `pf` of an `Iff`, `Eq`, or `HEq`, return a proof of `HEq`. If `pf` is not obviously any of these, weakly try making it be an `Eq` or an `Iff`, and otherwise make it be a `HEq`. -/ toHEqPf : MetaM Expr := do let ty ← whnf (← inferType pf) if let some .. := ty.iff? 
then mkAppM ``heq_of_eq #[← mkPropExt pf] else if let some .. := ty.eq? then mkAppM ``heq_of_eq #[pf] else if let some .. := ty.heq? then return pf else if ← withNewMCtxDepth <| isDefEq (← inferType lhs) (← inferType rhs) then mkAppM ``heq_of_eq #[← toEqPf] else discard <| mkHEqForExpectedType (← inferType pf) return pf /-- Get the sides of the type of `pf` and unify them with the respective `lhs` and `rhs`. -/ ensureSidesDefeq (pf : Expr) : MetaM Expr := do let pfTy ← inferType pf let some (_, lhs', _, rhs') := (← whnf pfTy).sides? | panic! "Unexpectedly did not generate an eq or heq" unless ← isDefEq lhs lhs' do throwError "Congruence hole has type{indentD pfTy}\n\ but its left-hand side is not definitionally equal to the expected value{indentD lhs}" unless ← isDefEq rhs rhs' do throwError "Congruence hole has type{indentD pfTy}\n\ but its right-hand side is not definitionally equal to the expected value{indentD rhs}" return pf /-- Force the lhs and rhs to be defeq. For when `dsimp`-like congruence is necessary. Clears the proof. -/ def CongrResult.defeq (res : CongrResult) : MetaM CongrResult := do if res.isRfl then return res else unless ← isDefEq res.lhs res.rhs do throwError "Cannot generate congruence because we need{indentD res.lhs}\n\ to be definitionally equal to{indentD res.rhs}" -- Propagate types into any proofs that we're dropping: discard <| res.eq return {res with pf? := none} /-- Tries to make a congruence between `lhs` and `rhs` automatically. 1. If they are defeq, returns a trivial congruence. 2. Tries using `Subsingleton.elim`. 3. Tries `proof_irrel_heq` as another effort to avoid doing congruence on proofs. 3. Otherwise throws an error. Note: `mkAppM` uses `withNewMCtxDepth`, which prevents typeclass inference from accidentally specializing `Sort _` to `Prop`, which could otherwise happen because there is a `Subsingleton Prop` instance. 
-/ def CongrResult.mkDefault (lhs rhs : Expr) : MetaM CongrResult := do if ← isDefEq lhs rhs then return {lhs, rhs, pf? := none} else if let some pf ← (observing? <| mkAppM ``Subsingleton.elim #[lhs, rhs]) then return CongrResult.mk' lhs rhs pf else if let some pf ← (observing? <| mkAppM ``proof_irrel_heq #[lhs, rhs]) then return CongrResult.mk' lhs rhs pf throwError "Could not generate congruence between{indentD lhs}\nand{indentD rhs}" /-- Does `CongrResult.mkDefault` but makes sure there are no lingering congruence holes. -/ def CongrResult.mkDefault' (mvarCounterSaved : Nat) (lhs rhs : Expr) : MetaM CongrResult := do if let some h := hasCHole mvarCounterSaved lhs then throwError "Left-hand side{indentD lhs}\nstill has a congruence hole{indentD h}" if let some h := hasCHole mvarCounterSaved rhs then throwError "Right-hand side{indentD rhs}\nstill has a congruence hole{indentD h}" CongrResult.mkDefault lhs rhs /-- Throw an internal error. -/ def throwCongrEx {α : Type} (lhs rhs : Expr) (msg : MessageData) : MetaM α := do throwError "congr(...) failed with left-hand side{indentD lhs}\n\ and right-hand side {indentD rhs}\n{msg}" /-- If `lhs` or `rhs` is a congruence hole, then process it. Only process ones that are at least as new as `mvarCounterSaved` since nothing prevents congruence holes from leaking into the local context. -/ def mkCongrOfCHole? (mvarCounterSaved : Nat) (lhs rhs : Expr) : MetaM (Option CongrResult) := do match cHole? lhs mvarCounterSaved, cHole? rhs mvarCounterSaved with | some (isLhs1, val1, pf1), some (isLhs2, val2, pf2) => trace[Elab.congr] "mkCongrOfCHole, both holes" unless isLhs1 == true do throwCongrEx lhs rhs "A RHS congruence hole leaked into the LHS" unless isLhs2 == false do throwCongrEx lhs rhs "A LHS congruence hole leaked into the RHS" -- Defeq checks to unify the lhs and rhs congruence holes. unless ← isDefEq (← inferType pf1) (← inferType pf2) do throwCongrEx lhs rhs "Elaborated types of congruence holes are not defeq." 
if let some (_, lhsVal, _, rhsVal) := (← whnf <| ← inferType pf1).sides? then unless ← isDefEq val1 lhsVal do throwError "Left-hand side of congruence hole is{indentD lhsVal}\n\ but is expected to be{indentD val1}" unless ← isDefEq val2 rhsVal do throwError "Right-hand side of congruence hole is{indentD rhsVal}\n\ but is expected to be{indentD val2}" return some <| CongrResult.mk' val1 val2 pf1 | some .., none => throwCongrEx lhs rhs "Right-hand side lost its congruence hole annotation." | none, some .. => throwCongrEx lhs rhs "Left-hand side lost its congruence hole annotation." | none, none => return none /-- Walks along both `lhs` and `rhs` simultaneously to create a congruence lemma between them. Where they are desynchronized, we fall back to the base case (using `CongrResult.mkDefault'`) since it's likely due to unification with the expected type, from `_` placeholders or implicit arguments being filled in. -/ partial def mkCongrOf (depth : Nat) (mvarCounterSaved : Nat) (lhs rhs : Expr) : MetaM CongrResult := do trace[Elab.congr] "mkCongrOf: {depth}, {lhs}, {rhs}, {(← mkFreshExprMVar none).mvarId!}" if depth > 1000 then throwError "congr(...) internal error: out of gas" -- Potentially metavariables get assigned as we process congruence holes, -- so instantiate them to be safe. Placeholders and implicit arguments might -- end up with congruence holes, so they indeed might need a nontrivial congruence. let lhs ← instantiateMVars lhs let rhs ← instantiateMVars rhs if let some res ← mkCongrOfCHole? mvarCounterSaved lhs rhs then trace[Elab.congr] "hole processing succeeded" return res if (hasCHole mvarCounterSaved lhs).isNone && (hasCHole mvarCounterSaved rhs).isNone then -- It's safe to fastforward if the lhs and rhs are defeq and have no congruence holes. -- This is more conservative than necessary since congruence holes might only be inside proofs. if ← isDefEq lhs rhs then return {lhs, rhs, pf? 
:= none} if ← (isProof lhs <||> isProof rhs) then -- We don't want to look inside proofs at all. return ← CongrResult.mkDefault lhs rhs match lhs, rhs with | .app .., .app .. => trace[Elab.congr] "app" let arity := lhs.getAppNumArgs unless arity == rhs.getAppNumArgs do trace[Elab.congr] "app desync (arity)" return ← CongrResult.mkDefault' mvarCounterSaved lhs rhs let f := lhs.getAppFn let f' := rhs.getAppFn unless ← isDefEq (← inferType f) (← inferType f') do trace[Elab.congr] "app desync (function types)" return ← CongrResult.mkDefault' mvarCounterSaved lhs rhs let fnRes ← mkCongrOf (depth + 1) mvarCounterSaved f f' trace[Elab.congr] "mkCongrOf functions {f}, {f'} has isRfl = {fnRes.isRfl}" if !fnRes.isRfl then -- If there's a nontrivial proof, then since mkHCongrWithArity fixes the function -- we need to handle this ourselves. let lhs := mkAppN fnRes.lhs lhs.getAppArgs let lhs' := mkAppN fnRes.rhs lhs.getAppArgs let rhs := mkAppN fnRes.rhs rhs.getAppArgs let mut pf ← fnRes.eq for arg in lhs.getAppArgs do pf ← mkCongrFun pf arg let res1 := CongrResult.mk' lhs lhs' pf let res2 ← mkCongrOf (depth + 1) mvarCounterSaved lhs' rhs return res1.trans res2 let thm ← mkHCongrWithArity' fnRes.lhs arity let mut args := #[] let mut lhsArgs := #[] let mut rhsArgs := #[] let mut nontriv : Bool := false for lhs' in lhs.getAppArgs, rhs' in rhs.getAppArgs, kind in thm.argKinds do match kind with | .eq => let res ← mkCongrOf (depth + 1) mvarCounterSaved lhs' rhs' nontriv := nontriv || !res.isRfl args := args |>.push res.lhs |>.push res.rhs |>.push (← res.eq) lhsArgs := lhsArgs.push res.lhs rhsArgs := rhsArgs.push res.rhs | .heq => let res ← mkCongrOf (depth + 1) mvarCounterSaved lhs' rhs' nontriv := nontriv || !res.isRfl args := args |>.push res.lhs |>.push res.rhs |>.push (← res.heq) lhsArgs := lhsArgs.push res.lhs rhsArgs := rhsArgs.push res.rhs | .subsingletonInst => -- Warning: we're not processing any congruence holes here. 
-- Users shouldn't be intentionally placing them in such arguments anyway. -- We can't throw an error because these arguments might incidentally have -- congruence holes by unification. nontriv := true let lhs := removeCHoles lhs' let rhs := removeCHoles rhs' args := args |>.push lhs |>.push rhs lhsArgs := lhsArgs.push lhs rhsArgs := rhsArgs.push rhs | _ => panic! "unexpected hcongr argument kind" let lhs := mkAppN fnRes.lhs lhsArgs let rhs := mkAppN fnRes.rhs rhsArgs if nontriv then return CongrResult.mk' lhs rhs (mkAppN thm.proof args) else -- `lhs` and `rhs` *should* be defeq, but use `mkDefault` just to be safe. CongrResult.mkDefault lhs rhs | .lam .., .lam .. => trace[Elab.congr] "lam" let resDom ← mkCongrOf (depth + 1) mvarCounterSaved lhs.bindingDomain! rhs.bindingDomain! -- We do not yet support congruences in the binding domain for lambdas. discard <| resDom.defeq withLocalDecl lhs.bindingName! lhs.bindingInfo! resDom.lhs fun x => do let lhsb := lhs.bindingBody!.instantiate1 x let rhsb := rhs.bindingBody!.instantiate1 x let resBody ← mkCongrOf (depth + 1) mvarCounterSaved lhsb rhsb let lhs ← mkLambdaFVars #[x] resBody.lhs let rhs ← mkLambdaFVars #[x] resBody.rhs if resBody.isRfl then return {lhs, rhs, pf? := none} else let pf ← mkLambdaFVars #[x] (← resBody.eq) return CongrResult.mk' lhs rhs (← mkAppM ``funext #[pf]) | .forallE .., .forallE .. => trace[Elab.congr] "forallE" let resDom ← mkCongrOf (depth + 1) mvarCounterSaved lhs.bindingDomain! rhs.bindingDomain! if lhs.isArrow && rhs.isArrow then let resBody ← mkCongrOf (depth + 1) mvarCounterSaved lhs.bindingBody! rhs.bindingBody! let lhs := Expr.forallE lhs.bindingName! resDom.lhs resBody.lhs lhs.bindingInfo! let rhs := Expr.forallE rhs.bindingName! resDom.rhs resBody.rhs rhs.bindingInfo! if resDom.isRfl && resBody.isRfl then return {lhs, rhs, pf? 
:= none} else return CongrResult.mk' lhs rhs (← mkImpCongr (← resDom.eq) (← resBody.eq)) else -- We do not yet support congruences in the binding domain for dependent pi types. discard <| resDom.defeq withLocalDecl lhs.bindingName! lhs.bindingInfo! resDom.lhs fun x => do let lhsb := lhs.bindingBody!.instantiate1 x let rhsb := rhs.bindingBody!.instantiate1 x let resBody ← mkCongrOf (depth + 1) mvarCounterSaved lhsb rhsb let lhs ← mkForallFVars #[x] resBody.lhs let rhs ← mkForallFVars #[x] resBody.rhs if resBody.isRfl then return {lhs, rhs, pf? := none} else let pf ← mkLambdaFVars #[x] (← resBody.eq) return CongrResult.mk' lhs rhs (← mkAppM ``pi_congr #[pf]) | .letE .., .letE .. => trace[Elab.congr] "letE" -- Just zeta reduce for now. Could look at `Lean.Meta.Simp.simp.simpLet` let lhs := lhs.letBody!.instantiate1 lhs.letValue! let rhs := rhs.letBody!.instantiate1 rhs.letValue! mkCongrOf (depth + 1) mvarCounterSaved lhs rhs | .mdata _ lhs', .mdata _ rhs' => trace[Elab.congr] "mdata" let res ← mkCongrOf (depth + 1) mvarCounterSaved lhs' rhs' return {res with lhs := lhs.updateMData! res.lhs, rhs := rhs.updateMData! res.rhs} | .proj n1 i1 e1, .proj n2 i2 e2 => trace[Elab.congr] "proj" -- Only handles defeq at the moment. unless n1 == n2 && i1 == i2 do throwCongrEx lhs rhs "Incompatible primitive projections" let res ← mkCongrOf (depth + 1) mvarCounterSaved e1 e2 discard <| res.defeq return {lhs := lhs.updateProj! res.lhs, rhs := rhs.updateProj! res.rhs, pf? := none} | _, _ => trace[Elab.congr] "base case" CongrResult.mkDefault' mvarCounterSaved lhs rhs /-! ### Elaborating congruence quotations -/ @[term_elab termCongr, inherit_doc termCongr] def elabTermCongr : Term.TermElab := fun stx expectedType? => do match stx with | `(congr($t)) => -- Save the current mvarCounter so that we know which cHoles are for this congr quotation. let mvarCounterSaved := (← getMCtx).mvarCounter -- Case 1: There is an expected type and it's obviously an Iff/Eq/HEq. 
if let some expectedType := expectedType? then if let some (expLhsTy, expLhs, expRhsTy, expRhs) := (← whnf expectedType).sides? then let lhs ← elaboratePattern t expLhsTy true let rhs ← elaboratePattern t expRhsTy false -- Note: these defeq checks can leak congruence holes. unless ← isDefEq expLhs lhs do throwError "Left-hand side of elaborated pattern{indentD lhs}\n\ is not definitionally equal to left-hand side of expected type{indentD expectedType}" unless ← isDefEq expRhs rhs do throwError "Right-hand side of elaborated pattern{indentD rhs}\n\ is not definitionally equal to right-hand side of expected type{indentD expectedType}" Term.synthesizeSyntheticMVars (postpone := .yes) let res ← mkCongrOf 0 mvarCounterSaved lhs rhs let expectedType' ← whnf expectedType let pf ← if expectedType'.iff?.isSome then res.iff else if expectedType'.isEq then res.eq else if expectedType'.isHEq then res.heq else panic! "unreachable case, sides? guarantees Iff, Eq, and HEq" return ← mkExpectedTypeHint pf expectedType -- Case 2: No expected type or it's not obviously Iff/Eq/HEq. We generate an Eq. let lhs ← elaboratePattern t none true let rhs ← elaboratePattern t none false Term.synthesizeSyntheticMVars (postpone := .yes) let res ← mkCongrOf 0 mvarCounterSaved lhs rhs let pf ← res.eq let ty ← mkEq res.lhs res.rhs mkExpectedTypeHint pf ty | _ => throwUnsupportedSyntax
/- Tactic/TFAE.lean -/
/- Copyright (c) 2018 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin, Reid Barton, Simon Hudon, Thomas Murrills, Mario Carneiro -/ import Qq import Mathlib.Data.Nat.Notation import Mathlib.Util.AtomM import Mathlib.Data.List.TFAE /-! # The Following Are Equivalent (TFAE) This file provides the tactics `tfae_have` and `tfae_finish` for proving goals of the form `TFAE [P₁, P₂, ...]`. -/ open List Lean Meta Expr Elab.Term Elab.Tactic Mathlib.Tactic Qq namespace Mathlib.Tactic.TFAE /-- An arrow of the form `←`, `→`, or `↔`. -/ syntax impArrow := " → " <|> " ↔ " <|> " ← " /-- `tfae_have` introduces hypotheses for proving goals of the form `TFAE [P₁, P₂, ...]`. Specifically, `tfae_have i arrow j` introduces a hypothesis of type `Pᵢ arrow Pⱼ` to the local context, where `arrow` can be `→`, `←`, or `↔`. Note that `i` and `j` are natural number indices (beginning at 1) used to specify the propositions `P₁, P₂, ...` that appear in the `TFAE` goal list. A proof is required afterward, typically via a tactic block. ```lean example (h : P → R) : TFAE [P, Q, R] := by tfae_have 1 → 3 · exact h ... ``` The resulting context now includes `tfae_1_to_3 : P → R`. The introduced hypothesis can be given a custom name, in analogy to `have` syntax: ```lean tfae_have h : 2 ↔ 3 ``` Once sufficient hypotheses have been introduced by `tfae_have`, `tfae_finish` can be used to close the goal. ```lean example : TFAE [P, Q, R] := by tfae_have 1 → 2 · /- proof of P → Q -/ tfae_have 2 → 1 · /- proof of Q → P -/ tfae_have 2 ↔ 3 · /- proof of Q ↔ R -/ tfae_finish ``` -/ syntax (name := tfaeHave) "tfae_have " (ident " : ")? num impArrow num : tactic /-- `tfae_finish` is used to close goals of the form `TFAE [P₁, P₂, ...]` once a sufficient collection of hypotheses of the form `Pᵢ → Pⱼ` or `Pᵢ ↔ Pⱼ` have been introduced to the local context. 
`tfae_have` can be used to conveniently introduce these hypotheses; see `tfae_have`. Example: ```lean example : TFAE [P, Q, R] := by tfae_have 1 → 2 · /- proof of P → Q -/ tfae_have 2 → 1 · /- proof of Q → P -/ tfae_have 2 ↔ 3 · /- proof of Q ↔ R -/ tfae_finish ``` -/ syntax (name := tfaeFinish) "tfae_finish" : tactic /-! # Setup -/ /-- Extract a list of `Prop` expressions from an expression of the form `TFAE [P₁, P₂, ...]` as long as `[P₁, P₂, ...]` is an explicit list. -/ partial def getTFAEList (t : Expr) : MetaM (Q(List Prop) × List Q(Prop)) := do let .app tfae (l : Q(List Prop)) ← whnfR <|← instantiateMVars t | throwError "goal must be of the form TFAE [P₁, P₂, ...]" unless (← withNewMCtxDepth <| isDefEq tfae q(TFAE)) do throwError "goal must be of the form TFAE [P₁, P₂, ...]" return (l, ← getExplicitList l) where /-- Convert an expression representing an explicit list into a list of expressions. -/ getExplicitList (l : Q(List Prop)) : MetaM (List Q(Prop)) := do match l with | ~q([]) => return ([] : List Expr) | ~q($a :: $l') => return (a :: (← getExplicitList l')) | e => throwError "{e} must be an explicit list of propositions" /-! # Proof construction -/ variable (hyps : Array (ℕ × ℕ × Expr)) (atoms : Array Q(Prop)) /-- Uses depth-first search to find a path from `P` to `P'`. -/ partial def dfs (i j : ℕ) (P P' : Q(Prop)) (hP : Q($P)) : StateT (HashSet ℕ) MetaM Q($P') := do if i == j then return hP modify (·.insert i) for (a, b, h) in hyps do if i == a then if !(← get).contains b then have Q := atoms[b]! have h : Q($P → $Q) := h try return ← dfs b j Q P' q($h $hP) catch _ => pure () failure /-- Prove an implication via depth-first traversal. -/ def proveImpl (i j : ℕ) (P P' : Q(Prop)) : MetaM Q($P → $P') := do try withLocalDeclD (← mkFreshUserName `h) P fun (h : Q($P)) => do mkLambdaFVars #[h] <|← dfs hyps atoms i j P P' h |>.run' {} catch _ => throwError "couldn't prove {P} → {P'}" /-- Generate a proof of `Chain (· → ·) P l`. 
We assume `P : Prop` and `l : List Prop`, and that `l` is an explicit list. -/ partial def proveChain (i : ℕ) (is : List ℕ) (P : Q(Prop)) (l : Q(List Prop)) : MetaM Q(Chain (· → ·) $P $l) := do match l with | ~q([]) => return q(Chain.nil) | ~q($P' :: $l') => -- `id` is a workaround for https://github.com/leanprover-community/quote4/issues/30 let i' :: is' := id is | unreachable! have cl' : Q(Chain (· → ·) $P' $l') := ← proveChain i' is' q($P') q($l') let p ← proveImpl hyps atoms i i' P P' return q(Chain.cons $p $cl') /-- Attempt to prove `getLastD l P' → P` given an explicit list `l`. -/ partial def proveGetLastDImpl (i i' : ℕ) (is : List ℕ) (P P' : Q(Prop)) (l : Q(List Prop)) : MetaM Q(getLastD $l $P' → $P) := do match l with | ~q([]) => proveImpl hyps atoms i' i P' P | ~q($P'' :: $l') => -- `id` is a workaround for https://github.com/leanprover-community/quote4/issues/30 let i'' :: is' := id is | unreachable! proveGetLastDImpl i i'' is' P P'' l' /-- Attempt to prove a statement of the form `TFAE [P₁, P₂, ...]`. -/ def proveTFAE (is : List ℕ) (l : Q(List Prop)) : MetaM Q(TFAE $l) := do match l with | ~q([]) => return q(tfae_nil) | ~q([$P]) => return q(tfae_singleton $P) | ~q($P :: $P' :: $l') => -- `id` is a workaround for https://github.com/leanprover-community/quote4/issues/30 let i :: i' :: is' := id is | unreachable! let c ← proveChain hyps atoms i (i'::is') P q($P' :: $l') let il ← proveGetLastDImpl hyps atoms i i' is' P P' l' return q(tfae_of_cycle $c $il) /-! # `tfae_have` components -/ /-- Construct a name for a hypothesis introduced by `tfae_have`. 
-/
def mkTFAEHypName (i j : TSyntax `num) (arr : TSyntax ``impArrow) : MetaM Name := do
  -- Encode the arrow direction as an identifier-safe word.
  let arr ← match arr with
    | `(impArrow| ← ) => pure "from"
    | `(impArrow| → ) => pure "to"
    | `(impArrow| ↔ ) => pure "iff"
    | _ => throwErrorAt arr "expected '←', '→', or '↔'"
  -- e.g. `tfae_1_to_3` for `tfae_have 1 → 3`.
  return .mkSimple <| String.intercalate "_" ["tfae", s!"{i.getNat}", arr, s!"{j.getNat}"]

open Elab in
/-- The core of `tfae_have`, which behaves like `haveLetCore` in `Mathlib.Tactic.Have`.

Returns the pair of the goal for the new hypothesis and the main goal extended with that
hypothesis. -/
def tfaeHaveCore (goal : MVarId) (name : Option (TSyntax `ident)) (i j : TSyntax `num)
    (arrow : TSyntax ``impArrow) (t : Expr) : TermElabM (MVarId × MVarId) :=
  goal.withContext do
    -- Use the user-supplied name when present; otherwise derive one from the indices and arrow.
    let n := (Syntax.getId <$> name).getD <|← mkTFAEHypName i j arrow
    let (goal1, t, p) ← do
      let p ← mkFreshExprMVar t MetavarKind.syntheticOpaque n
      pure (p.mvarId!, t, p)
    let (fv, goal2) ← (← MVarId.assert goal n t p).intro1P
    -- Record binder info so the user-written name gets hover/go-to-definition support.
    if let some stx := name then
      goal2.withContext do
        Term.addTermInfo' (isBinder := true) stx (mkFVar fv)
    pure (goal1, goal2)

/-- Turn syntax for a given index into a natural number, as long as it lies between `1` and
`maxIndex`. -/
def elabIndex (i : TSyntax `num) (maxIndex : ℕ) : TacticM ℕ := do
  let i' := i.getNat
  unless Nat.ble 1 i' && Nat.ble i' maxIndex do
    throwError "{i} must be between 1 and {maxIndex}"
  return i'

/-- Construct an expression for the type `Pj → Pi`, `Pi → Pj`, or `Pi ↔ Pj`
given expressions `Pi Pj : Q(Prop)` and `impArrow` syntax `arr`, depending on
whether `arr` is `←`, `→`, or `↔` respectively. -/
def mkImplType (Pi : Q(Prop)) (arr : TSyntax ``impArrow) (Pj : Q(Prop)) : MetaM Q(Prop) := do
  match arr with
  | `(impArrow| ← ) => pure q($Pj → $Pi)
  | `(impArrow| → ) => pure q($Pi → $Pj)
  | `(impArrow| ↔ ) => pure q($Pi ↔ $Pj)
  | _ => throwErrorAt arr "expected '←', '→', or '↔'"

/-! # Tactic implementation -/

elab_rules : tactic
| `(tactic| tfae_have $[$h:ident : ]? $i:num $arr:impArrow $j:num) => do
  let goal ← getMainGoal
  goal.withContext do
    let (_, tfaeList) ← getTFAEList (← goal.getType)
    let l₀ := tfaeList.length
    -- Both indices must refer to entries of the `TFAE` list (1-based).
    let i' ← elabIndex i l₀
    let j' ← elabIndex j l₀
    let Pi := tfaeList.get! (i'-1)
    let Pj := tfaeList.get! (j'-1)
    let type ← mkImplType Pi arr Pj
    let (goal1, goal2) ← tfaeHaveCore goal h i j arr type
    replaceMainGoal [goal1, goal2]

elab_rules : tactic
| `(tactic| tfae_finish) => do
  let goal ← getMainGoal
  goal.withContext do
    let (tfaeListQ, tfaeList) ← getTFAEList (← goal.getType)
    closeMainGoal `tfae_finish <|← AtomM.run .reducible do
      let is ← tfaeList.mapM AtomM.addAtom
      let mut hyps := #[]
      -- Collect every local hypothesis of the form `P → Q` or `P ↔ Q` (an iff contributes
      -- both directions), keyed by the atom indices of its two sides.
      for hyp in ← getLocalHyps do
        let ty ← inferType hyp
        if let (``Iff, #[p1, p2]) := ty.getAppFnArgs then
          let q1 ← AtomM.addAtom p1
          let q2 ← AtomM.addAtom p2
          hyps := hyps.push (q1, q2, ← mkAppM ``Iff.mp #[hyp])
          hyps := hyps.push (q2, q1, ← mkAppM ``Iff.mpr #[hyp])
        else if ty.isArrow then
          let q1 ← AtomM.addAtom ty.bindingDomain!
          let q2 ← AtomM.addAtom ty.bindingBody!
          hyps := hyps.push (q1, q2, hyp)
      proveTFAE hyps (← get).atoms is tfaeListQ
Tactic\ToAdditive.lean
/-
Copyright (c) 2024 Miyahara Kō. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Miyahara Kō
-/
import Mathlib.Tactic.ToAdditive.Frontend

/-!
## `@[to_additive]` attributes for basic types
-/

-- These types have no multiplicative content, so each is declared to be its own
-- additive counterpart: `to_additive` translation maps them to themselves.
attribute [to_additive Empty] Empty
attribute [to_additive PEmpty] PEmpty
attribute [to_additive PUnit] PUnit
-- `existing` records the translation to the already-existing `Unit` declaration
-- rather than generating a new one.
attribute [to_additive existing Unit] Unit
Tactic\ToExpr.lean
/-
Copyright (c) 2023 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import Mathlib.Tactic.DeriveToExpr
import Mathlib.Util.WhatsNew

/-!
# `ToExpr` instances for Mathlib

This module should be imported by any module that intends to define `ToExpr` instances.
It provides necessary dependencies (the `Lean.ToLevel` class) and it also overrides the instances
that come from core Lean 4 that do not handle universe polymorphism.
(See the module `Lean.ToExpr` for the instances that are overridden.)

In addition, we provide some additional `ToExpr` instances for core definitions.
-/

section override
namespace Lean

-- Remove the core (universe-monomorphic) instance before deriving the polymorphic one.
attribute [-instance] Lean.instToExprOption

set_option autoImplicit true in
deriving instance ToExpr for Option

attribute [-instance] Lean.instToExprList

set_option autoImplicit true in
deriving instance ToExpr for List

attribute [-instance] Lean.instToExprArray

universe u in
-- Hand-written: represents an array as `List.toArray` applied to the `toExpr` of its list form.
instance {α : Type u} [ToExpr α] [ToLevel.{u}] : ToExpr (Array α) :=
  let type := toTypeExpr α
  { toExpr := fun as => mkApp2 (mkConst ``List.toArray [toLevel.{u}]) type (toExpr as.toList)
    toTypeExpr := mkApp (mkConst ``Array [toLevel.{u}]) type }

attribute [-instance] Lean.instToExprProd

set_option autoImplicit true in
deriving instance ToExpr for Prod

deriving instance ToExpr for System.FilePath

end Lean
end override

namespace Mathlib

open Lean

deriving instance ToExpr for Int

set_option autoImplicit true in
deriving instance ToExpr for ULift

universe u in
/-- Hand-written instance since `PUnit` is a `Sort` rather than a `Type`. -/
instance [ToLevel.{u}] : ToExpr PUnit.{u+1} where
  toExpr _ := mkConst ``PUnit.unit [toLevel.{u+1}]
  toTypeExpr := mkConst ``PUnit [toLevel.{u+1}]

deriving instance ToExpr for String.Pos
deriving instance ToExpr for Substring
deriving instance ToExpr for SourceInfo
deriving instance ToExpr for Syntax.Preresolved
deriving instance ToExpr for Syntax

open DataValue in
/-- Core of a hand-written `ToExpr` handler for `MData`.
Uses the `KVMap.set*` functions rather than going into the internals of
the `KVMap` data structure. -/
private def toExprMData (md : MData) : Expr := Id.run do
  -- Build up the expression by folding `KVMap.set*` calls over the entries,
  -- starting from `MData.empty`.
  let mut e := mkConst ``MData.empty
  for (k, v) in md do
    let k := toExpr k
    e := match v with
      | ofString v => mkApp3 (mkConst ``KVMap.setString) e k (mkStrLit v)
      | ofBool v => mkApp3 (mkConst ``KVMap.setBool) e k (toExpr v)
      | ofName v => mkApp3 (mkConst ``KVMap.setName) e k (toExpr v)
      | ofNat v => mkApp3 (mkConst ``KVMap.setNat) e k (mkNatLit v)
      | ofInt v => mkApp3 (mkConst ``KVMap.setInt) e k (toExpr v)
      | ofSyntax v => mkApp3 (mkConst ``KVMap.setSyntax) e k (toExpr v)
  return e

instance : ToExpr MData where
  toExpr := toExprMData
  toTypeExpr := mkConst ``MData

deriving instance ToExpr for FVarId
deriving instance ToExpr for MVarId
deriving instance ToExpr for LevelMVarId
deriving instance ToExpr for Level
deriving instance ToExpr for BinderInfo
deriving instance ToExpr for Literal
deriving instance ToExpr for Expr

end Mathlib
Tactic\ToLevel.lean
/-
Copyright (c) 2023 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import Mathlib.Tactic.PPWithUniv

/-!
# `ToLevel` class

This module defines `Lean.ToLevel`, which is the `Lean.Level` analogue to `Lean.ToExpr`.

**Warning:** Import `Mathlib.Tactic.ToExpr` instead of this one if you are writing `ToExpr`
instances. This ensures that you are using the universe polymorphic `ToExpr` instances that
override the ones from Lean 4 core.
-/

namespace Lean

/-- A class to create `Level` expressions that denote particular universe levels in Lean.
`Lean.ToLevel.toLevel.{u}` evaluates to a `Lean.Level` term representing `u` -/
@[pp_with_univ]
class ToLevel.{u} where
  /-- A `Level` that represents the universe level `u`. -/
  toLevel : Level
  /-- The universe itself. This is only here to avoid the "unused universe parameter" error. -/
  univ : Type u := Sort u

export ToLevel (toLevel)

attribute [pp_with_univ] toLevel

-- Base case: level `0` is represented by `Level.zero`.
instance : ToLevel.{0} where
  toLevel := .zero

universe u v

-- Inductive step: a representation of `u` yields one of `u+1` via `Level.succ`.
instance [ToLevel.{u}] : ToLevel.{u+1} where
  toLevel := .succ toLevel.{u}

/-- `ToLevel` for `max u v`. This is not an instance since it causes divergence. -/
def ToLevel.max [ToLevel.{u}] [ToLevel.{v}] : ToLevel.{max u v} where
  toLevel := .max toLevel.{u} toLevel.{v}

/-- `ToLevel` for `imax u v`. This is not an instance since it causes divergence. -/
def ToLevel.imax [ToLevel.{u}] [ToLevel.{v}] : ToLevel.{imax u v} where
  toLevel := .imax toLevel.{u} toLevel.{v}

end Lean
Tactic\Trace.lean
/-
Copyright (c) 2022 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Lean.Elab.Tactic.ElabTerm
import Lean.Meta.Eval

/-!
# Defines the `trace` tactic.
-/

open Lean Meta Elab Tactic

/-- Evaluates a term to a string (when possible), and prints it as a trace message. -/
elab (name := Lean.Parser.Tactic.trace) tk:"trace " val:term : tactic => do
  -- Elaborate `toString val` with expected type `String`, then evaluate the
  -- resulting expression and report the string at the position of the keyword.
  let stringType := mkConst `String
  let e ← elabTerm (← `(toString $val)) (some stringType)
  let msg ← unsafe evalExpr String stringType e
  logInfoAt tk msg
Tactic\TryThis.lean
/-
Copyright (c) 2024 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Lean

/-!
# 'Try this' tactic macro

This is a convenient shorthand intended for macro authors to be able to generate
"Try this" recommendations. (It is not the main implementation of 'Try this', which is implemented
in Lean core, see `Lean.Meta.Tactic.TryThis`.)
-/

namespace Mathlib.Tactic

open Lean

/-- Produces the text `Try this: <tac>` with the given tactic, and then executes it. -/
elab tk:"try_this" tac:tactic : tactic => do
  -- Run the tactic first: the suggestion is only recorded when the tactic succeeds.
  Elab.Tactic.evalTactic tac
  Meta.Tactic.TryThis.addSuggestion tk tac (origSpan? := ← getRef)

/-- Produces the text `Try this: <tac>` with the given conv tactic, and then executes it. -/
elab tk:"try_this" tac:conv : conv => do
  Elab.Tactic.evalTactic tac
  Meta.Tactic.TryThis.addSuggestion tk tac (origSpan? := ← getRef)
Tactic\TypeCheck.lean
/-
Copyright (c) 2022 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
-/
import Lean.Elab.Tactic.Basic
import Lean.Elab.SyntheticMVars

/-!
# The `type_check` tactic

Define the `type_check` tactic: it type checks a given expression, and traces its type.
-/

open Lean Elab Meta

/-- Type check the given expression, and trace its type. -/
elab tk:"type_check " stx:term : tactic => do
  Tactic.withMainContext do
    -- Elaborate the term (synthesizing pending metavariables), run the kernel-level
    -- `check` on it, and report its (metavariable-instantiated) type.
    let e ← Term.elabTermAndSynthesize stx none
    check e
    let ty ← Lean.instantiateMVars (← inferType e)
    Lean.logInfoAt tk m!"{ty}"
Tactic\TypeStar.lean
/-
Copyright (c) 2023 Matthew Ballard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Matthew Ballard
-/
import Lean.Elab.Term

/-!
# Support for `Sort*` and `Type*`.

These elaborate as `Sort u` and `Type u` with a fresh implicit universe variable `u`.
-/

open Lean

/-- The syntax `variable (X Y ... Z : Sort*)` creates a new distinct implicit universe
variable for each variable in the sequence. -/
elab "Sort*" : term =>
  -- Fresh level metavariable, promoted to an implicit universe parameter.
  return ← Elab.Term.levelMVarToParam (.sort (← Lean.Meta.mkFreshLevelMVar))

/-- The syntax `variable (X Y ... Z : Type*)` creates a new distinct implicit universe
variable `> 0` for each variable in the sequence. -/
elab "Type*" : term =>
  -- Same as `Sort*` but wrapped in `.succ`, so the result lives in `Type u`.
  return ← Elab.Term.levelMVarToParam (.sort (.succ (← Lean.Meta.mkFreshLevelMVar)))
Tactic\UnsetOption.lean
/-
Copyright (c) 2022 Alex J. Best. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alex J. Best
-/
import Lean.Parser.Term
import Lean.Parser.Do
import Lean.Elab.Command

/-!
# The `unset_option` command

This file defines an `unset_option` user command, which unsets user configurable options.
For example inputing `set_option blah 7` and then `unset_option blah` returns the user to the
default state before any `set_option` command is called.
This is helpful when the user does not know the default value of the option or it is cleaner not
to write it explicitly, or for some options where the default behaviour is different from any
user set value.
-/

namespace Lean.Elab

variable {m : Type → Type} [Monad m] [MonadOptions m] [MonadExceptOf Exception m] [MonadRef m]
variable [AddErrorMessageContext m] [MonadLiftT (EIO Exception) m] [MonadInfoTree m]

/-- unset the option specified by id -/
def elabUnsetOption (id : Syntax) : m Options := do
  -- We include the first argument (the keyword) for position information in case `id` is `missing`.
  addCompletionInfo <| CompletionInfo.option (← getRef)
  unsetOption id.getId.eraseMacroScopes
where
  /-- unset the given option name -/
  unsetOption (optionName : Name) : m Options := return (← getOptions).erase optionName

namespace Command

/-- Unset a user option -/
elab (name := unsetOption) "unset_option " opt:ident : command => do
  let options ← Elab.elabUnsetOption opt
  -- Keep the cached recursion limit in sync with the (possibly reset) option value,
  -- then install the updated option set into the current scope.
  modify fun s ↦ { s with maxRecDepth := maxRecDepth.get options }
  modifyScope fun scope ↦ { scope with opts := options }

end Command

end Lean.Elab
Tactic\Use.lean
/-
Copyright (c) 2022 Arthur Paulino. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Arthur Paulino, Gabriel Ebner, Kyle Miller
-/
import Lean.Meta.Tactic.Util
import Lean.Elab.Tactic.Basic

/-!
# The `use` tactic

The `use` and `use!` tactics are for instantiating one-constructor inductive types
just like the `exists` tactic, but they can be a little more flexible.

`use` is the more restrained version for mathlib4, and `use!` is the exuberant version
that more closely matches `use` from mathlib3.

Note: The `use!` tactic is almost exactly the mathlib3 `use` except that it does not try
applying `exists_prop`. See the failing test in `test/Use.lean`.
-/

namespace Mathlib.Tactic

open Lean Meta Elab Tactic

initialize registerTraceClass `tactic.use

/-- When the goal `mvarId` is an inductive datatype with a single constructor,
this applies that constructor, then returns metavariables for the non-parameter explicit arguments
along with metavariables for the parameters and implicit arguments.

The first list of returned metavariables correspond to the arguments that `⟨x,y,...⟩` notation uses.
The second list corresponds to everything else: the parameters and implicit arguments.
The third list consists of those implicit arguments that are instance implicits,
which one can try to synthesize.
The third list is a sublist of the second list.

Returns metavariables for all arguments whether or not the metavariables are assigned. -/
def applyTheConstructor (mvarId : MVarId) :
    MetaM (List MVarId × List MVarId × List MVarId) := do
  mvarId.withContext do
    mvarId.checkNotAssigned `constructor
    let target ← mvarId.getType'
    matchConstInduct target.getAppFn
      (fun _ => throwTacticEx `constructor mvarId
        m!"target is not an inductive datatype{indentExpr target}")
      fun ival us => do
        match ival.ctors with
        | [ctor] =>
          let cinfo ← getConstInfoCtor ctor
          let ctorConst := Lean.mkConst ctor us
          let (args, binderInfos, _) ← forallMetaTelescopeReducing (← inferType ctorConst)
          -- Partition argument metavariables: non-parameter explicit ones are the "anonymous
          -- constructor notation" arguments; everything else is implicit/parameter, with
          -- instance implicits additionally collected for later synthesis.
          let mut explicit := #[]
          let mut implicit := #[]
          let mut insts := #[]
          for arg in args, binderInfo in binderInfos, i in [0:args.size] do
            if cinfo.numParams ≤ i ∧ binderInfo.isExplicit then
              explicit := explicit.push arg.mvarId!
            else
              implicit := implicit.push arg.mvarId!
              if binderInfo.isInstImplicit then
                insts := insts.push arg.mvarId!
          let e := mkAppN ctorConst args
          let eType ← inferType e
          unless (← withAssignableSyntheticOpaque <| isDefEq eType target) do
            throwError m!"type mismatch{indentExpr e}\n{← mkHasTypeButIsExpectedMsg eType target}"
          mvarId.assign e
          return (explicit.toList, implicit.toList, insts.toList)
        | _ => throwTacticEx `constructor mvarId
                m!"target inductive type does not have exactly one constructor{indentExpr target}"

/-- Use the `args` to refine the goals `gs` in order, but whenever there is a single goal remaining
then first try applying a single constructor if it's for a single-constructor inductive type.
In `eager` mode, instead we always first try to refine, and if that fails we always try to
apply such a constructor no matter if it's the last goal.

Returns the remaining explicit goals `gs`, any goals `acc` due to `refine`, and a sublist of these
of instance arguments that we should try synthesizing after the loop.

The new set of goals should be `gs ++ acc`. -/
partial def useLoop (eager : Bool) (gs : List MVarId) (args : List Term) (acc insts : List MVarId) :
    TermElabM (List MVarId × List MVarId × List MVarId) := do
  trace[tactic.use] "gs = {gs}\nargs = {args}\nacc = {acc}"
  match gs, args with
  | gs, [] => return (gs, acc, insts)
  | [], arg :: _ => throwErrorAt arg "too many arguments supplied to `use`"
  | g :: gs', arg :: args' => g.withContext do
    if ← g.isAssigned then
      -- Goals might become assigned in inductive types with indices.
      -- Let's check that what's supplied is defeq to what's already there.
      let e ← Term.elabTermEnsuringType arg (← g.getType)
      unless ← isDefEq e (.mvar g) do
        throwErrorAt arg
          "argument is not definitionally equal to inferred value{indentExpr (.mvar g)}"
      return ← useLoop eager gs' args' acc insts
    -- Type ascription is a workaround for `refine` ensuring the type after synthesizing mvars.
    let refineArg ← `(tactic| refine ($arg : $(← Term.exprToSyntax (← g.getType))))
    if eager then
      -- In eager mode, first try refining with the argument before applying the constructor
      if let some newGoals ← observing? (run g do withoutRecover <| evalTactic refineArg) then
        return ← useLoop eager gs' args' (acc ++ newGoals) insts
    if eager || gs'.isEmpty then
      if let some (expl, impl, insts') ←
          observing? do
            try applyTheConstructor g
            catch e => trace[tactic.use] "Constructor. {e.toMessageData}"; throw e then
        trace[tactic.use] "expl.length = {expl.length}, impl.length = {impl.length}"
        return ← useLoop eager (expl ++ gs') args (acc ++ impl) (insts ++ insts')
    -- In eager mode, the following will give an error, which hopefully is more informative than
    -- the one provided by `applyTheConstructor`.
    let newGoals ← run g do evalTactic refineArg
    useLoop eager gs' args' (acc ++ newGoals) insts

/-- Run the `useLoop` on the main goal then discharge remaining explicit `Prop` arguments. -/
def runUse (eager : Bool) (discharger : TacticM Unit) (args : List Term) : TacticM Unit := do
  let egoals ← focus do
    let (egoals, acc, insts) ← useLoop eager (← getGoals) args [] []
    -- Try synthesizing instance arguments
    for inst in insts do
      if !(← inst.isAssigned) then
        discard <| inst.withContext <| observing? do
          inst.assign (← synthInstance (← inst.getType))
    -- Set the goals.
    setGoals (egoals ++ acc)
    pruneSolvedGoals
    pure egoals
  -- Run the discharger on non-assigned proposition metavariables
  -- (`trivial` uses `assumption`, which isn't great for non-propositions)
  for g in egoals do
    if !(← g.isAssigned) then
      g.withContext do
        if ← isProp (← g.getType) then
          trace[tactic.use] "running discharger on {g}"
          discard <| run g discharger

/-- Default discharger to try to use for the `use` and `use!` tactics.
This is similar to the `trivial` tactic but doesn't do things like `contradiction` or `decide`. -/
syntax "use_discharger" : tactic

-- The discharger is a collection of `macro_rules`, tried in order: split existentials and
-- conjunctions recursively, then fall back to `rfl`, `assumption`, or `True.intro`.
macro_rules | `(tactic| use_discharger) => `(tactic| apply exists_prop.mpr <;> use_discharger)
macro_rules | `(tactic| use_discharger) => `(tactic| apply And.intro <;> use_discharger)
macro_rules | `(tactic| use_discharger) => `(tactic| rfl)
macro_rules | `(tactic| use_discharger) => `(tactic| assumption)
macro_rules | `(tactic| use_discharger) => `(tactic| apply True.intro)

/-- Returns a `TacticM Unit` that either runs the tactic sequence from `discharger?` if it's
non-`none`, or it does `try with_reducible use_discharger`. -/
def mkUseDischarger (discharger? : Option (TSyntax ``Parser.Tactic.discharger)) :
    TacticM (TacticM Unit) := do
  let discharger ←
    if let some disch := discharger? then
      match disch with
      | `(Parser.Tactic.discharger| ($_ := $d)) => `(tactic| ($d))
      | _ => throwUnsupportedSyntax
    else
      `(tactic| try with_reducible use_discharger)
  return evalTactic discharger

/-- `use e₁, e₂, ⋯` is similar to `exists`, but unlike `exists` it is equivalent to applying the
tactic `refine ⟨e₁, e₂, ⋯, ?_, ⋯, ?_⟩` with any number of placeholders (rather than just one) and
then trying to close goals associated to the placeholders with a configurable discharger (rather
than just `try trivial`).

Examples:

```lean
example : ∃ x : Nat, x = x := by use 42

example : ∃ x : Nat, ∃ y : Nat, x = y := by use 42, 42

example : ∃ x : String × String, x.1 = x.2 := by use ("forty-two", "forty-two")
```

`use! e₁, e₂, ⋯` is similar but it applies constructors everywhere rather than just for
goals that correspond to the last argument of a constructor. This gives the effect that
nested constructors are being flattened out, with the supplied values being used along the
leaves and nodes of the tree of constructors.
With `use!` one can feed in each `42` one at a time:

```lean
example : ∃ p : Nat × Nat, p.1 = p.2 := by use! 42, 42

example : ∃ p : Nat × Nat, p.1 = p.2 := by use! (42, 42)
```

The second line makes use of the fact that `use!` tries refining with the argument before
applying a constructor. Also note that `use`/`use!` by default uses a tactic
called `use_discharger` to discharge goals, so `use! 42` will close the goal in this example since
`use_discharger` applies `rfl`, which as a consequence solves for the other `Nat` metavariable.

These tactics take an optional discharger to handle remaining explicit `Prop` constructor arguments.
By default it is `use (discharger := try with_reducible use_discharger) e₁, e₂, ⋯`.
To turn off the discharger and keep all goals, use `(discharger := skip)`.
To allow "heavy refls", use `(discharger := try use_discharger)`. -/
elab (name := useSyntax) "use" discharger?:(Parser.Tactic.discharger)?
    ppSpace args:term,+ : tactic => do
  runUse false (← mkUseDischarger discharger?) args.getElems.toList

@[inherit_doc useSyntax]
elab "use!" discharger?:(Parser.Tactic.discharger)? ppSpace args:term,+ : tactic => do
  runUse true (← mkUseDischarger discharger?) args.getElems.toList
Tactic\Variable.lean
/-
Copyright (c) 2023 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import Lean.Meta.Tactic.TryThis

/-!
# The `variable?` command

This defines a command like `variable` that automatically adds all missing typeclass arguments.
For example, `variable? [Module R M]` is the same as
`variable [Semiring R] [AddCommMonoid M] [Module R M]`, though if any of these three instance
arguments can be inferred from previous variables then they will be omitted.

An inherent limitation with this command is that variables are recorded in the scope as *syntax*.
This means that `variable?` needs to pretty print the expressions we get from typeclass synthesis
errors, and these might fail to round trip.
-/

namespace Mathlib.Command.Variable

open Lean Elab Command Parser.Term Meta

initialize registerTraceClass `variable?

register_option variable?.maxSteps : Nat := {
  defValue := 15
  group := "variable?"
  descr :=
    "The maximum number of instance arguments `variable?` will try to insert before giving up"
}

register_option variable?.checkRedundant : Bool := {
  defValue := true
  group := "variable?"
  descr := "Warn if instance arguments can be inferred from preceding ones"
}

/-- Get the type out of a bracketed binder. -/
def bracketedBinderType : Syntax → Option Term
  | `(bracketedBinderF|($_* $[: $ty?]? $(_annot?)?)) => ty?
  | `(bracketedBinderF|{$_* $[: $ty?]?}) => ty?
  | `(bracketedBinderF|⦃$_* $[: $ty?]?⦄) => ty?
  | `(bracketedBinderF|[$[$_ :]? $ty]) => some ty
  | _ => none

/-- The `variable?` command has the same syntax as `variable`, but it will auto-insert
missing instance arguments wherever they are needed.
It does not add variables that can already be deduced from others in the current context.
By default the command checks that variables aren't implied by earlier ones, but it does *not*
check that earlier variables aren't implied by later ones.
Unlike `variable`, the `variable?` command does not support changing variable binder types.

The `variable?` command will give a suggestion to replace itself with a command of the form
`variable? ...binders... => ...binders...`. The binders after the `=>` are the completed
list of binders. When this `=>` clause is present, the command verifies that the expanded
binders match the post-`=>` binders.  The purpose of this is to help keep code that uses
`variable?` resilient against changes to the typeclass hierarchy, at least in the sense
that this additional information can be used to debug issues that might arise.
One can also replace `variable? ...binders... =>` with `variable`.

The core algorithm is to try elaborating binders one at a time, and whenever there is a
typeclass instance inference failure, it synthesizes binder syntax for it and adds it to
the list of binders and tries again, recursively. There are no guarantees that this
process gives the "correct" list of binders.

Structures tagged with the `variable_alias` attribute can serve as aliases for a collection
of typeclasses. For example, given
```lean
@[variable_alias]
structure VectorSpace (k V : Type*) [Field k] [AddCommGroup V] [Module k V]
```
then `variable? [VectorSpace k V]` is
equivalent to `variable {k V : Type*} [Field k] [AddCommGroup V] [Module k V]`, assuming
that there are no pre-existing instances on `k` and `V`.
Note that this is not a simple replacement: it only adds instances not inferrable
from others in the current scope.

A word of warning: the core algorithm depends on pretty printing, so if terms that appear
in binders do not round trip, this algorithm can fail. That said, it has some support
for quantified binders such as `[∀ i, F i]`. -/
syntax (name := «variable?»)
  "variable?" (ppSpace bracketedBinder)* (" =>" (ppSpace bracketedBinder)*)? : command

/-- Attribute to record aliases for the `variable?` command.

Aliases are structures that have no fields, and additional typeclasses are recorded as
*arguments* to the structure.

Example:
```
@[variable_alias]
structure VectorSpace (k V : Type*)
  [Field k] [AddCommGroup V] [Module k V]
```
Then `variable? [VectorSpace k V]` ensures that these three typeclasses are present in
the current scope. Notice that it's looking at the arguments to the `VectorSpace` type
constructor. You should not have any fields in `variable_alias` structures.

Notice that `VectorSpace` is not a class; the `variable?` command allows non-classes with the
`variable_alias` attribute to use instance binders. -/
initialize variableAliasAttr : TagAttribute ←
  registerTagAttribute `variable_alias "Attribute to record aliases for the `variable?` command."

/-- Find a synthetic typeclass metavariable with no expr metavariables in its type. -/
def pendingActionableSynthMVar (binder : TSyntax ``bracketedBinder) :
    TermElabM (Option MVarId) := do
  let pendingMVars := (← get).pendingMVars
  if pendingMVars.isEmpty then
    return none
  -- Walk the pending metavariables oldest-first, returning the first typeclass problem
  -- whose type is free of expression metavariables (i.e. one we can act on).
  for mvarId in pendingMVars.reverse do
    let some decl ← Term.getSyntheticMVarDecl? mvarId | continue
    match decl.kind with
    | .typeClass _ =>
      let ty ← instantiateMVars (← mvarId.getType)
      if !ty.hasExprMVar then
        return mvarId
    | _ => pure ()
  throwErrorAt binder "Can not satisfy requirements for {binder} due to metavariables."

/-- Try elaborating `ty`. Returns `none` if it doesn't need any additional typeclasses,
or it returns a new binder that needs to come first. Does not add info unless it throws
an exception. -/
partial def getSubproblem
    (binder : TSyntax ``bracketedBinder) (ty : Term) :
    TermElabM (Option (MessageData × TSyntax ``bracketedBinder)) := do
  let res : Term.TermElabResult (Option (MessageData × TSyntax ``bracketedBinder)) ←
    Term.observing do
      withTheReader Term.Context (fun ctx => {ctx with ignoreTCFailures := true}) do
        Term.withAutoBoundImplicit do
          _ ← Term.elabType ty
          Term.synthesizeSyntheticMVars (postpone := .yes) (ignoreStuckTC := true)
          let fvarIds := (← getLCtx).getFVarIds
          if let some mvarId ← pendingActionableSynthMVar binder then
            trace[«variable?»] "Actionable mvar:{mvarId}"
            -- TODO alter goal based on configuration, for example Semiring -> CommRing.
            -- 1. Find the new fvars that this instance problem depends on:
            let fvarIds' := (← mvarId.getDecl).lctx.getFVarIds.filter
              (fun fvar => !(fvarIds.contains fvar))
            -- 2. Abstract the instance problem with respect to these fvars
            let goal ← mvarId.withContext do
              instantiateMVars <|
                (← mkForallFVars (usedOnly := true) (fvarIds'.map .fvar) (← mvarId.getType))
            -- Note: pretty printing is not guaranteed to round-trip, but it's what we can do.
            let ty' ← PrettyPrinter.delab goal
            let binder' ← withRef binder `(bracketedBinderF| [$ty'])
            return some (← addMessageContext m!"{mvarId}", binder')
          else
            return none
  match res with
  | .ok v _ => return v
  | .error .. => Term.applyResult res

/-- Tries elaborating binders, inserting new binders whenever typeclass inference fails.
`i` is the index of the next binder that needs to be checked.

The `toOmit` array keeps track of which binders should be removed at the end,
in particular the `variable_alias` binders and any redundant binders. -/
partial def completeBinders' (maxSteps : Nat) (gas : Nat)
    (checkRedundant : Bool)
    (binders : TSyntaxArray ``bracketedBinder)
    (toOmit : Array Bool) (i : Nat) :
    TermElabM (TSyntaxArray ``bracketedBinder × Array Bool) := do
  if 0 < gas && i < binders.size then
    let binder := binders[i]!
    trace[«variable?»] "\
      Have {(← getLCtx).getFVarIds.size} fvars and {(← getLocalInstances).size} local instances. \
      Looking at{indentD binder}"
    let sub? ← getSubproblem binder (bracketedBinderType binder).get!
    if let some (goalMsg, binder') := sub? then
      trace[«variable?»] m!"new subproblem:{indentD binder'}"
      -- If the needed binder already occurs earlier in the list, inserting it again would
      -- loop forever: report the unsatisfied dependency instead.
      if binders.any (stop := i) (· == binder') then
        let binders' := binders.extract 0 i
        throwErrorAt binder "\
          Binder{indentD binder}\nwas not able to satisfy one of its dependencies using \
          the pre-existing binder{indentD binder'}\n\n\
          This might be due to differences in implicit arguments, which are not represented \
          in binders since they are generated by pretty printing unsatisfied dependencies.\n\n\
          Current variable command:{indentD (← `(command| variable $binders'*))}\n\n\
          Local context for the unsatisfied dependency:{goalMsg}"
      -- Insert the new binder before the current one and retry the same index with less gas.
      let binders := binders.insertAt! i binder'
      completeBinders' maxSteps (gas - 1) checkRedundant binders toOmit i
    else
      let lctx ← getLCtx
      let linst ← getLocalInstances
      withOptions (fun opts => Term.checkBinderAnnotations.set opts false) <| -- for variable_alias
        Term.withAutoBoundImplicit <|
        Term.elabBinders #[binder] fun bindersElab => do
          let types : Array Expr ← bindersElab.mapM (inferType ·)
          trace[«variable?»] m!"elaborated binder types array = {types}"
          Term.synthesizeSyntheticMVarsNoPostponing -- checkpoint for withAutoBoundImplicit
          Term.withoutAutoBoundImplicit do
            let (binders, toOmit) := ← do
              match binder with
              | `(bracketedBinderF|[$[$ident? :]? $ty]) =>
                -- Check if it's an alias
                let type ← instantiateMVars (← inferType bindersElab.back)
                if ← isVariableAlias type then
                  if ident?.isSome then
                    throwErrorAt binder "`variable_alias` binders can't have an explicit name"
                  -- Switch to implicit so that `elabBinders` succeeds.
                  -- We keep it around so that it gets infotrees
                  let binder' ← withRef binder `(bracketedBinderF|{_ : $ty})
                  return (binders.set! i binder', toOmit.push true)
                -- Check that this wasn't already an instance
                let res ← try withLCtx lctx linst <| trySynthInstance type catch _ => pure .none
                if let .some _ := res then
                  if checkRedundant then
                    let mvar ← mkFreshExprMVarAt lctx linst type
                    logWarningAt binder
                      m!"Instance argument can be inferred from earlier arguments.\n{mvar.mvarId!}"
                  return (binders, toOmit.push true)
                else
                  return (binders, toOmit.push false)
              | _ => return (binders, toOmit.push false)
            completeBinders' maxSteps gas checkRedundant binders toOmit (i + 1)
  else
    if gas == 0 && i < binders.size then
      let binders' := binders.extract 0 i
      logErrorAt binders[i]! m!"Maximum recursion depth for variables! reached. This might be a \
        bug, or you can try adjusting `set_option variable?.maxSteps {maxSteps}`\n\n\
        Current variable command:{indentD (← `(command| variable $binders'*))}"
    return (binders, toOmit)
where
  /-- Is this type (after stripping its binders) headed by a `variable_alias` structure? -/
  isVariableAlias (type : Expr) : MetaM Bool := do
    forallTelescope type fun _ type => do
      if let .const name _ := type.getAppFn then
        if variableAliasAttr.hasTag (← getEnv) name then
          return true
      return false

/-- Entry point for binder completion: runs `completeBinders'` with a full tank of gas. -/
def completeBinders (maxSteps : Nat) (checkRedundant : Bool)
    (binders : TSyntaxArray ``bracketedBinder) :
    TermElabM (TSyntaxArray ``bracketedBinder × Array Bool) :=
  completeBinders' maxSteps maxSteps checkRedundant binders #[] 0

/-- Strip off whitespace and comments. -/
def cleanBinders (binders : TSyntaxArray ``bracketedBinder) :
    TSyntaxArray ``bracketedBinder := Id.run do
  let mut binders' := #[]
  for binder in binders do
    binders' := binders'.push <| ⟨binder.raw.unsetTrailing⟩
  return binders'

@[command_elab «variable?», inherit_doc «variable?»]
def elabVariables : CommandElab := fun stx =>
  match stx with
  | `(variable? $binders* $[=> $expectedBinders?*]?) => do
    let checkRedundant := variable?.checkRedundant.get (← getOptions)
    process stx checkRedundant binders expectedBinders?
  | _ => throwUnsupportedSyntax
where
  /-- Record the completed binders in the current scope, as `variable` would. -/
  extendScope (binders : TSyntaxArray ``bracketedBinder) : CommandElabM Unit := do
    for binder in binders do
      let varUIds ← getBracketedBinderIds binder |>.mapM
        (withFreshMacroScope ∘ MonadQuotation.addMacroScope)
      modifyScope fun scope =>
        { scope with varDecls := scope.varDecls.push binder, varUIds := scope.varUIds ++ varUIds }
  /-- Complete the binders, optionally check them against the post-`=>` expected binders,
  extend the scope, and emit a "Try this" suggestion when needed. -/
  process (stx : Syntax) (checkRedundant : Bool) (binders : TSyntaxArray ``bracketedBinder)
      (expectedBinders? : Option <| TSyntaxArray ``bracketedBinder) : CommandElabM Unit := do
    let binders := cleanBinders binders
    let maxSteps := variable?.maxSteps.get (← getOptions)
    trace[«variable?»] "variable?.maxSteps = {maxSteps}"
    for binder in binders do
      if (bracketedBinderType binder).isNone then
        throwErrorAt binder "variable? cannot update pre-existing variables"
    let (binders', suggest) ← runTermElabM fun _ => do
      let (binders, toOmit) ← completeBinders maxSteps checkRedundant binders
      /- Elaborate the binders again, which also adds the infotrees.
      This also makes sure the list works with auto-bound implicits at the front. -/
      Term.withAutoBoundImplicit <| Term.elabBinders binders fun _ => pure ()
      -- Filter out omitted binders
      let binders' : TSyntaxArray ``bracketedBinder :=
        (binders.zip toOmit).filterMap fun (b, omit) => if omit then none else some b
      if let some expectedBinders := expectedBinders? then
        trace[«variable?»] "checking expected binders"
        /- We re-elaborate the binders to create an expression that represents the entire
        resulting local context (auto-bound implicits mean we can't just use the `binders`
        array). -/
        let elabAndPackageBinders (binders : TSyntaxArray ``bracketedBinder) :
            TermElabM AbstractMVarsResult :=
          withoutModifyingStateWithInfoAndMessages <| Term.withAutoBoundImplicit <|
            Term.elabBinders binders fun _ => do
              let e ← mkForallFVars (← getLCtx).getFVars (.sort .zero)
              let res ← abstractMVars e
              -- Throw in the level names from the current state since `Type*` produces new
              -- level names.
              return {res with paramNames := (← get).levelNames.toArray ++ res.paramNames}
        let ctx1 ← elabAndPackageBinders binders'
        let ctx2 ← elabAndPackageBinders expectedBinders
        trace[«variable?»] "new context: paramNames = {ctx1.paramNames}, {
          ""}numMVars = {ctx1.numMVars}\n{indentD ctx1.expr}"
        trace[«variable?»] "expected context: paramNames = {ctx2.paramNames}, {
          ""}numMVars = {ctx2.numMVars}\n{indentD ctx2.expr}"
        if ctx1.paramNames == ctx2.paramNames && ctx1.numMVars == ctx2.numMVars then
          if ← isDefEq ctx1.expr ctx2.expr then
            return (binders', false)
        logWarning "Calculated binders do not match the expected binders given after `=>`."
        return (binders', true)
      else
        return (binders', true)
    extendScope binders'
    let varComm ← `(command| variable? $binders* => $binders'*)
    trace[«variable?»] "derived{indentD varComm}"
    if suggest then
      liftTermElabM <| Lean.Meta.Tactic.TryThis.addSuggestion stx (origSpan? := stx) varComm

/-- Hint for the unused variables linter. Copies the one for `variable`. -/
@[unused_variables_ignore_fn]
def ignorevariable? : Lean.Linter.IgnoreFunction := fun _ stack _ =>
  stack.matches [`null, none, `null, ``Mathlib.Command.Variable.variable?] ||
  stack.matches [`null, none, `null, `null, ``Mathlib.Command.Variable.variable?]
Tactic\WLOG.lean
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Johan Commelin, Reid Barton, Thomas Murrills
-/
import Mathlib.Tactic.Core

/-!
# Without loss of generality tactic

The tactic `wlog h : P` will add an assumption `h : P` to the main goal,
and add a new goal that requires showing that the case `h : ¬ P` can be reduced to the case
where `P` holds (typically by symmetry).

The new goal will be placed at the top of the goal stack.
-/

namespace Mathlib.Tactic

open Lean Meta Elab Term Tactic MetavarContext.MkBinding

/-- The result of running `wlog` on a goal. -/
structure WLOGResult where
  /-- The `reductionGoal` requires showing that the case `h : ¬ P` can be reduced to the case
  where `P` holds. It has two additional assumptions in its context:

  * `h : ¬ P`: the assumption that `P` does not hold
  * `H`: the statement that in the original context `P` suffices to prove the goal. -/
  reductionGoal : MVarId
  /-- The pair `(HFVarId, negHypFVarId)` of `FVarIds` for `reductionGoal`:

  * `HFVarId`: `H`, the statement that in the original context `P` suffices to prove the goal.
  * `negHypFVarId`: `h : ¬ P`, the assumption that `P` does not hold -/
  reductionFVarIds : FVarId × FVarId
  /-- The original goal with the additional assumption `h : P`. -/
  hypothesisGoal : MVarId
  /-- The `FVarId` of the hypothesis `h` in `hypothesisGoal` -/
  hypothesisFVarId : FVarId
  /-- The array of `FVarId`s that was reverted to produce the reduction hypothesis `H` in
  `reductionGoal`, which are still present in the context of `reductionGoal` (but not
  necessarily `hypothesisGoal`).
  -/
  revertedFVarIds : Array FVarId

open private withFreshCache mkAuxMVarType from Lean.MetavarContext in
/-- `wlog goal h P xs H` will return two goals: the `hypothesisGoal`, which adds an
assumption `h : P` to the context of `goal`, and the `reductionGoal`, which requires showing
that the case `h : ¬ P` can be reduced to the case where `P` holds (typically by symmetry).

In `reductionGoal`, there will be two additional assumptions:

- `h : ¬ P`: the assumption that `P` does not hold
- `H`: which is the statement that in the old context `P` suffices to prove the goal. If `H` is
  `none`, the name `this` is used.

If `xs` is `none`, all hypotheses are reverted to produce the reduction goal's hypothesis `H`.
Otherwise, the `xs` are elaborated to hypotheses in the context of `goal`, and only those
hypotheses are reverted (and any that depend on them).

If `h` is `none`, the hypotheses of types `P` and `¬ P` in both branches will be
inaccessible. -/
def _root_.Lean.MVarId.wlog (goal : MVarId) (h : Option Name) (P : Expr)
    (xs : Option (TSyntaxArray `ident) := none) (H : Option Name := none) :
    TacticM WLOGResult := goal.withContext do
  goal.checkNotAssigned `wlog
  let H := H.getD `this
  -- An anonymous `h` makes both the `P` and `¬ P` hypotheses inaccessible below.
  let inaccessible := h.isNone
  let h := h.getD `h
  /- Compute the type for H and keep track of the FVarId's reverted in doing so. (Do not modify
  the tactic state.) -/
  let HSuffix := Expr.forallE h P (← goal.getType) .default
  let fvars ← getFVarIdsAt goal xs
  let fvars := fvars.map Expr.fvar
  let lctx := (← goal.getDecl).lctx
  let (revertedFVars, HType) ← liftMkBindingM fun ctx => (do
    let f ← collectForwardDeps lctx fvars
    let revertedFVars := filterOutImplementationDetails lctx (f.map Expr.fvarId!)
    let HType ← withFreshCache do
      mkAuxMVarType lctx (revertedFVars.map Expr.fvar) .natural HSuffix
    return (revertedFVars, HType))
    { preserveOrder := false, mainModule := ctx.mainModule }
  /- Set up the goal which will suppose `h`; this begins as a goal with type H (hence HExpr), and
  h is obtained through `introNP` -/
  let HExpr ← mkFreshExprSyntheticOpaqueMVar HType
  let hGoal := HExpr.mvarId!
  /- Begin the "reduction goal" which will contain hypotheses `H` and `¬h`. For now, it only
  contains `H`. Keep track of that hypothesis' FVarId. -/
  let (HFVarId, reductionGoal) ← goal.assertHypotheses #[⟨H, HType, HExpr⟩]
  let HFVarId := HFVarId[0]!
  /- Clear the reverted fvars from the branch that will contain `h` as a hypothesis. -/
  let hGoal ← hGoal.tryClearMany revertedFVars
  /- Introduce all of the reverted fvars to the context in order to restore the original target
  as well as finally introduce the hypothesis `h`. -/
  let (_, hGoal) ← hGoal.introNP revertedFVars.size
  -- keep track of the hypothesis' FVarId
  let (hFVar, hGoal) ← if inaccessible then hGoal.intro1 else hGoal.intro1P
  /- Split the reduction goal by cases on `h`. Keep the one with `¬h` as the reduction goal, and
  prove the easy goal by applying `H` to all its premises, which are fvars in the context. -/
  let (⟨easyGoal, hyp⟩, ⟨reductionGoal, negHyp⟩) ← reductionGoal.byCases P <|
    if inaccessible then `_ else h
  easyGoal.withContext do
    -- Exclude ldecls from the `mkAppN` arguments
    let HArgFVarIds ← revertedFVars.filterM (notM ·.isLetVar)
    let HApp ← instantiateMVars <|
      mkAppN (.fvar HFVarId) (HArgFVarIds.map .fvar) |>.app (.fvar hyp)
    ensureHasNoMVars HApp
    easyGoal.assign HApp
  return ⟨reductionGoal, (HFVarId, negHyp), hGoal, hFVar, revertedFVars⟩

/-- `wlog h : P` will add an assumption `h : P` to the main goal,
and add a side goal that requires showing that the case `h : ¬ P` can be reduced to the case
where `P` holds (typically by symmetry).

The side goal will be at the top of the stack.

In this side goal, there will be two additional assumptions:
- `h : ¬ P`: the assumption that `P` does not hold
- `this`: which is the statement that in the old context `P` suffices to prove the goal.
  By default, the name `this` is used, but the idiom `with H` can be added to specify the name:
  `wlog h : P with H`.

Typically, it is useful to use the variant `wlog h : P generalizing x y`,
to revert certain parts of the context before creating the new goal.
In this way, the wlog-claim `this` can be applied to `x` and `y` in different orders
(exploiting symmetry, which is the typical use case).

By default, the entire context is reverted. -/
syntax (name := wlog) "wlog " binderIdent " : " term
  (" generalizing" (ppSpace colGt ident)*)? (" with " binderIdent)? : tactic

elab_rules : tactic
  | `(tactic| wlog $h:binderIdent : $P:term $[ generalizing $xs*]? $[ with $H:ident]?) =>
    withMainContext do
    let H := H.map (·.getId)
    -- An anonymous binder (`_`) passes `none`, making the hypothesis inaccessible.
    let h := match h with
      | `(binderIdent|$h:ident) => some h.getId
      | _ => none
    let P ← elabType P
    let goal ← getMainGoal
    let { reductionGoal, hypothesisGoal .. } ← goal.wlog h P xs H
    replaceMainGoal [reductionGoal, hypothesisGoal]
Tactic\Zify.lean
/-
Copyright (c) 2022 Moritz Doll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Moritz Doll, Mario Carneiro, Robert Y. Lewis
-/
import Mathlib.Tactic.Basic
import Mathlib.Tactic.Attr.Register
import Mathlib.Data.Int.Cast.Basic
import Mathlib.Order.Basic

/-!
# `zify` tactic

The `zify` tactic is used to shift propositions from `Nat` to `Int`.
This is often useful since `Int` has well-behaved subtraction.

```
example (a b c x y z : Nat) (h : ¬ x*y*z < 0) : c < a + 3*b := by
  zify
  zify at h
  /-
  h : ¬↑x * ↑y * ↑z < 0
  ⊢ ↑c < ↑a + 3 * ↑b
  -/
```
-/

namespace Mathlib.Tactic.Zify

open Lean
open Lean.Meta
open Lean.Parser.Tactic
open Lean.Elab.Tactic

/--
The `zify` tactic is used to shift propositions from `Nat` to `Int`.
This is often useful since `Int` has well-behaved subtraction.

```
example (a b c x y z : Nat) (h : ¬ x*y*z < 0) : c < a + 3*b := by
  zify
  zify at h
  /-
  h : ¬↑x * ↑y * ↑z < 0
  ⊢ ↑c < ↑a + 3 * ↑b
  -/
```

`zify` can be given extra lemmas to use in simplification. This is especially useful in the
presence of nat subtraction: passing `≤` arguments will allow `push_cast` to do more work.
```
example (a b c : Nat) (h : a - b < c) (hab : b ≤ a) : false := by
  zify [hab] at h
  /- h : ↑a - ↑b < ↑c -/
```
`zify` makes use of the `@[zify_simps]` attribute to move propositions,
and the `push_cast` tactic to simplify the `Int`-valued expressions.

`zify` is in some sense dual to the `lift` tactic.
`lift (z : Int) to Nat` will change the type of an
integer `z` (in the supertype) to `Nat` (the subtype), given a proof that `z ≥ 0`;
propositions concerning `z` will still be over `Int`.
`zify` changes propositions about `Nat` (the subtype) to propositions about `Int` (the
supertype), without changing the type of any variable.
-/
syntax (name := zify) "zify" (simpArgs)? (location)? : tactic

macro_rules
| `(tactic| zify $[[$simpArgs,*]]? $[at $location]?) =>
  let args := simpArgs.map (·.getElems) |>.getD #[]
  -- `decide := false` so that `simp` does not discharge `Nat` propositions by decidability.
  `(tactic|
    simp (config := {decide := false}) only [zify_simps, push_cast, $args,*] $[at $location]?)

/-- The `Simp.Context` generated by `zify`. -/
def mkZifyContext (simpArgs : Option (Syntax.TSepArray `Lean.Parser.Tactic.simpStar ",")) :
    TacticM MkSimpContextResult := do
  let args := simpArgs.map (·.getElems) |>.getD #[]
  mkSimpContext
    (← `(tactic| simp (config := {decide := false}) only [zify_simps, push_cast, $args,*])) false

/-- A variant of `applySimpResultToProp` that cannot close the goal, but does not need a meta
variable and returns a tuple of a proof and the corresponding simplified proposition. -/
def applySimpResultToProp' (proof : Expr) (prop : Expr) (r : Simp.Result) :
    MetaM (Expr × Expr) := do
  match r.proof? with
  | some eqProof => return (← mkExpectedTypeHint (← mkEqMP eqProof proof) r.expr, r.expr)
  | none =>
    -- `simp` rewrote without producing a proof term, i.e. the change is definitional.
    if r.expr != prop then
      return (← mkExpectedTypeHint proof r.expr, r.expr)
    else
      return (proof, r.expr)

/-- Translate a proof and the proposition into a zified form. -/
def zifyProof (simpArgs : Option (Syntax.TSepArray `Lean.Parser.Tactic.simpStar ","))
    (proof : Expr) (prop : Expr) : TacticM (Expr × Expr) := do
  let ctx_result ← mkZifyContext simpArgs
  let (r, _) ← simp prop ctx_result.ctx
  applySimpResultToProp' proof prop r

@[zify_simps] lemma natCast_eq (a b : Nat) : a = b ↔ (a : Int) = (b : Int) := Int.ofNat_inj.symm
@[zify_simps] lemma natCast_le (a b : Nat) : a ≤ b ↔ (a : Int) ≤ (b : Int) := Int.ofNat_le.symm
@[zify_simps] lemma natCast_lt (a b : Nat) : a < b ↔ (a : Int) < (b : Int) := Int.ofNat_lt.symm
@[zify_simps] lemma natCast_ne (a b : Nat) : a ≠ b ↔ (a : Int) ≠ (b : Int) :=
  not_congr Int.ofNat_inj.symm
@[zify_simps] lemma natCast_dvd (a b : Nat) : a ∣ b ↔ (a : Int) ∣ (b : Int) := Int.ofNat_dvd.symm

-- TODO: is it worth adding lemmas for Prime and Coprime as well?
-- Doing so in this file would require adding imports.
@[deprecated (since := "2024-04-17")] alias nat_cast_dvd := natCast_dvd -- `Nat.cast_sub` is already tagged as `norm_cast` but it does allow to use assumptions like -- `m < n` or more generally `m + k ≤ n`. We add two lemmas to increase the probability that -- `zify` will push through `ℕ` subtraction. variable {R : Type*} [AddGroupWithOne R] @[norm_cast] theorem Nat.cast_sub_of_add_le {m n k} (h : m + k ≤ n) : ((n - m : ℕ) : R) = n - m := Nat.cast_sub (m.le_add_right k |>.trans h) @[norm_cast] theorem Nat.cast_sub_of_lt {m n} (h : m < n) : ((n - m : ℕ) : R) = n - m := Nat.cast_sub h.le
Tactic\ArithMult\Init.lean
/- Copyright (c) 2023 Arend Mellendijk. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Arend Mellendijk -/ import Aesop /-! # arith_mult Rule Set This module defines the `IsMultiplicative` Aesop rule set which is used by the `arith_mult` tactic. Aesop rule sets only become visible once the file in which they're declared is imported, so we must put this declaration into its own file. -/ declare_aesop_rule_sets [IsMultiplicative]
Tactic\Attr\Core.lean
/- Copyright (c) 2023 Yury Kudryashov. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yury Kudryashov -/ import Mathlib.Tactic.Attr.Register /-! # Simp tags for core lemmas In Lean 4, an attribute declared with `register_simp_attr` cannot be used in the same file. So, we declare all `simp` attributes used in `Mathlib` in `Mathlib/Tactic/Attr/Register` and tag lemmas from the core library and the `Batteries` library with these attributes in this file. -/ attribute [simp] id_map' attribute [functor_norm, monad_norm] seq_assoc pure_seq pure_bind bind_assoc bind_pure map_pure attribute [monad_norm] seq_eq_bind_map -- Porting note: changed some `iff` lemmas to `eq` lemmas attribute [mfld_simps] id and_true true_and Function.comp_apply and_self eq_self not_false true_or or_true heq_eq_eq forall_const and_imp -- Porting note: until we change the default induction principle on `Nat`: attribute [ghost_simps] Nat.zero_eq attribute [nontriviality] eq_iff_true_of_subsingleton
Tactic\Attr\Register.lean
/-
Copyright (c) 2023 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Lean.Meta.Tactic.Simp.SimpTheorems
import Lean.Meta.Tactic.Simp.RegisterCommand
import Lean.LabelAttribute

/-!
# Attributes used in `Mathlib`

In this file we define all `simp`-like and `label`-like attributes used in `Mathlib`.

We declare all of them in one file for two reasons:

- in Lean 4, one cannot use an attribute in the same file where it was declared;
- this way it is easy to see which simp sets contain a given lemma.
-/

/-- Simp set for `functor_norm` -/
register_simp_attr functor_norm

-- Porting note:
-- in mathlib3 we declared `monad_norm` using:
--   mk_simp_attribute monad_norm none with functor_norm
-- This syntax is not supported by mathlib4's `register_simp_attr`.
-- See https://github.com/leanprover-community/mathlib4/issues/802
-- TODO: add `@[monad_norm]` to all `@[functor_norm] lemmas

/-- Simp set for `monad_norm`, an extension of the `functor_norm` set -/
register_simp_attr monad_norm

/-- The simpset `field_simps` is used by the tactic `field_simp` to
reduce an expression in a field to an expression of the form `n / d` where `n` and `d` are
division-free. -/
register_simp_attr field_simps

/-- Simp attribute for lemmas about `Even` -/
register_simp_attr parity_simps

/-- Simp attribute for lemmas about `RCLike` -/
register_simp_attr rclike_simps

/-- The simpset `rify_simps` is used by the tactic `rify` to move expressions from `ℕ`, `ℤ`, or
`ℚ` to `ℝ`. -/
register_simp_attr rify_simps

/-- The simpset `qify_simps` is used by the tactic `qify` to move expressions from `ℕ` or `ℤ` to
`ℚ` which gives a well-behaved division. -/
register_simp_attr qify_simps

/-- The simpset `zify_simps` is used by the tactic `zify` to move expressions from `ℕ` to `ℤ`
which gives a well-behaved subtraction. -/
register_simp_attr zify_simps

/-- The simpset `mfld_simps` records several simp lemmas that are especially useful in manifolds.
It is a subset of the whole set of simp lemmas, but it makes it possible to have quicker proofs
(when used with `squeeze_simp` or `simp only`) while retaining readability.

The typical use case is the following, in a file on manifolds:
If `simp [foo, bar]` is slow, replace it with `squeeze_simp [foo, bar, mfld_simps]` and paste
its output. The list of lemmas should be reasonable (contrary to the output of
`squeeze_simp [foo, bar]` which might contain tens of lemmas), and the outcome should be quick
enough. -/
register_simp_attr mfld_simps

/-- Simp set for integral rules. -/
register_simp_attr integral_simps

/-- simp set for the manipulation of typevec and arrow expressions -/
register_simp_attr typevec

/-- Simplification rules for ghost equations. -/
register_simp_attr ghost_simps

/-- The `@[nontriviality]` simp set is used by the `nontriviality` tactic to automatically
discharge theorems about the trivial case (where we know `Subsingleton α` and many theorems
in e.g. groups are trivially true). -/
register_simp_attr nontriviality

/-- A stub attribute for `is_poly`. -/
register_label_attr is_poly
Tactic\Bound\Attribute.lean
/-
Copyright (c) 2024 Geoffrey Irving. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Geoffrey Irving
-/
import Mathlib.Algebra.Group.ZeroOne
import Mathlib.Tactic.Bound.Init
import Qq

/-!
# The `bound` attribute

Any lemma tagged with `@[bound]` is registered as an apply rule for the `bound` tactic, by
converting it to either `norm apply` or `safe apply <priority>`. The classification is based
on the number and types of the lemma's hypotheses.
-/

open Lean (MetaM)
open Qq

namespace Mathlib.Tactic.Bound

initialize Lean.registerTraceClass `bound.attribute

variable {u : Lean.Level} {α : Q(Type u)}

/-- Check if an expression is zero -/
def isZero (e : Q($α)) : MetaM Bool :=
  match e with
  -- Matches a literal `(0 : α)` built via `OfNat`, for any `OfNat` instance `$i`.
  | ~q(@OfNat.ofNat.{u} _ (nat_lit 0) $i) => return true
  | _ => return false

/-- Map the arguments of an inequality expression to a score -/
def ineqPriority (a b : Q($α)) : MetaM ℕ := do
  return if (← isZero a) || (← isZero b) then 1 else 10

/-- Map a hypothesis type to a score -/
partial def hypPriority (hyp : Q(Prop)) : MetaM ℕ := do
  match hyp with
  -- Conjunctions add scores
  | ~q($a ∧ $b) => pure <| (← hypPriority a) + (← hypPriority b)
  -- Guessing (disjunction) gets a big penalty
  | ~q($a ∨ $b) => pure <| 100 + (← hypPriority a) + (← hypPriority b)
  -- Inequalities get score 1 if they contain zero, 10 otherwise
  | ~q(@LE.le _ $i $a $b) => ineqPriority a b
  | ~q(@LT.lt _ $i $a $b) => ineqPriority a b
  -- `≥`/`>` are scored with arguments flipped so zero-detection still applies
  | ~q(@GE.ge _ $i $b $a) => ineqPriority a b
  | ~q(@GT.gt _ $i $b $a) => ineqPriority a b
  -- Assume anything else is non-relevant
  | _ => pure 0

/-- Map a type to a score -/
def typePriority (decl : Lean.Name) (type : Lean.Expr) : MetaM ℕ :=
  Lean.Meta.forallTelescope type fun xs t ↦ do
    -- Reject the declaration outright if its conclusion is not an inequality.
    checkResult t
    -- The score is the sum of the scores of all hypotheses.
    xs.foldlM (fun (t : ℕ) x ↦ do return t + (← argPriority x)) 0
where
  /-- Score the type of argument `x` -/
  argPriority (x : Lean.Expr) : MetaM ℕ := do
    hypPriority (← Lean.Meta.inferType x)
  /-- Insist that our conclusion is an inequality -/
  checkResult (t : Q(Prop)) : MetaM Unit := do match t with
    | ~q(@LE.le _ $i $a $b) => return ()
    | ~q(@LT.lt _ $i $a $b) => return ()
    | ~q(@GE.ge _ $i $b $a) => return ()
    | ~q(@GT.gt _ $i $b $a) => return ()
    | _ => throwError (f!"`{decl}` has invalid type `{type}` as a 'bound' lemma: \
             it should be an inequality")

/-- Map a theorem decl to a score (0 means `norm apply`, `0 <` means `safe apply`) -/
def declPriority (decl : Lean.Name) : Lean.MetaM ℕ := do
  match (← Lean.getEnv).find? decl with
  | some info => do
    typePriority decl info.type
  | none => throwError "unknown declaration {decl}"

/-- Map a score to either `norm apply` or `safe apply <priority>` -/
def scoreToConfig (decl : Lean.Name) (score : ℕ) : Aesop.Frontend.RuleConfig :=
  let (phase, priority) := match score with
    | 0 => (Aesop.PhaseName.norm, 0) -- No hypotheses: this rule closes the goal immediately
    | s => (Aesop.PhaseName.safe, s)
  { term? := some (Lean.mkIdent decl)
    phase? := phase
    priority? := some (Aesop.Frontend.Priority.int priority)
    builder? := some (.regular .apply)
    builderOptions := {}
    ruleSets := ⟨#[`Bound]⟩ }

/-- Register a lemma as an `apply` rule for the `bound` tactic.

A lemma is appropriate for `bound` if it proves an inequality using structurally simpler
inequalities, "recursing" on the structure of the expressions involved, assuming positivity or
nonnegativity where useful. Examples include
1. `gcongr`-like inequalities over `<` and `≤` such as `f x ≤ f y` where `f` is monotone
   (note that `gcongr` supports other relations).
2. `mul_le_mul` which proves `a * b ≤ c * d` from `a ≤ c ∧ b ≤ d ∧ 0 ≤ b ∧ 0 ≤ c`
3. Positivity or nonnegativity inequalities such as `sub_nonneg`: `a ≤ b → 0 ≤ b - a`
4. Inequalities involving `1` such as `one_le_div` or `Real.one_le_exp`
5. Disjunctions where the natural recursion branches, such as `a ^ n ≤ a ^ m` when the
   inequality for `n,m` depends on whether `1 ≤ a ∨ a ≤ 1`.

Each `@[bound]` lemma is assigned a score based on the number and complexity of its hypotheses,
and the `aesop` implementation chooses lemmas with lower scores first:
1. Inequality hypotheses involving `0` add 1 to the score.
2. General inequalities add `10`.
3. Disjunctions `a ∨ b` add `100` plus the sum of the scores of `a` and `b`.

The functionality of `bound` overlaps with `positivity` and `gcongr`, but can jump back and
forth between `0 ≤ x` and `x ≤ y`-type inequalities. For example, `bound` proves
`0 ≤ c → b ≤ a → 0 ≤ a * c - b * c` by turning the goal into `b * c ≤ a * c`, then using
`mul_le_mul_of_nonneg_right`. `bound` also uses specialized lemmas for goals of the form
`1 ≤ x, 1 < x, x ≤ 1, x < 1`.

See also `@[bound_forward]` which marks a lemma as a forward rule for `bound`: these lemmas are
applied to hypotheses to extract inequalities (e.g. `HasPowerSeriesOnBall.r_pos`). -/
initialize Lean.registerBuiltinAttribute {
  name := `bound
  descr := "Register a theorem as an apply rule for the `bound` tactic."
  applicationTime := .afterCompilation
  add := fun decl stx attrKind => Lean.withRef stx do
    -- Score the lemma from its hypotheses, then install it as a global Aesop rule.
    let score ← Aesop.runTermElabMAsCoreM <| declPriority decl
    trace[bound.attribute] "'{decl}' has score '{score}'"
    let context ← Aesop.runMetaMAsCoreM Aesop.ElabM.Context.forAdditionalGlobalRules
    let (rule, ruleSets) ← Aesop.runTermElabMAsCoreM <|
      (scoreToConfig decl score).buildGlobalRule.run context
    for ruleSet in ruleSets do
      Aesop.Frontend.addGlobalRule ruleSet rule attrKind (checkNotExists := true)
  erase := fun decl =>
    let ruleFilter := { name := decl, scope := .global, builders := #[], phases := #[] }
    Aesop.Frontend.eraseGlobalRules Aesop.RuleSetNameFilter.all ruleFilter (checkExists := true)
}

/-- Attribute for `forward` rules for the `bound` tactic. `@[bound_forward]` lemmas should
produce inequalities given other hypotheses that might be in the context. A typical example is
exposing an inequality field of a structure, such as `HasPowerSeriesOnBall.r_pos`. -/
macro "bound_forward" : attr =>
  `(attr|aesop safe forward (rule_sets := [$(Lean.mkIdent `Bound):ident]))
Tactic\Bound\Init.lean
/- Copyright (c) 2024 Geoffrey Irving. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Geoffrey Irving -/ import Aesop.Frontend.Command /-! # Bound Rule Set This module defines the `Bound` Aesop rule set which is used by the `bound` tactic. Aesop rule sets only become visible once the file in which they're declared is imported, so we must put this declaration into its own file. -/ declare_aesop_rule_sets [Bound]
Tactic\CancelDenoms\Core.lean
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Algebra.Field.Basic
import Mathlib.Algebra.Order.Field.Defs
import Mathlib.Data.Tree.Basic
import Mathlib.Logic.Basic
import Mathlib.Tactic.NormNum.Core
import Mathlib.Util.SynthesizeUsing
import Mathlib.Util.Qq
import Mathlib.Algebra.Order.Field.Unbundled.Basic

/-!
# A tactic for canceling numeric denominators

This file defines tactics that cancel numeric denominators from field Expressions.

As an example, we want to transform a comparison `5*(a/3 + b/4) < c/3` into the equivalent
`5*(4*a + 3*b) < 4*c`.

## Implementation notes

The tooling here was originally written for `linarith`, not intended as an interactive tactic.
The interactive version has been split off because it is sometimes convenient to use on its own.
There are likely some rough edges to it.

Improving this tactic would be a good project for someone interested in learning tactic
programming.
-/

open Lean Parser Tactic Mathlib Meta NormNum Qq

initialize registerTraceClass `CancelDenoms

namespace CancelDenoms

/-! ### Lemmas used in the procedure -/

theorem mul_subst {α} [CommRing α] {n1 n2 k e1 e2 t1 t2 : α} (h1 : n1 * e1 = t1)
    (h2 : n2 * e2 = t2) (h3 : n1 * n2 = k) : k * (e1 * e2) = t1 * t2 := by
  rw [← h3, mul_comm n1, mul_assoc n2, ← mul_assoc n1, h1, ← mul_assoc n2, mul_comm n2,
    mul_assoc, h2]

theorem div_subst {α} [Field α] {n1 n2 k e1 e2 t1 : α} (h1 : n1 * e1 = t1) (h2 : n2 / e2 = 1)
    (h3 : n1 * n2 = k) : k * (e1 / e2) = t1 := by
  rw [← h3, mul_assoc, mul_div_left_comm, h2, ← mul_assoc, h1, mul_comm, one_mul]

theorem cancel_factors_eq_div {α} [Field α] {n e e' : α} (h : n * e = e') (h2 : n ≠ 0) :
    e = e' / n :=
  eq_div_of_mul_eq h2 <| by rwa [mul_comm] at h

theorem add_subst {α} [Ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
    n * (e1 + e2) = t1 + t2 := by simp [left_distrib, *]

theorem sub_subst {α} [Ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
    n * (e1 - e2) = t1 - t2 := by simp [left_distrib, *, sub_eq_add_neg]

theorem neg_subst {α} [Ring α] {n e t : α} (h1 : n * e = t) : n * -e = -t := by simp [*]

theorem pow_subst {α} [CommRing α] {n e1 t1 k l : α} {e2 : ℕ} (h1 : n * e1 = t1)
    (h2 : l * n ^ e2 = k) : k * (e1 ^ e2) = l * t1 ^ e2 := by
  rw [← h2, ← h1, mul_pow, mul_assoc]

theorem inv_subst {α} [Field α] {n k e : α} (h2 : e ≠ 0) (h3 : n * e = k) :
    k * (e ⁻¹) = n := by
  rw [← div_eq_mul_inv, ← h3, mul_div_cancel_right₀ _ h2]

theorem cancel_factors_lt {α} [LinearOrderedField α] {a b ad bd a' b' gcd : α}
    (ha : ad * a = a') (hb : bd * b = b') (had : 0 < ad) (hbd : 0 < bd) (hgcd : 0 < gcd) :
    (a < b) = (1 / gcd * (bd * a') < 1 / gcd * (ad * b')) := by
  rw [mul_lt_mul_left, ← ha, ← hb, ← mul_assoc, ← mul_assoc, mul_comm bd, mul_lt_mul_left]
  · exact mul_pos had hbd
  · exact one_div_pos (α := α) |>.2 hgcd

theorem cancel_factors_le {α} [LinearOrderedField α] {a b ad bd a' b' gcd : α}
    (ha : ad * a = a') (hb : bd * b = b') (had : 0 < ad) (hbd : 0 < bd) (hgcd : 0 < gcd) :
    (a ≤ b) = (1 / gcd * (bd * a') ≤ 1 / gcd * (ad * b')) := by
  rw [mul_le_mul_left, ← ha, ← hb, ← mul_assoc, ← mul_assoc, mul_comm bd, mul_le_mul_left]
  · exact mul_pos had hbd
  · exact one_div_pos (α := α) |>.2 hgcd

theorem cancel_factors_eq {α} [Field α] {a b ad bd a' b' gcd : α} (ha : ad * a = a')
    (hb : bd * b = b') (had : ad ≠ 0) (hbd : bd ≠ 0) (hgcd : gcd ≠ 0) :
    (a = b) = (1 / gcd * (bd * a') = 1 / gcd * (ad * b')) := by
  rw [← ha, ← hb, ← mul_assoc bd, ← mul_assoc ad, mul_comm bd]
  ext; constructor
  · rintro rfl
    rfl
  · intro h
    simp only [← mul_assoc] at h
    refine mul_left_cancel₀ (mul_ne_zero ?_ ?_) h
    on_goal 1 => apply mul_ne_zero
    on_goal 1 => apply div_ne_zero
    · exact one_ne_zero
    all_goals assumption

theorem cancel_factors_ne {α} [Field α] {a b ad bd a' b' gcd : α} (ha : ad * a = a')
    (hb : bd * b = b') (had : ad ≠ 0) (hbd : bd ≠ 0) (hgcd : gcd ≠ 0) :
    (a ≠ b) = (1 / gcd * (bd * a') ≠ 1 / gcd * (ad * b')) := by
  classical
  rw [eq_iff_iff, not_iff_not, cancel_factors_eq ha hb had hbd hgcd]

/-! ### Computing cancellation factors -/

/--
`findCancelFactor e` produces a natural number `n`, such that multiplying `e` by `n` will
be able to cancel all the numeric denominators in `e`. The returned `Tree` describes how to
distribute the value `n` over products inside `e`.
-/
partial def findCancelFactor (e : Expr) : ℕ × Tree ℕ :=
  match e.getAppFnArgs with
  -- Sums need the lcm of both sides' factors.
  | (``HAdd.hAdd, #[_, _, _, _, e1, e2]) | (``HSub.hSub, #[_, _, _, _, e1, e2]) =>
    let (v1, t1) := findCancelFactor e1
    let (v2, t2) := findCancelFactor e2
    let lcm := v1.lcm v2
    (lcm, .node lcm t1 t2)
  -- Products need the product of both sides' factors.
  | (``HMul.hMul, #[_, _, _, _, e1, e2]) =>
    let (v1, t1) := findCancelFactor e1
    let (v2, t2) := findCancelFactor e2
    let pd := v1 * v2
    (pd, .node pd t1 t2)
  | (``HDiv.hDiv, #[_, _, _, _, e1, e2]) =>
    -- If e2 is a rational, then it's a natural number due to the simp lemmas in `deriveThms`.
    match e2.nat? with
    | some q =>
      let (v1, t1) := findCancelFactor e1
      let n := v1 * q
      (n, .node n t1 <| .node q .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | (``Neg.neg, #[_, _, e]) => findCancelFactor e
  -- NOTE(review): the `ℕ` token in this pattern looks suspicious compared to the
  -- `#[_, _, _, _, e1, e2]` patterns above — confirm it matches as intended.
  | (``HPow.hPow, #[_, ℕ, _, _, e1, e2]) =>
    match e2.nat? with
    | some k =>
      let (v1, t1) := findCancelFactor e1
      let n := v1 ^ k
      (n, .node n t1 <| .node k .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | (``Inv.inv, #[_, _, e]) =>
    -- `e⁻¹` is only handled when `e` itself is a numeral `q`; multiplying by `q` cancels it.
    match e.nat? with
    | some q => (q, .node q .nil <| .node q .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | _ => (1, .node 1 .nil .nil)

/-- Attempt to prove the proposition `type` by `norm_num`, failing with a descriptive error. -/
def synthesizeUsingNormNum (type : Q(Prop)) : MetaM Q($type) := do
  try
    synthesizeUsingTactic' type (← `(tactic| norm_num))
  catch e =>
    throwError "Could not prove {type} using norm_num. {e.toMessageData}"

/--
`CancelResult mα e v'` provides a value for `v * e` where the denominators have been cancelled.
-/
structure CancelResult {u : Level} {α : Q(Type u)} (mα : Q(Mul $α)) (e : Q($α)) (v : Q($α)) where
  /-- An expression with denominators cancelled. -/
  cancelled : Q($α)
  /-- The proof that `cancelled` is valid. -/
  pf : Q($v * $e = $cancelled)

/--
`mkProdPrf α sα v v' tr e` produces a proof of `v'*e = e'`, where numeric denominators have been
canceled in `e'`, distributing `v` proportionally according to the tree `tr` computed by
`findCancelFactor`.

The `v'` argument is a numeral expression corresponding to `v`, which we need in order to state
the return type accurately.
-/
partial def mkProdPrf {u : Level} (α : Q(Type u)) (sα : Q(Field $α)) (v : ℕ) (v' : Q($α))
    (t : Tree ℕ) (e : Q($α)) : MetaM (CancelResult q(inferInstance) e v') := do
  let amwo : Q(AddMonoidWithOne $α) := q(inferInstance)
  trace[CancelDenoms] "mkProdPrf {e} {v}"
  match t, e with
  | .node _ lhs rhs, ~q($e1 + $e2) => do
    -- Addition: the full factor is pushed into both summands.
    let ⟨v1, hv1⟩ ← mkProdPrf α sα v v' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα v v' rhs e2
    return ⟨q($v1 + $v2), q(CancelDenoms.add_subst $hv1 $hv2)⟩
  | .node _ lhs rhs, ~q($e1 - $e2) => do
    let ⟨v1, hv1⟩ ← mkProdPrf α sα v v' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα v v' rhs e2
    return ⟨q($v1 - $v2), q(CancelDenoms.sub_subst $hv1 $hv2)⟩
  | .node _ lhs@(.node ln _ _) rhs, ~q($e1 * $e2) => do
    trace[CancelDenoms] "recursing into mul"
    -- Multiplication: split the factor as `ln * (v / ln)` between the two operands.
    have ln' := (← mkOfNat α amwo <| mkRawNatLit ln).1
    have vln' := (← mkOfNat α amwo <| mkRawNatLit (v/ln)).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα ln ln' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα (v / ln) vln' rhs e2
    let npf ← synthesizeUsingNormNum q($ln' * $vln' = $v')
    return ⟨q($v1 * $v2), q(CancelDenoms.mul_subst $hv1 $hv2 $npf)⟩
  | .node _ lhs (.node rn _ _), ~q($e1 / $e2) => do
    -- Invariant: e2 is equal to the natural number rn
    have rn' := (← mkOfNat α amwo <| mkRawNatLit rn).1
    have vrn' := (← mkOfNat α amwo <| mkRawNatLit <| v / rn).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα (v / rn) vrn' lhs e1
    let npf ← synthesizeUsingNormNum q($rn' / $e2 = 1)
    let npf2 ← synthesizeUsingNormNum q($vrn' * $rn' = $v')
    return ⟨q($v1), q(CancelDenoms.div_subst $hv1 $npf $npf2)⟩
  | t, ~q(-$e) => do
    let ⟨v, hv⟩ ← mkProdPrf α sα v v' t e
    return ⟨q(-$v), q(CancelDenoms.neg_subst $hv)⟩
  | .node _ lhs@(.node k1 _ _) (.node k2 .nil .nil), ~q($e1 ^ $e2) => do
    -- Powers: distribute `k1` into the base and keep the leftover factor `l = v / k1 ^ k2`.
    have k1' := (← mkOfNat α amwo <| mkRawNatLit k1).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα k1 k1' lhs e1
    have l : ℕ := v / (k1 ^ k2)
    have l' := (← mkOfNat α amwo <| mkRawNatLit l).1
    let npf ← synthesizeUsingNormNum q($l' * $k1' ^ $e2 = $v')
    return ⟨q($l' * $v1 ^ $e2), q(CancelDenoms.pow_subst $hv1 $npf)⟩
  | .node _ .nil (.node rn _ _), ~q($ei ⁻¹) => do
    -- Inverses of numerals: `ei` is definitionally the literal `rn` (asserted via `=Q`).
    have rn' := (← mkOfNat α amwo <| mkRawNatLit rn).1
    have vrn' := (← mkOfNat α amwo <| mkRawNatLit <| v / rn).1
    have _ : $rn' =Q $ei := ⟨⟩
    let npf ← synthesizeUsingNormNum q($rn' ≠ 0)
    let npf2 ← synthesizeUsingNormNum q($vrn' * $rn' = $v')
    return ⟨q($vrn'), q(CancelDenoms.inv_subst $npf $npf2)⟩
  | _, _ => do
    -- Fallback: nothing to cancel; `v' * e` is returned unchanged.
    return ⟨q($v' * $e), q(rfl)⟩

/-- Theorems to get expression into a form that `findCancelFactor` and `mkProdPrf` can more
easily handle. These are important for dividing by rationals and negative integers. -/
def deriveThms : List Name :=
  [``div_div_eq_mul_div, ``div_neg]

/-- Helper lemma to chain together a `simp` proof and the result of `mkProdPrf`. -/
theorem derive_trans {α} [Mul α] {a b c d : α} (h : a = b) (h' : c * b = d) : c * a = d :=
  h ▸ h'

/--
Given `e`, a term with rational division, produces a natural number `n` and a proof of
`n*e = e'`, where `e'` has no division. Assumes "well-behaved" division.
-/
def derive (e : Expr) : MetaM (ℕ × Expr) := do
  trace[CancelDenoms] "e = {e}"
  -- Pre-normalize (`deriveThms`) so nested/negative divisions have a shape we can process.
  let eSimp ← simpOnlyNames (config := Simp.neutralConfig) deriveThms e
  trace[CancelDenoms] "e simplified = {eSimp.expr}"
  let (n, t) := findCancelFactor eSimp.expr
  let ⟨u, tp, e⟩ ← inferTypeQ' eSimp.expr
  let stp : Q(Field $tp) ← synthInstanceQ q(Field $tp)
  try
    have n' := (← mkOfNat tp q(inferInstance) <| mkRawNatLit <| n).1
    let r ← mkProdPrf tp stp n n' t e
    trace[CancelDenoms] "pf : {← inferType r.pf}"
    -- Chain the simp proof (if any) with the cancellation proof.
    let pf' ←
      if let some pfSimp := eSimp.proof? then
        mkAppM ``derive_trans #[pfSimp, r.pf]
      else
        pure r.pf
    return (n, pf')
  catch E => do
    throwError "CancelDenoms.derive failed to normalize {e}.\n{E.toMessageData}"

/--
`findCompLemma e` arranges `e` in the form `lhs R rhs`, where `R ∈ {<, ≤, =, ≠}`, and returns
`lhs`, `rhs`, the `cancel_factors` lemma corresponding to `R`, and a boolean indicating whether
`R` involves the order (i.e. `<` and `≤`) or not (i.e. `=` and `≠`).
In the case of `LT`, `LE`, `GE`, and `GT` an order on the type is needed, in the last case
it is not, the final component of the return value tracks this.
-/
def findCompLemma (e : Expr) : MetaM (Option (Expr × Expr × Name × Bool)) := do
  match (← whnfR e).getAppFnArgs with
  | (``LT.lt, #[_, _, a, b]) => return (a, b, ``cancel_factors_lt, true)
  | (``LE.le, #[_, _, a, b]) => return (a, b, ``cancel_factors_le, true)
  | (``Eq, #[_, a, b]) => return (a, b, ``cancel_factors_eq, false)
  -- `a ≠ b` reduces to `¬ a = b` under `whnf`
  | (``Not, #[p]) =>
    match (← whnfR p).getAppFnArgs with
    | (``Eq, #[_, a, b]) => return (a, b, ``cancel_factors_ne, false)
    | _ => return none
  -- `≥`/`>` are normalized by flipping the sides and reusing the `≤`/`<` lemmas.
  | (``GE.ge, #[_, _, a, b]) => return (b, a, ``cancel_factors_le, true)
  | (``GT.gt, #[_, _, a, b]) => return (b, a, ``cancel_factors_lt, true)
  | _ => return none

/--
`cancelDenominatorsInType h` assumes that `h` is of the form `lhs R rhs`,
where `R ∈ {<, ≤, =, ≠, ≥, >}`.
It produces an Expression `h'` of the form `lhs' R rhs'` and a proof that `h = h'`.
Numeric denominators have been canceled in `lhs'` and `rhs'`.
-/
def cancelDenominatorsInType (h : Expr) : MetaM (Expr × Expr) := do
  let some (lhs, rhs, lem, ord) ← findCompLemma h | throwError m!"cannot kill factors"
  let (al, lhs_p) ← derive lhs
  let ⟨u, α, _⟩ ← inferTypeQ' lhs
  let amwo ← synthInstanceQ q(AddMonoidWithOne $α)
  let (ar, rhs_p) ← derive rhs
  -- Divide both sides by the gcd of the two factors so the final numerals are minimal.
  let gcd := al.gcd ar
  have al := (← mkOfNat α amwo <| mkRawNatLit al).1
  have ar := (← mkOfNat α amwo <| mkRawNatLit ar).1
  have gcd := (← mkOfNat α amwo <| mkRawNatLit gcd).1
  -- Ordered relations need strict positivity of the factors; `=`/`≠` only need nonvanishing.
  let (al_cond, ar_cond, gcd_cond) ← if ord then do
      let _ ← synthInstanceQ q(LinearOrderedField $α)
      let al_pos : Q(Prop) := q(0 < $al)
      let ar_pos : Q(Prop) := q(0 < $ar)
      let gcd_pos : Q(Prop) := q(0 < $gcd)
      pure (al_pos, ar_pos, gcd_pos)
    else do
      let _ ← synthInstanceQ q(Field $α)
      let al_ne : Q(Prop) := q($al ≠ 0)
      let ar_ne : Q(Prop) := q($ar ≠ 0)
      let gcd_ne : Q(Prop) := q($gcd ≠ 0)
      pure (al_ne, ar_ne, gcd_ne)
  let al_cond ← synthesizeUsingNormNum al_cond
  let ar_cond ← synthesizeUsingNormNum ar_cond
  let gcd_cond ← synthesizeUsingNormNum gcd_cond
  let pf ← mkAppM lem #[lhs_p, rhs_p, al_cond, ar_cond, gcd_cond]
  let pf_tp ← inferType pf
  return ((← findCompLemma pf_tp).elim default (Prod.fst ∘ Prod.snd), pf)

end CancelDenoms

/--
`cancel_denoms` attempts to remove numerals from the denominators of fractions.
It works on propositions that are field-valued inequalities.

```lean
variable [LinearOrderedField α] (a b c : α)

example (h : a / 5 + b / 4 < c) : 4*a + 5*b < 20*c := by
  cancel_denoms at h
  exact h

example (h : a > 0) : a / 5 > 0 := by
  cancel_denoms
  exact h
```
-/
syntax (name := cancelDenoms) "cancel_denoms" (location)?
: tactic open Elab Tactic def cancelDenominatorsAt (fvar : FVarId) : TacticM Unit := do let t ← instantiateMVars (← fvar.getDecl).type let (new, eqPrf) ← CancelDenoms.cancelDenominatorsInType t liftMetaTactic' fun g => do let res ← g.replaceLocalDecl fvar new eqPrf return res.mvarId def cancelDenominatorsTarget : TacticM Unit := do let (new, eqPrf) ← CancelDenoms.cancelDenominatorsInType (← getMainTarget) liftMetaTactic' fun g => g.replaceTargetEq new eqPrf def cancelDenominators (loc : Location) : TacticM Unit := do withLocation loc cancelDenominatorsAt cancelDenominatorsTarget (fun _ ↦ throwError "Failed to cancel any denominators") elab "cancel_denoms" loc?:(location)? : tactic => do cancelDenominators (expandOptLocation (Lean.mkOptionalNode loc?)) Lean.Elab.Tactic.evalTactic (← `(tactic| try norm_num [← mul_assoc] $[$loc?]?))
Tactic\CategoryTheory\BicategoricalComp.lean
/-
Copyright (c) 2022 Yuma Mizuno. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yuma Mizuno
-/
import Mathlib.CategoryTheory.Bicategory.Basic

/-!
# Bicategorical composition `⊗≫` (composition up to associators)

We provide `f ⊗≫ g`, the `bicategoricalComp` operation,
which automatically inserts associators and unitors as needed
to make the target of `f` match the source of `g`.
-/

universe w v u

open CategoryTheory Bicategory

namespace CategoryTheory

variable {B : Type u} [Bicategory.{w, v} B] {a b c d : B}

/-- A typeclass carrying a choice of bicategorical structural isomorphism between two objects.
Used by the `⊗≫` bicategorical composition operator, and the `coherence` tactic. -/
class BicategoricalCoherence (f g : a ⟶ b) where
  /-- The chosen structural isomorphism between two 1-morphisms. -/
  hom : f ⟶ g
  [isIso : IsIso hom]

/-- Notation for identities up to unitors and associators. -/
scoped[CategoryTheory.Bicategory] notation " ⊗𝟙 " =>
  BicategoricalCoherence.hom -- type as \ot 𝟙

attribute [instance] BicategoricalCoherence.isIso

noncomputable section

/-- Construct an isomorphism between two objects in a bicategorical category
out of unitors and associators. -/
def bicategoricalIso (f g : a ⟶ b) [BicategoricalCoherence f g] : f ≅ g :=
  asIso ⊗𝟙

/-- Compose two morphisms in a bicategorical category,
inserting unitors and associators between as necessary. -/
def bicategoricalComp {f g h i : a ⟶ b} [BicategoricalCoherence g h]
    (η : f ⟶ g) (θ : h ⟶ i) : f ⟶ i :=
  η ≫ ⊗𝟙 ≫ θ

-- type as \ot \gg
@[inherit_doc bicategoricalComp]
scoped[CategoryTheory.Bicategory] infixr:80 " ⊗≫ " => bicategoricalComp

/-- Compose two isomorphisms in a bicategorical category,
inserting unitors and associators between as necessary. -/
def bicategoricalIsoComp {f g h i : a ⟶ b} [BicategoricalCoherence g h]
    (η : f ≅ g) (θ : h ≅ i) : f ≅ i :=
  η ≪≫ asIso ⊗𝟙 ≪≫ θ

@[inherit_doc bicategoricalIsoComp]
scoped[CategoryTheory.Bicategory] infixr:80 " ≪⊗≫ " => bicategoricalIsoComp -- type as \ll \ot \gg

namespace BicategoricalCoherence

-- The instances below provide the structural isomorphisms used by `⊗≫`:
-- identity, whiskering, unitor insertion/removal, and reassociation.

@[simps]
instance refl (f : a ⟶ b) : BicategoricalCoherence f f :=
  ⟨𝟙 _⟩

@[simps]
instance whiskerLeft (f : a ⟶ b) (g h : b ⟶ c) [BicategoricalCoherence g h] :
    BicategoricalCoherence (f ≫ g) (f ≫ h) :=
  ⟨f ◁ ⊗𝟙⟩

@[simps]
instance whiskerRight (f g : a ⟶ b) (h : b ⟶ c) [BicategoricalCoherence f g] :
    BicategoricalCoherence (f ≫ h) (g ≫ h) :=
  ⟨⊗𝟙 ▷ h⟩

@[simps]
instance tensorRight (f : a ⟶ b) (g : b ⟶ b) [BicategoricalCoherence (𝟙 b) g] :
    BicategoricalCoherence f (f ≫ g) :=
  ⟨(ρ_ f).inv ≫ f ◁ ⊗𝟙⟩

@[simps]
instance tensorRight' (f : a ⟶ b) (g : b ⟶ b) [BicategoricalCoherence g (𝟙 b)] :
    BicategoricalCoherence (f ≫ g) f :=
  ⟨f ◁ ⊗𝟙 ≫ (ρ_ f).hom⟩

@[simps]
instance left (f g : a ⟶ b) [BicategoricalCoherence f g] :
    BicategoricalCoherence (𝟙 a ≫ f) g :=
  ⟨(λ_ f).hom ≫ ⊗𝟙⟩

@[simps]
instance left' (f g : a ⟶ b) [BicategoricalCoherence f g] :
    BicategoricalCoherence f (𝟙 a ≫ g) :=
  ⟨⊗𝟙 ≫ (λ_ g).inv⟩

@[simps]
instance right (f g : a ⟶ b) [BicategoricalCoherence f g] :
    BicategoricalCoherence (f ≫ 𝟙 b) g :=
  ⟨(ρ_ f).hom ≫ ⊗𝟙⟩

@[simps]
instance right' (f g : a ⟶ b) [BicategoricalCoherence f g] :
    BicategoricalCoherence f (g ≫ 𝟙 b) :=
  ⟨⊗𝟙 ≫ (ρ_ g).inv⟩

@[simps]
instance assoc (f : a ⟶ b) (g : b ⟶ c) (h : c ⟶ d) (i : a ⟶ d)
    [BicategoricalCoherence (f ≫ g ≫ h) i] : BicategoricalCoherence ((f ≫ g) ≫ h) i :=
  ⟨(α_ f g h).hom ≫ ⊗𝟙⟩

@[simps]
instance assoc' (f : a ⟶ b) (g : b ⟶ c) (h : c ⟶ d) (i : a ⟶ d)
    [BicategoricalCoherence i (f ≫ g ≫ h)] : BicategoricalCoherence i ((f ≫ g) ≫ h) :=
  ⟨⊗𝟙 ≫ (α_ f g h).inv⟩

end BicategoricalCoherence

@[simp]
theorem bicategoricalComp_refl {f g h : a ⟶ b} (η : f ⟶ g) (θ : g ⟶ h) :
    η ⊗≫ θ = η ≫ θ := by
  dsimp [bicategoricalComp]; simp

example {f' : a ⟶ d} {f : a ⟶ b} {g : b ⟶ c} {h : c ⟶ d} {h' : a ⟶ d}
    (η : f' ⟶ f ≫ g ≫ h) (θ : (f ≫ g) ≫ h ⟶ h') : f' ⟶ h' :=
  η ⊗≫ θ

-- To automatically insert unitors/associators at the beginning or end,
-- you can use `η ⊗≫ 𝟙 _`
example {f' : a ⟶ d} {f : a ⟶ b} {g : b ⟶ c} {h : c ⟶ d} (η : f' ⟶ (f ≫ g) ≫ h) :
    f' ⟶ f ≫ g ≫ h :=
  η ⊗≫ 𝟙 _

end

end CategoryTheory
Tactic\CategoryTheory\BicategoryCoherence.lean
/-
Copyright (c) 2022 Yuma Mizuno. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yuma Mizuno
-/
import Mathlib.CategoryTheory.Bicategory.Coherence
import Mathlib.Tactic.CategoryTheory.BicategoricalComp

/-!
# A `coherence` tactic for bicategories

We provide a `bicategory_coherence` tactic,
which proves that any two 2-morphisms (with the same source and target)
in a bicategory which are built out of associators and unitors are equal.

This file mainly deals with the type class setup for the coherence tactic. The actual front end
tactic is given in `Mathlib.Tactic.CategoryTheory.Coherence` at the same time as the coherence
tactic for monoidal categories.
-/

noncomputable section

universe w v u

open CategoryTheory CategoryTheory.FreeBicategory

open scoped Bicategory

variable {B : Type u} [Bicategory.{w, v} B] {a b c d e : B}

namespace Mathlib.Tactic.BicategoryCoherence

/-- A typeclass carrying a choice of lift of a 1-morphism from `B` to `FreeBicategory B`. -/
class LiftHom {a b : B} (f : a ⟶ b) where
  /-- A lift of a morphism to the free bicategory.
  This should only exist for "structural" morphisms. -/
  lift : of.obj a ⟶ of.obj b

instance liftHomId : LiftHom (𝟙 a) where lift := 𝟙 (of.obj a)

instance liftHomComp (f : a ⟶ b) (g : b ⟶ c) [LiftHom f] [LiftHom g] : LiftHom (f ≫ g) where
  lift := LiftHom.lift f ≫ LiftHom.lift g

-- Low-priority fallback: any morphism lifts as a generator.
instance (priority := 100) liftHomOf (f : a ⟶ b) : LiftHom f where lift := of.map f

/-- A typeclass carrying a choice of lift of a 2-morphism from `B` to `FreeBicategory B`. -/
class LiftHom₂ {f g : a ⟶ b} [LiftHom f] [LiftHom g] (η : f ⟶ g) where
  /-- A lift of a 2-morphism to the free bicategory.
  This should only exist for "structural" 2-morphisms. -/
  lift : LiftHom.lift f ⟶ LiftHom.lift g

instance liftHom₂Id (f : a ⟶ b) [LiftHom f] : LiftHom₂ (𝟙 f) where lift := 𝟙 _

instance liftHom₂LeftUnitorHom (f : a ⟶ b) [LiftHom f] : LiftHom₂ (λ_ f).hom where
  lift := (λ_ (LiftHom.lift f)).hom

instance liftHom₂LeftUnitorInv (f : a ⟶ b) [LiftHom f] : LiftHom₂ (λ_ f).inv where
  lift := (λ_ (LiftHom.lift f)).inv

instance liftHom₂RightUnitorHom (f : a ⟶ b) [LiftHom f] : LiftHom₂ (ρ_ f).hom where
  lift := (ρ_ (LiftHom.lift f)).hom

instance liftHom₂RightUnitorInv (f : a ⟶ b) [LiftHom f] : LiftHom₂ (ρ_ f).inv where
  lift := (ρ_ (LiftHom.lift f)).inv

instance liftHom₂AssociatorHom (f : a ⟶ b) (g : b ⟶ c) (h : c ⟶ d) [LiftHom f] [LiftHom g]
    [LiftHom h] : LiftHom₂ (α_ f g h).hom where
  lift := (α_ (LiftHom.lift f) (LiftHom.lift g) (LiftHom.lift h)).hom

instance liftHom₂AssociatorInv (f : a ⟶ b) (g : b ⟶ c) (h : c ⟶ d) [LiftHom f] [LiftHom g]
    [LiftHom h] : LiftHom₂ (α_ f g h).inv where
  lift := (α_ (LiftHom.lift f) (LiftHom.lift g) (LiftHom.lift h)).inv

instance liftHom₂Comp {f g h : a ⟶ b} [LiftHom f] [LiftHom g] [LiftHom h] (η : f ⟶ g)
    (θ : g ⟶ h) [LiftHom₂ η] [LiftHom₂ θ] : LiftHom₂ (η ≫ θ) where
  lift := LiftHom₂.lift η ≫ LiftHom₂.lift θ

instance liftHom₂WhiskerLeft (f : a ⟶ b) [LiftHom f] {g h : b ⟶ c} (η : g ⟶ h)
    [LiftHom g] [LiftHom h] [LiftHom₂ η] : LiftHom₂ (f ◁ η) where
  lift := LiftHom.lift f ◁ LiftHom₂.lift η

instance liftHom₂WhiskerRight {f g : a ⟶ b} (η : f ⟶ g) [LiftHom f] [LiftHom g] [LiftHom₂ η]
    {h : b ⟶ c} [LiftHom h] : LiftHom₂ (η ▷ h) where
  lift := LiftHom₂.lift η ▷ LiftHom.lift h

open Lean Elab Tactic Meta

/-- Helper function for throwing exceptions. -/
def exception {α : Type} (g : MVarId) (msg : MessageData) : MetaM α :=
  throwTacticEx `bicategorical_coherence g msg

/-- Helper function for throwing exceptions with respect to the main goal. -/
def exception' (msg : MessageData) : TacticM Unit := do
  try
    liftMetaTactic (exception (msg := msg))
  catch _ =>
    -- There might not be any goals
    throwError msg

set_option quotPrecheck false in
/-- Auxiliary definition for `bicategorical_coherence`. -/
-- We could construct this expression directly without using `elabTerm`,
-- but it would require preparing many implicit arguments by hand.
def mkLiftMap₂LiftExpr (e : Expr) : TermElabM Expr := do
  Term.elabTerm
    (← ``((FreeBicategory.lift (Prefunctor.id _)).map₂ (LiftHom₂.lift $(← Term.exprToSyntax e))))
    none

/-- Coherence tactic for bicategories. -/
def bicategory_coherence (g : MVarId) : TermElabM Unit := g.withContext do
  withOptions (fun opts => synthInstance.maxSize.set opts
    (max 256 (synthInstance.maxSize.get opts))) do
  -- TODO: is this `dsimp only` step necessary? It doesn't appear to be in the tests below.
  let (ty, _) ← dsimp (← g.getType) (← Simp.Context.ofNames [] true)
  let some (_, lhs, rhs) := (← whnfR ty).eq? | exception g "Not an equation of morphisms."
  let lift_lhs ← mkLiftMap₂LiftExpr lhs
  let lift_rhs ← mkLiftMap₂LiftExpr rhs
  -- This new equation is defeq to the original by assumption
  -- on the `LiftHom` instances.
  let g₁ ← g.change (← mkEq lift_lhs lift_rhs)
  let [g₂] ← g₁.applyConst ``congrArg
    | exception g "congrArg failed in coherence"
  let [] ← g₂.applyConst ``Subsingleton.elim
    | exception g "This shouldn't happen; Subsingleton.elim does not create goals."

/-- Coherence tactic for bicategories.
Use `pure_coherence` instead, which is a frontend to this one. -/
elab "bicategory_coherence" : tactic => do bicategory_coherence (← getMainGoal)

open Lean.Parser.Tactic

/-- Simp lemmas for rewriting a 2-morphism into a normal form. -/
syntax (name := whisker_simps) "whisker_simps" (config)? : tactic

@[inherit_doc whisker_simps] elab_rules : tactic
| `(tactic| whisker_simps $[$cfg]?) => do
  evalTactic (← `(tactic|
    simp $[$cfg]? only [Category.assoc,
      Bicategory.comp_whiskerLeft, Bicategory.id_whiskerLeft,
      Bicategory.whiskerRight_comp, Bicategory.whiskerRight_id,
      Bicategory.whiskerLeft_comp, Bicategory.whiskerLeft_id,
      Bicategory.comp_whiskerRight, Bicategory.id_whiskerRight, Bicategory.whisker_assoc]
    ))

-- We have unused typeclass arguments here.
-- They are intentional, to ensure that `simp only [assoc_liftHom₂]` only left associates
-- bicategorical structural morphisms.
/-- Auxiliary simp lemma for the `coherence` tactic:
this moves brackets to the left in order to expose a maximal prefix
built out of unitors and associators. -/
@[nolint unusedArguments]
theorem assoc_liftHom₂ {f g h i : a ⟶ b} [LiftHom f] [LiftHom g] [LiftHom h]
    (η : f ⟶ g) (θ : g ⟶ h) (ι : h ⟶ i) [LiftHom₂ η] [LiftHom₂ θ] :
    η ≫ θ ≫ ι = (η ≫ θ) ≫ ι :=
  (Category.assoc _ _ _).symm

end BicategoryCoherence

end Tactic

end Mathlib
Tactic\CategoryTheory\Coherence.lean
/-
Copyright (c) 2022. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Yuma Mizuno, Oleksandr Manzyuk
-/
import Mathlib.CategoryTheory.Monoidal.Free.Coherence
import Mathlib.Lean.Meta
import Mathlib.Tactic.CategoryTheory.BicategoryCoherence
import Mathlib.Tactic.CategoryTheory.MonoidalComp

/-!
# A `coherence` tactic for monoidal categories

We provide a `coherence` tactic,
which proves equations where the two sides differ by replacing
strings of monoidal structural morphisms with other such strings.
(The replacements are always equalities by the monoidal coherence theorem.)

A simpler version of this tactic is `pure_coherence`,
which proves that any two morphisms (with the same source and target)
in a monoidal category which are built out of associators and unitors
are equal.
-/

universe v u

open CategoryTheory FreeMonoidalCategory

-- As the lemmas and typeclasses in this file are not intended for use outside of the tactic,
-- we put everything inside a namespace.
namespace Mathlib.Tactic.Coherence

variable {C : Type u} [Category.{v} C]

open scoped MonoidalCategory

noncomputable section lifting

variable [MonoidalCategory C]

/-- A typeclass carrying a choice of lift of an object from `C` to `FreeMonoidalCategory C`.
It must be the case that `projectObj id (LiftObj.lift x) = x` by defeq. -/
class LiftObj (X : C) where
  protected lift : FreeMonoidalCategory C

instance LiftObj_unit : LiftObj (𝟙_ C) := ⟨unit⟩

instance LiftObj_tensor (X Y : C) [LiftObj X] [LiftObj Y] : LiftObj (X ⊗ Y) where
  lift := LiftObj.lift X ⊗ LiftObj.lift Y

-- Low-priority fallback: any object lifts as a generator.
instance (priority := 100) LiftObj_of (X : C) : LiftObj X := ⟨of X⟩

/-- A typeclass carrying a choice of lift of a morphism from `C` to `FreeMonoidalCategory C`.
It must be the case that `projectMap id _ _ (LiftHom.lift f) = f` by defeq. -/
class LiftHom {X Y : C} [LiftObj X] [LiftObj Y] (f : X ⟶ Y) where
  protected lift : LiftObj.lift X ⟶ LiftObj.lift Y

instance LiftHom_id (X : C) [LiftObj X] : LiftHom (𝟙 X) := ⟨𝟙 _⟩

instance LiftHom_left_unitor_hom (X : C) [LiftObj X] : LiftHom (λ_ X).hom where
  lift := (λ_ (LiftObj.lift X)).hom

instance LiftHom_left_unitor_inv (X : C) [LiftObj X] : LiftHom (λ_ X).inv where
  lift := (λ_ (LiftObj.lift X)).inv

instance LiftHom_right_unitor_hom (X : C) [LiftObj X] : LiftHom (ρ_ X).hom where
  lift := (ρ_ (LiftObj.lift X)).hom

instance LiftHom_right_unitor_inv (X : C) [LiftObj X] : LiftHom (ρ_ X).inv where
  lift := (ρ_ (LiftObj.lift X)).inv

instance LiftHom_associator_hom (X Y Z : C) [LiftObj X] [LiftObj Y] [LiftObj Z] :
    LiftHom (α_ X Y Z).hom where
  lift := (α_ (LiftObj.lift X) (LiftObj.lift Y) (LiftObj.lift Z)).hom

instance LiftHom_associator_inv (X Y Z : C) [LiftObj X] [LiftObj Y] [LiftObj Z] :
    LiftHom (α_ X Y Z).inv where
  lift := (α_ (LiftObj.lift X) (LiftObj.lift Y) (LiftObj.lift Z)).inv

instance LiftHom_comp {X Y Z : C} [LiftObj X] [LiftObj Y] [LiftObj Z] (f : X ⟶ Y) (g : Y ⟶ Z)
    [LiftHom f] [LiftHom g] : LiftHom (f ≫ g) where
  lift := LiftHom.lift f ≫ LiftHom.lift g

instance liftHom_WhiskerLeft (X : C) [LiftObj X] {Y Z : C} [LiftObj Y] [LiftObj Z]
    (f : Y ⟶ Z) [LiftHom f] : LiftHom (X ◁ f) where
  lift := LiftObj.lift X ◁ LiftHom.lift f

instance liftHom_WhiskerRight {X Y : C} (f : X ⟶ Y) [LiftObj X] [LiftObj Y] [LiftHom f]
    {Z : C} [LiftObj Z] : LiftHom (f ▷ Z) where
  lift := LiftHom.lift f ▷ LiftObj.lift Z

instance LiftHom_tensor {W X Y Z : C} [LiftObj W] [LiftObj X] [LiftObj Y] [LiftObj Z]
    (f : W ⟶ X) (g : Y ⟶ Z) [LiftHom f] [LiftHom g] : LiftHom (f ⊗ g) where
  lift := LiftHom.lift f ⊗ LiftHom.lift g

end lifting

open Lean Meta Elab Tactic

/-- Helper function for throwing exceptions. -/
def exception {α : Type} (g : MVarId) (msg : MessageData) : MetaM α :=
  throwTacticEx `monoidal_coherence g msg

/-- Helper function for throwing exceptions with respect to the main goal. -/
def exception' (msg : MessageData) : TacticM Unit := do
  try
    liftMetaTactic (exception (msg := msg))
  catch _ =>
    -- There might not be any goals
    throwError msg

/-- Auxiliary definition for `monoidal_coherence`. -/
-- We could construct this expression directly without using `elabTerm`,
-- but it would require preparing many implicit arguments by hand.
def mkProjectMapExpr (e : Expr) : TermElabM Expr := do
  Term.elabTerm
    (← ``(FreeMonoidalCategory.projectMap _root_.id _ _
      (LiftHom.lift $(← Term.exprToSyntax e)))) none

/-- Coherence tactic for monoidal categories. -/
def monoidal_coherence (g : MVarId) : TermElabM Unit := g.withContext do
  withOptions (fun opts => synthInstance.maxSize.set opts
    (max 512 (synthInstance.maxSize.get opts))) do
  -- TODO: is this `dsimp only` step necessary? It doesn't appear to be in the tests below.
  let (ty, _) ← dsimp (← g.getType) (← Simp.Context.ofNames [] true)
  let some (_, lhs, rhs) := (← whnfR ty).eq? | exception g "Not an equation of morphisms."
  let projectMap_lhs ← mkProjectMapExpr lhs
  let projectMap_rhs ← mkProjectMapExpr rhs
  -- This new equation is defeq to the original by assumption
  -- on the `LiftObj` and `LiftHom` instances.
  let g₁ ← g.change (← mkEq projectMap_lhs projectMap_rhs)
  let [g₂] ← g₁.applyConst ``congrArg
    | exception g "congrArg failed in coherence"
  let [] ← g₂.applyConst ``Subsingleton.elim
    | exception g "This shouldn't happen; Subsingleton.elim does not create goals."

/-- Coherence tactic for monoidal categories.
Use `pure_coherence` instead, which is a frontend to this one. -/
elab "monoidal_coherence" : tactic => do monoidal_coherence (← getMainGoal)

open Mathlib.Tactic.BicategoryCoherence

/-- `pure_coherence` uses the coherence theorem for monoidal categories to prove the goal.
It can prove any equality made up only of associators, unitors, and identities.
```lean
example {C : Type} [Category C] [MonoidalCategory C] :
    (λ_ (𝟙_ C)).hom = (ρ_ (𝟙_ C)).hom := by
  pure_coherence
```

Users will typically just use the `coherence` tactic,
which can also cope with identities of the form
`a ≫ f ≫ b ≫ g ≫ c = a' ≫ f ≫ b' ≫ g ≫ c'`
where `a = a'`, `b = b'`, and `c = c'` can be proved using `pure_coherence`
-/
elab (name := pure_coherence) "pure_coherence" : tactic => do
  let g ← getMainGoal
  monoidal_coherence g <|> bicategory_coherence g

/-- Auxiliary simp lemma for the `coherence` tactic:
this moves brackets to the left in order to expose a maximal prefix
built out of unitors and associators. -/
-- We have unused typeclass arguments here.
-- They are intentional, to ensure that `simp only [assoc_LiftHom]` only left associates
-- monoidal structural morphisms.
@[nolint unusedArguments]
lemma assoc_liftHom {W X Y Z : C} [LiftObj W] [LiftObj X] [LiftObj Y]
    (f : W ⟶ X) (g : X ⟶ Y) (h : Y ⟶ Z) [LiftHom f] [LiftHom g] :
    f ≫ (g ≫ h) = (f ≫ g) ≫ h :=
  (Category.assoc _ _ _).symm

/--
Internal tactic used in `coherence`.

Rewrites an equation `f = g` as `f₀ ≫ f₁ = g₀ ≫ g₁`,
where `f₀` and `g₀` are maximal prefixes of `f` and `g` (possibly after reassociating)
which are "liftable" (i.e. expressible as compositions of unitors and associators).
-/
elab (name := liftable_prefixes) "liftable_prefixes" : tactic => do
  withOptions (fun opts => synthInstance.maxSize.set opts
    (max 256 (synthInstance.maxSize.get opts))) do
  evalTactic (← `(tactic|
    (simp (config := {failIfUnchanged := false}) only
      [monoidalComp, Category.assoc, MonoidalCoherence.hom]) <;>
    (apply (cancel_epi (𝟙 _)).1 <;> try infer_instance) <;>
    (simp (config := {failIfUnchanged := false}) only
      [assoc_liftHom, Mathlib.Tactic.BicategoryCoherence.assoc_liftHom₂])))

lemma insert_id_lhs {C : Type*} [Category C] {X Y : C} (f g : X ⟶ Y) (w : f ≫ 𝟙 _ = g) :
    f = g := by
  simpa using w

lemma insert_id_rhs {C : Type*} [Category C] {X Y : C} (f g : X ⟶ Y) (w : f = g ≫ 𝟙 _) :
    f = g := by
  simpa using w

/-- If either the lhs or rhs is not a composition, compose it on the right with an identity. -/
def insertTrailingIds (g : MVarId) : MetaM MVarId := do
  let some (_, lhs, rhs) := (← withReducible g.getType').eq? | exception g "Not an equality."
  let mut g := g
  if !(lhs.isAppOf ``CategoryStruct.comp) then
    let [g'] ← g.applyConst ``insert_id_lhs | exception g "failed to apply insert_id_lhs"
    g := g'
  if !(rhs.isAppOf ``CategoryStruct.comp) then
    let [g'] ← g.applyConst ``insert_id_rhs | exception g "failed to apply insert_id_rhs"
    g := g'
  return g

/-- The main part of `coherence` tactic. -/
-- Porting note: this is an ugly port, using too many `evalTactic`s.
-- We can refactor later into either a `macro` (but the flow control is awkward)
-- or a `MetaM` tactic.
def coherence_loop (maxSteps := 37) : TacticM Unit :=
  match maxSteps with
  | 0 => exception' "`coherence` tactic reached iteration limit"
  | maxSteps' + 1 => do
    -- To prove an equality `f = g` in a monoidal category,
    -- first try the `pure_coherence` tactic on the entire equation:
    evalTactic (← `(tactic| pure_coherence)) <|> do
    -- Otherwise, rearrange so we have a maximal prefix of each side
    -- that is built out of unitors and associators:
    evalTactic (← `(tactic| liftable_prefixes)) <|>
      exception' "Something went wrong in the `coherence` tactic: \
        is the target an equation in a monoidal category?"
    -- The goal should now look like `f₀ ≫ f₁ = g₀ ≫ g₁`,
    liftMetaTactic MVarId.congrCore
    -- and now we have two goals `f₀ = g₀` and `f₁ = g₁`.
    -- Discharge the first using `coherence`,
    evalTactic (← `(tactic| { pure_coherence })) <|>
      exception' "`coherence` tactic failed, subgoal not true in the free monoidal category"
    -- Then check that either `g₀` is identically `g₁`,
    evalTactic (← `(tactic| rfl)) <|> do
      -- or that both are compositions,
      liftMetaTactic' insertTrailingIds
      liftMetaTactic MVarId.congrCore
      -- with identical first terms,
      evalTactic (← `(tactic| rfl)) <|>
        exception' "`coherence` tactic failed, non-structural morphisms don't match"
      -- and whose second terms can be identified by recursively called `coherence`.
      coherence_loop maxSteps'

open Lean.Parser.Tactic

/-- Simp lemmas for rewriting a hom in monoidal categories into a normal form. -/
syntax (name := monoidal_simps) "monoidal_simps" (config)? : tactic

@[inherit_doc monoidal_simps] elab_rules : tactic
| `(tactic| monoidal_simps $[$cfg]?) => do
  evalTactic (← `(tactic|
    simp $[$cfg]? only [
      Category.assoc,
      MonoidalCategory.tensor_whiskerLeft, MonoidalCategory.id_whiskerLeft,
      MonoidalCategory.whiskerRight_tensor, MonoidalCategory.whiskerRight_id,
      MonoidalCategory.whiskerLeft_comp, MonoidalCategory.whiskerLeft_id,
      MonoidalCategory.comp_whiskerRight, MonoidalCategory.id_whiskerRight,
      MonoidalCategory.whisker_assoc,
      MonoidalCategory.id_tensorHom, MonoidalCategory.tensorHom_id];
    -- I'm not sure if `tensorHom` should be expanded.
    try simp only [MonoidalCategory.tensorHom_def]
    ))

/--
Use the coherence theorem for monoidal categories to solve equations in a monoidal category,
where the two sides only differ by replacing
strings of monoidal structural morphisms
(that is, associators, unitors, and identities)
with different strings of structural morphisms with the same source and target.

That is, `coherence` can handle goals of the form
`a ≫ f ≫ b ≫ g ≫ c = a' ≫ f ≫ b' ≫ g ≫ c'`
where `a = a'`, `b = b'`, and `c = c'` can be proved using `pure_coherence`.

(If you have very large equations on which `coherence` is unexpectedly failing,
you may need to increase the typeclass search depth,
using e.g. `set_option synthInstance.maxSize 500`.)
-/
syntax (name := coherence) "coherence" : tactic

@[inherit_doc coherence] elab_rules : tactic
| `(tactic| coherence) => do
  evalTactic (← `(tactic|
    (simp (config := {failIfUnchanged := false}) only
      [bicategoricalComp, BicategoricalCoherence.hom, monoidalComp]);
    whisker_simps (config := {failIfUnchanged := false});
    monoidal_simps (config := {failIfUnchanged := false})))
  coherence_loop

end Coherence

end Tactic

end Mathlib
Tactic\CategoryTheory\Elementwise.lean
/- Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Kyle Miller -/ import Mathlib.CategoryTheory.ConcreteCategory.Basic import Mathlib.Util.AddRelatedDecl import Batteries.Tactic.Lint /-! # Tools to reformulate category-theoretic lemmas in concrete categories ## The `elementwise` attribute The `elementwise` attribute generates lemmas for concrete categories from lemmas that equate morphisms in a category. A sort of inverse to this for the `Type*` category is the `@[higher_order]` attribute. For more details, see the documentation attached to the `syntax` declaration. ## Main definitions - The `@[elementwise]` attribute. - The ``elementwise_of% h` term elaborator. ## Implementation This closely follows the implementation of the `@[reassoc]` attribute, due to Simon Hudon and reimplemented by Scott Morrison in Lean 4. -/ open Lean Meta Elab Tactic open Mathlib.Tactic namespace Tactic.Elementwise open CategoryTheory section theorems universe u theorem forall_congr_forget_Type (α : Type u) (p : α → Prop) : (∀ (x : (forget (Type u)).obj α), p x) ↔ ∀ (x : α), p x := Iff.rfl attribute [local instance] ConcreteCategory.instFunLike ConcreteCategory.hasCoeToSort theorem forget_hom_Type (α β : Type u) (f : α ⟶ β) : DFunLike.coe f = f := rfl theorem hom_elementwise {C : Type*} [Category C] [ConcreteCategory C] {X Y : C} {f g : X ⟶ Y} (h : f = g) (x : X) : f x = g x := by rw [h] end theorems /-- List of simp lemmas to apply to the elementwise theorem. -/ def elementwiseThms : List Name := [``CategoryTheory.coe_id, ``CategoryTheory.coe_comp, ``CategoryTheory.comp_apply, ``CategoryTheory.id_apply, -- further simplifications if the category is `Type` ``forget_hom_Type, ``forall_congr_forget_Type, -- simp can itself simplify trivial equalities into `true`. Adding this lemma makes it -- easier to detect when this has occurred. 
``implies_true] /-- Given an equation `f = g` between morphisms `X ⟶ Y` in a category `C` (possibly after a `∀` binder), produce the equation `∀ (x : X), f x = g x` or `∀ [ConcreteCategory C] (x : X), f x = g x` as needed (after the `∀` binder), but with compositions fully right associated and identities removed. Returns the proof of the new theorem along with (optionally) a new level metavariable for the first universe parameter to `ConcreteCategory`. The `simpSides` option controls whether to simplify both sides of the equality, for simpNF purposes. -/ def elementwiseExpr (src : Name) (type pf : Expr) (simpSides := true) : MetaM (Expr × Option Level) := do let type := (← instantiateMVars type).cleanupAnnotations forallTelescope type fun fvars type' => do mkHomElementwise type' (← mkExpectedTypeHint (mkAppN pf fvars) type') fun eqPf instConcr? => do -- First simplify using elementwise-specific lemmas let mut eqPf' ← simpType (simpOnlyNames elementwiseThms (config := { decide := false })) eqPf if (← inferType eqPf') == .const ``True [] then throwError "elementwise lemma for {src} is trivial after applying ConcreteCategory \ lemmas, which can be caused by how applications are unfolded. \ Using elementwise is unnecessary." if simpSides then let ctx := { ← Simp.Context.mkDefault with config.decide := false } let (ty', eqPf'') ← simpEq (fun e => return (← simp e ctx).1) (← inferType eqPf') eqPf' -- check that it's not a simp-trivial equality: forallTelescope ty' fun _ ty' => do if let some (_, lhs, rhs) := ty'.eq? then if ← Std.Tactic.Lint.isSimpEq lhs rhs then throwError "applying simp to both sides reduces elementwise lemma for {src} \ to the trivial equality {ty'}. \ Either add `nosimp` or remove the `elementwise` attribute." eqPf' ← mkExpectedTypeHint eqPf'' ty' if let some (w, instConcr) := instConcr? 
then return (← Meta.mkLambdaFVars (fvars.push instConcr) eqPf', w) else return (← Meta.mkLambdaFVars fvars eqPf', none) where /-- Given an equality, extract a `Category` instance from it or raise an error. Returns the name of the category and its instance. -/ extractCatInstance (eqTy : Expr) : MetaM (Expr × Expr) := do let some (α, _, _) := eqTy.cleanupAnnotations.eq? | failure let (``Quiver.Hom, #[_, instQuiv, _, _]) := α.getAppFnArgs | failure let (``CategoryTheory.CategoryStruct.toQuiver, #[_, instCS]) := instQuiv.getAppFnArgs | failure let (``CategoryTheory.Category.toCategoryStruct, #[C, instC]) := instCS.getAppFnArgs | failure return (C, instC) mkHomElementwise {α} (eqTy eqPf : Expr) (k : Expr → Option (Level × Expr) → MetaM α) : MetaM α := do let (C, instC) ← try extractCatInstance eqTy catch _ => throwError "elementwise expects equality of morphisms in a category" -- First try being optimistic that there is already a ConcreteCategory instance. if let some eqPf' ← observing? (mkAppM ``hom_elementwise #[eqPf]) then k eqPf' none else -- That failed, so we need to introduce the instance, which takes creating -- a fresh universe level for `ConcreteCategory`'s forgetful functor. let .app (.const ``Category [v, u]) _ ← inferType instC | throwError "internal error in elementwise" let w ← mkFreshLevelMVar let cty : Expr := mkApp2 (.const ``ConcreteCategory [w, v, u]) C instC withLocalDecl `inst .instImplicit cty fun cfvar => do let eqPf' ← mkAppM ``hom_elementwise #[eqPf] k eqPf' (some (w, cfvar)) /-- Gives a name based on `baseName` that's not already in the list. 
-/
private partial def mkUnusedName (names : List Name) (baseName : Name) : Name :=
  if not (names.contains baseName) then
    baseName
  else
    -- `baseName` is taken: append an increasing numeric suffix until a free name is found.
    let rec loop (i : Nat := 0) : Name :=
      let w := Name.appendIndexAfter baseName i
      if names.contains w then
        loop (i + 1)
      else
        w
    loop 1

/-- The `elementwise` attribute can be added to a lemma proving an equation of morphisms, and it
creates a new lemma for a `ConcreteCategory` giving an equation with those morphisms applied to
some value.

Syntax examples:
- `@[elementwise]`
- `@[elementwise nosimp]` to not use `simp` on both sides of the generated lemma
- `@[elementwise (attr := simp)]` to apply the `simp` attribute to both the generated lemma and
  the original lemma.

Example application of `elementwise`:

```lean
@[elementwise]
lemma some_lemma {C : Type*} [Category C] {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (h : X ⟶ Z)
    (w : ...) : f ≫ g = h := ...
```

produces

```lean
lemma some_lemma_apply {C : Type*} [Category C] {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (h : X ⟶ Z)
    (w : ...) [ConcreteCategory C] (x : X) : g (f x) = h x := ...
```

Here `X` is being coerced to a type via `CategoryTheory.ConcreteCategory.hasCoeToSort` and
`f`, `g`, and `h` are being coerced to functions via
`CategoryTheory.ConcreteCategory.hasCoeToFun`. Further, we simplify the type using
`CategoryTheory.coe_id : ((𝟙 X) : X → X) x = x` and
`CategoryTheory.coe_comp : (f ≫ g) x = g (f x)`, replacing morphism composition with
function composition.

The `[ConcreteCategory C]` argument will be omitted if it is possible to synthesize an instance.

The name of the produced lemma can be specified with `@[elementwise other_lemma_name]`.
If `simp` is added first, the generated lemma will also have the `simp` attribute. -/
syntax (name := elementwise) "elementwise" " nosimp"? (" (" &"attr" ":=" Parser.Term.attrInstance,* ")")? : attr

initialize registerBuiltinAttribute {
  name := `elementwise
  descr := ""
  applicationTime := .afterCompilation
  add := fun src ref kind => match ref with
    | `(attr| elementwise $[nosimp%$nosimp?]? $[(attr := $stx?,*)]?) => MetaM.run' do
      -- The derived `_apply` lemma is added to the environment, so a non-global
      -- (`local`/`scoped`) use of the attribute is rejected.
      if (kind != AttributeKind.global) then
        throwError "`elementwise` can only be used as a global attribute"
      addRelatedDecl src "_apply" ref stx? fun type value levels => do
        let (newValue, level?) ← elementwiseExpr src type value (simpSides := nosimp?.isNone)
        let newLevels ← if let some level := level? then do
          -- A `ConcreteCategory` instance had to be introduced, so the new lemma needs an
          -- extra universe parameter; pick a name not already among `levels`.
          let w := mkUnusedName levels `w
          unless ← isLevelDefEq level (mkLevelParam w) do
            throwError "Could not create level parameter for ConcreteCategory instance"
          pure <| w :: levels
        else
          pure levels
        pure (newValue, newLevels)
    | _ => throwUnsupportedSyntax }

/-- `elementwise_of% h`, where `h` is a proof of an equation `f = g` between
morphisms `X ⟶ Y` in a concrete category (possibly after a `∀` binder),
produces a proof of equation `∀ (x : X), f x = g x`, but with compositions fully
right associated and identities removed.

A typical example is using `elementwise_of%` to dynamically generate rewrite lemmas:
```lean
example (M N K : MonCat) (f : M ⟶ N) (g : N ⟶ K) (h : M ⟶ K) (w : f ≫ g = h) (m : M) :
    g (f m) = h m := by rw [elementwise_of% w]
```
In this case, `elementwise_of% w` generates the lemma `∀ (x : M), f (g x) = h x`.

Like the `@[elementwise]` attribute, `elementwise_of%` inserts a `ConcreteCategory`
instance argument if it can't synthesize a relevant `ConcreteCategory` instance.
(Technical note: The forgetful functor's universe variable is instantiated with a
fresh level metavariable in this case.)

One difference between `elementwise_of%` and `@[elementwise]` is that `@[elementwise]` by
default applies `simp` to both sides of the generated lemma to get something that is in simp
normal form. `elementwise_of%` does not do this. -/
elab "elementwise_of% " t:term : term => do
  let e ← Term.elabTerm t none
  -- `.anonymous`: no new declaration is created; the rewritten proof term is returned directly,
  -- and (unlike the attribute) neither side is simplified.
  let (pf, _) ← elementwiseExpr .anonymous (← inferType e) e (simpSides := false)
  return pf

-- TODO: elementwise tactic
syntax "elementwise" (ppSpace colGt ident)* : tactic
syntax "elementwise!" (ppSpace colGt ident)* : tactic

end Tactic.Elementwise
Tactic\CategoryTheory\Monoidal.lean
/- Copyright (c) 2024 Yuma Mizuno. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yuma Mizuno -/ import Mathlib.Tactic.CategoryTheory.Coherence /-! # Normalization of morphisms in monoidal categories This file provides a tactic that normalizes morphisms in monoidal categories. This is used in the string diagram widget given in `Mathlib.Tactic.StringDiagram`. We say that the morphism `η` in a monoidal category is in normal form if 1. `η` is of the form `α₀ ≫ η₀ ≫ α₁ ≫ η₁ ≫ ... αₘ ≫ ηₘ ≫ αₘ₊₁` where each `αᵢ` is a structural 2-morphism (consisting of associators and unitors), 2. each `ηᵢ` is a non-structural 2-morphism of the form `f₁ ◁ ... ◁ fₘ ◁ θ`, and 3. `θ` is of the form `ι ▷ g₁ ▷ ... ▷ gₗ` Note that the structural morphisms `αᵢ` are not necessarily normalized, as the main purpose is to get a list of the non-structural morphisms out. Currently, the primary application of the normalization tactic in mind is drawing string diagrams, which are graphical representations of morphisms in monoidal categories, in the infoview. When drawing string diagrams, we often ignore associators and unitors (i.e., drawing morphisms in strict monoidal categories). On the other hand, in Lean, it is considered difficult to formalize the concept of strict monoidal categories due to the feature of dependent type theory. The normalization tactic can remove associators and unitors from the expression, extracting the necessary data for drawing string diagrams. The current plan on drawing string diagrams (#10581) is to use Penrose (https://github.com/penrose) via ProofWidget. However, it should be noted that the normalization procedure in this file does not rely on specific settings, allowing for broader application. Future plans include the following. At least I (Yuma) would like to work on these in the future, but it might not be immediate. If anyone is interested, I would be happy to discuss. 
- Currently (#10581), the string diagrams only do drawing. It would be better if they also
  generated proofs. That is, by manipulating the string diagrams displayed in the infoview
  with a mouse to generate proofs. In #10581, the string diagram widget only uses the morphisms
  generated by the normalization tactic and does not use proof terms ensuring that the original
  morphism and the normalized morphism are equal. Proof terms will be necessary for proof
  generation.

- There is also the possibility of using homotopy.io (https://github.com/homotopy-io), a
  graphical proof assistant for category theory, from Lean. At this point, I have very few
  ideas regarding this approach.

- The normalization tactic allows for an alternative implementation of the coherent tactic.

## Main definitions
- `Tactic.Monoidal.eval`: Given a Lean expression `e` that represents a morphism in a monoidal
  category, this function returns a pair of `⟨e', pf⟩` where `e'` is the normalized expression
  of `e` and `pf` is a proof that `e = e'`.

-/

namespace Mathlib.Tactic.Monoidal

open Lean Meta Elab
open CategoryTheory
open Mathlib.Tactic.Coherence

/-- The context for evaluating expressions. -/
structure Context where
  /-- The expression for the underlying category. -/
  C : Expr

/-- Populate a `context` object for evaluating `e`.

Returns `none` if the type of `e` is not a hom-type `f ⟶ g`; the category is
recovered as the type of the domain `f`. -/
def mkContext? (e : Expr) : MetaM (Option Context) := do
  match (← inferType e).getAppFnArgs with
  | (``Quiver.Hom, #[_, _, f, _]) =>
    let C ← inferType f
    return some ⟨C⟩
  | _ => return none

/-- The monad for the normalization of 2-morphisms. -/
abbrev MonoidalM := ReaderT Context MetaM

/-- Run a computation in the `M` monad. -/
abbrev MonoidalM.run {α : Type} (c : Context) (m : MonoidalM α) : MetaM α :=
  ReaderT.run m c

/-- Expressions for atomic 1-morphisms. -/
structure Atom₁ : Type where
  /-- Extract a Lean expression from an `Atom₁` expression. -/
  e : Expr

/-- Expressions for 1-morphisms. -/
inductive Mor₁ : Type
  /-- `id` is the expression for `𝟙_ C`.
-/
  | id : Mor₁
  /-- `comp X Y` is the expression for `X ⊗ Y` -/
  | comp : Mor₁ → Mor₁ → Mor₁
  /-- Construct the expression for an atomic 1-morphism. -/
  | of : Atom₁ → Mor₁
  deriving Inhabited

/-- Converts a 1-morphism into a list of its components. -/
def Mor₁.toList : Mor₁ → List Atom₁
  | .id => []
  | .comp f g => f.toList ++ g.toList
  | .of f => [f]

/-- Returns `𝟙_ C` if the expression `e` is of the form `𝟙_ C`. -/
def isTensorUnit? (e : Expr) : MetaM (Option Expr) := do
  -- Build `MonoidalCategoryStruct.tensorUnit ?C ?instC ?instMC` with fresh metavariables and
  -- unify it against `e`; unification fills in the category and instance arguments.
  let C ← mkFreshExprMVar none
  let instC ← mkFreshExprMVar none
  let instMC ← mkFreshExprMVar none
  let unit := mkAppN (← mkConstWithFreshMVarLevels ``MonoidalCategoryStruct.tensorUnit)
    #[C, instC, instMC]
  if ← withDefault <| isDefEq e unit then
    return ← instantiateMVars unit
  else
    return none

/-- Returns `(f, g)` if the expression `e` is of the form `f ⊗ g`. -/
def isTensorObj? (e : Expr) : MetaM (Option (Expr × Expr)) := do
  -- Same strategy as `isTensorUnit?`: unify `e` with `?f ⊗ ?g` and read off the solutions.
  let C ← mkFreshExprMVar none
  let f ← mkFreshExprMVar C
  let g ← mkFreshExprMVar C
  let instC ← mkFreshExprMVar none
  let instMC ← mkFreshExprMVar none
  let fg := mkAppN (← mkConstWithFreshMVarLevels ``MonoidalCategoryStruct.tensorObj)
    #[C, instC, instMC, f, g]
  if ← withDefault <| isDefEq e fg then
    return (← instantiateMVars f, ← instantiateMVars g)
  else
    return none

/-- Construct a `Mor₁` expression from a Lean expression. -/
partial def toMor₁ (e : Expr) : MetaM Mor₁ := do
  if let some _ ← isTensorUnit? e then
    return Mor₁.id
  else if let some (f, g) ← isTensorObj? e then
    -- A tensor product is decomposed by recursing into both factors.
    return (← toMor₁ f).comp (← toMor₁ g)
  else
    -- Anything that is neither the unit nor a tensor product is treated as atomic.
    return Mor₁.of ⟨e⟩

/-- Expressions for atomic structural 2-morphisms. -/
inductive StructuralAtom : Type
  /-- The expression for the associator `(α_ f g h).hom`. -/
  | associator (f g h : Mor₁) : StructuralAtom
  /-- The expression for the inverse of the associator `(α_ f g h).inv`. -/
  | associatorInv (f g h : Mor₁) : StructuralAtom
  /-- The expression for the left unitor `(λ_ f).hom`.
-/
  | leftUnitor (f : Mor₁) : StructuralAtom
  /-- The expression for the inverse of the left unitor `(λ_ f).inv`. -/
  | leftUnitorInv (f : Mor₁) : StructuralAtom
  /-- The expression for the right unitor `(ρ_ f).hom`. -/
  | rightUnitor (f : Mor₁) : StructuralAtom
  /-- The expression for the inverse of the right unitor `(ρ_ f).inv`. -/
  | rightUnitorInv (f : Mor₁) : StructuralAtom
  deriving Inhabited

/-- Construct a `StructuralAtom` expression from a Lean expression.

Returns `none` unless `e` is the `hom` or `inv` of an associator or a unitor. -/
def structuralAtom? (e : Expr) : MetaM (Option StructuralAtom) := do
  match e.getAppFnArgs with
  -- Forward directions: `(α_ f g h).hom`, `(λ_ f).hom`, `(ρ_ f).hom`.
  | (``Iso.hom, #[_, _, _, _, η]) =>
    match (← whnfR η).getAppFnArgs with
    | (``MonoidalCategoryStruct.associator, #[_, _, _, f, g, h]) =>
      return some <| .associator (← toMor₁ f) (← toMor₁ g) (← toMor₁ h)
    | (``MonoidalCategoryStruct.leftUnitor, #[_, _, _, f]) =>
      return some <| .leftUnitor (← toMor₁ f)
    | (``MonoidalCategoryStruct.rightUnitor, #[_, _, _, f]) =>
      return some <| .rightUnitor (← toMor₁ f)
    | _ => return none
  -- Inverse directions: `(α_ f g h).inv`, `(λ_ f).inv`, `(ρ_ f).inv`.
  | (``Iso.inv, #[_, _, _, _, η]) =>
    match (← whnfR η).getAppFnArgs with
    | (``MonoidalCategoryStruct.associator, #[_, _, _, f, g, h]) =>
      return some <| .associatorInv (← toMor₁ f) (← toMor₁ g) (← toMor₁ h)
    | (``MonoidalCategoryStruct.leftUnitor, #[_, _, _, f]) =>
      return some <| .leftUnitorInv (← toMor₁ f)
    | (``MonoidalCategoryStruct.rightUnitor, #[_, _, _, f]) =>
      return some <| .rightUnitorInv (← toMor₁ f)
    | _ => return none
  | _ => return none

/-- Expressions for atomic non-structural 2-morphisms. -/
structure Atom where
  /-- Extract a Lean expression from an `Atom` expression. -/
  e : Expr
  deriving Inhabited

/-- Expressions of the form `η ▷ f₁ ▷ ... ▷ fₙ`. -/
inductive WhiskerRightExpr : Type
  /-- Construct the expression for an atomic 2-morphism. -/
  | of (η : Atom) : WhiskerRightExpr
  /-- Construct the expression for `η ▷ f`. -/
  | whisker (η : WhiskerRightExpr) (f : Atom₁) : WhiskerRightExpr
  deriving Inhabited

/-- Expressions of the form `f₁ ◁ ... ◁ fₙ ◁ η`.
-/
inductive WhiskerLeftExpr : Type
  /-- Construct the expression for a right-whiskered 2-morphism. -/
  | of (η : WhiskerRightExpr) : WhiskerLeftExpr
  /-- Construct the expression for `f ◁ η`. -/
  | whisker (f : Atom₁) (η : WhiskerLeftExpr) : WhiskerLeftExpr
  deriving Inhabited

/-- Expressions for structural 2-morphisms. -/
inductive Structural : Type
  /-- Expressions for atomic structural 2-morphisms. -/
  | atom (η : StructuralAtom) : Structural
  /-- Expressions for the identity `𝟙 f`. -/
  | id (f : Mor₁) : Structural
  /-- Expressions for the composition `η ≫ θ`. -/
  | comp (α β : Structural) : Structural
  /-- Expressions for the left whiskering `f ◁ η`. -/
  | whiskerLeft (f : Mor₁) (η : Structural) : Structural
  /-- Expressions for the right whiskering `η ▷ f`. -/
  | whiskerRight (η : Structural) (f : Mor₁) : Structural
  /-- Expressions for `α` in the monoidal composition `η ⊗≫ θ := η ≫ α ≫ θ`. -/
  | monoidalCoherence (f g : Mor₁) (e : Expr) : Structural
  deriving Inhabited

/-- Normalized expressions for 2-morphisms.

A normalized 2-morphism alternates structural pieces and non-structural
(`WhiskerLeftExpr`) pieces, ending in a trailing structural piece (`nil`). -/
inductive NormalExpr : Type
  /-- Construct the expression for a structural 2-morphism. -/
  | nil (α : Structural) : NormalExpr
  /-- Construct the normalized expression of 2-morphisms recursively. -/
  | cons (head_structural : Structural) (head : WhiskerLeftExpr) (tail : NormalExpr) : NormalExpr
  deriving Inhabited

/-- The domain of a morphism. -/
def src (η : Expr) : MetaM Mor₁ := do
  match (← inferType η).getAppFnArgs with
  | (``Quiver.Hom, #[_, _, f, _]) => toMor₁ f
  | _ => throwError "{η} is not a morphism"

/-- The codomain of a morphism. -/
def tgt (η : Expr) : MetaM Mor₁ := do
  match (← inferType η).getAppFnArgs with
  | (``Quiver.Hom, #[_, _, _, g]) => toMor₁ g
  | _ => throwError "{η} is not a morphism"

/-- The domain of a 2-morphism. -/
def Atom.src (η : Atom) : MetaM Mor₁ := do Monoidal.src η.e

/-- The codomain of a 2-morphism. -/
def Atom.tgt (η : Atom) : MetaM Mor₁ := do Monoidal.tgt η.e

/-- The domain of a 2-morphism.
-/
def WhiskerRightExpr.src : WhiskerRightExpr → MetaM Mor₁
  | WhiskerRightExpr.of η => η.src
  | WhiskerRightExpr.whisker η f => return (← WhiskerRightExpr.src η).comp (Mor₁.of f)

/-- The codomain of a 2-morphism. -/
def WhiskerRightExpr.tgt : WhiskerRightExpr → MetaM Mor₁
  | WhiskerRightExpr.of η => η.tgt
  | WhiskerRightExpr.whisker η f => return (← WhiskerRightExpr.tgt η).comp (Mor₁.of f)

/-- The domain of a 2-morphism. -/
def WhiskerLeftExpr.src : WhiskerLeftExpr → MetaM Mor₁
  | WhiskerLeftExpr.of η => WhiskerRightExpr.src η
  | WhiskerLeftExpr.whisker f η => return (Mor₁.of f).comp (← WhiskerLeftExpr.src η)

/-- The codomain of a 2-morphism. -/
def WhiskerLeftExpr.tgt : WhiskerLeftExpr → MetaM Mor₁
  | WhiskerLeftExpr.of η => WhiskerRightExpr.tgt η
  | WhiskerLeftExpr.whisker f η => return (Mor₁.of f).comp (← WhiskerLeftExpr.tgt η)

-- Sources and targets of structural atoms are computed purely syntactically
-- from the recorded 1-morphism data; no elaboration is needed.

/-- The domain of a 2-morphism. -/
def StructuralAtom.src : StructuralAtom → Mor₁
  | .associator f g h => (f.comp g).comp h
  | .associatorInv f g h => f.comp (g.comp h)
  | .leftUnitor f => Mor₁.id.comp f
  | .leftUnitorInv f => f
  | .rightUnitor f => f.comp Mor₁.id
  | .rightUnitorInv f => f

/-- The codomain of a 2-morphism. -/
def StructuralAtom.tgt : StructuralAtom → Mor₁
  | .associator f g h => f.comp (g.comp h)
  | .associatorInv f g h => (f.comp g).comp h
  | .leftUnitor f => f
  | .leftUnitorInv f => Mor₁.id.comp f
  | .rightUnitor f => f
  | .rightUnitorInv f => f.comp Mor₁.id

/-- The domain of a 2-morphism. -/
def Structural.src : Structural → Mor₁
  | .atom η => η.src
  | .id f => f
  | .comp α _ => α.src
  | .whiskerLeft f η => f.comp η.src
  | .whiskerRight η f => η.src.comp f
  | .monoidalCoherence f _ _ => f

/-- The codomain of a 2-morphism. -/
def Structural.tgt : Structural → Mor₁
  | .atom η => η.tgt
  | .id f => f
  | .comp _ β => β.tgt
  | .whiskerLeft f η => f.comp η.tgt
  | .whiskerRight η f => η.tgt.comp f
  | .monoidalCoherence _ g _ => g

/-- The domain of a 2-morphism.
-/
def NormalExpr.src : NormalExpr → Mor₁
  | NormalExpr.nil η => η.src
  | NormalExpr.cons α _ _ => α.src

/-- The codomain of a 2-morphism. -/
def NormalExpr.tgt : NormalExpr → Mor₁
  | NormalExpr.nil η => η.tgt
  | NormalExpr.cons _ _ ηs => ηs.tgt

/-- The associator as a term of `normalExpr`. -/
def NormalExpr.associator (f g h : Mor₁) : NormalExpr :=
  .nil <| .atom <| .associator f g h

/-- The inverse of the associator as a term of `normalExpr`. -/
def NormalExpr.associatorInv (f g h : Mor₁) : NormalExpr :=
  .nil <| .atom <| .associatorInv f g h

/-- The left unitor as a term of `normalExpr`. -/
def NormalExpr.leftUnitor (f : Mor₁) : NormalExpr :=
  .nil <| .atom <| .leftUnitor f

/-- The inverse of the left unitor as a term of `normalExpr`. -/
def NormalExpr.leftUnitorInv (f : Mor₁) : NormalExpr :=
  .nil <| .atom <| .leftUnitorInv f

/-- The right unitor as a term of `normalExpr`. -/
def NormalExpr.rightUnitor (f : Mor₁) : NormalExpr :=
  .nil <| .atom <| .rightUnitor f

/-- The inverse of the right unitor as a term of `normalExpr`. -/
def NormalExpr.rightUnitorInv (f : Mor₁) : NormalExpr :=
  .nil <| .atom <| .rightUnitorInv f

/-- Return `η` for `η ▷ g₁ ▷ ... ▷ gₙ`. -/
def WhiskerRightExpr.atom : WhiskerRightExpr → Atom
  | WhiskerRightExpr.of η => η
  | WhiskerRightExpr.whisker η _ => η.atom

/-- Return `η` for `f₁ ◁ ... ◁ fₙ ◁ η ▷ g₁ ▷ ... ▷ gₙ`. -/
def WhiskerLeftExpr.atom : WhiskerLeftExpr → Atom
  | WhiskerLeftExpr.of η => η.atom
  | WhiskerLeftExpr.whisker _ η => η.atom

/-- Construct a `Structural` expression from a Lean expression for a structural 2-morphism.

Throws an error unless `e` is built from identities, compositions, whiskerings,
`MonoidalCoherence.hom`, associators and unitors. -/
partial def structural? (e : Expr) : MetaM Structural := do
  match (← whnfR e).getAppFnArgs with
  | (``CategoryStruct.comp, #[_, _, _, α, β]) =>
    return .comp (← structural? α) (← structural? β)
  | (``CategoryStruct.id, #[_, f]) => return .id (← toMor₁ f)
  | (``MonoidalCategoryStruct.whiskerLeft, #[f, η]) =>
    return .whiskerLeft (← toMor₁ f) (← structural? η)
  | (``MonoidalCategoryStruct.whiskerRight, #[η, f]) =>
    return .whiskerRight (← structural? η) (← toMor₁ f)
  | (``MonoidalCoherence.hom, #[_, _, f, g, inst]) =>
    return .monoidalCoherence (← toMor₁ f) (← toMor₁ g) inst
  | _ =>
    -- Not a composite: it must be a single associator/unitor (or fail).
    match ← structuralAtom? e with
    | some η => return .atom η
    | none => throwError "not a structural 2-morphism"

/-- Construct a `NormalExpr` expression from a `WhiskerLeftExpr` expression. -/
def NormalExpr.of (η : WhiskerLeftExpr) : MetaM NormalExpr := do
  -- Sandwich `η` between identity structural morphisms on its source and target.
  return .cons (.id (← η.src)) η (.nil (.id (← η.tgt)))

/-- Construct a `NormalExpr` expression from a Lean expression for an atomic 2-morphism. -/
def NormalExpr.ofExpr (η : Expr) : MetaM NormalExpr :=
  NormalExpr.of <| .of <| .of ⟨η⟩

/-- If `e` is an expression of the form `η ⊗≫ θ := η ≫ α ≫ θ` in the monoidal category `C`,
return the expression for `α` .-/
def structuralOfMonoidalComp (C e : Expr) : MetaM Structural := do
  -- Infer the universe levels of `C` from the types of `e` and `C` themselves.
  let v ← mkFreshLevelMVar
  let u ← mkFreshLevelMVar
  _ ← isDefEq (.sort (.succ v)) (← inferType (← inferType e))
  _ ← isDefEq (.sort (.succ u)) (← inferType C)
  let W ← mkFreshExprMVar none
  let X ← mkFreshExprMVar none
  let Y ← mkFreshExprMVar none
  let Z ← mkFreshExprMVar none
  let f ← mkFreshExprMVar none
  let g ← mkFreshExprMVar none
  let α₀ ← mkFreshExprMVar none
  let instC ← mkFreshExprMVar none
  -- Unify `e` with `f ≫ (α₀ ≫ g)` to extract the middle structural morphism `α₀`.
  let αg := mkAppN (.const ``CategoryStruct.comp [v, u]) #[C, instC, X, Y, Z, α₀, g]
  let fαg := mkAppN (.const ``CategoryStruct.comp [v, u]) #[C, instC, W, X, Z, f, αg]
  _ ← isDefEq e fαg
  structural?
α₀ section open scoped MonoidalCategory universe v u variable {C : Type u} [Category.{v} C] variable {f f' g g' h i j : C} theorem evalComp_nil_cons {f g h i j : C} (α : f ⟶ g) (β : g ⟶ h) (η : h ⟶ i) (ηs : i ⟶ j) : α ≫ (β ≫ η ≫ ηs) = (α ≫ β) ≫ η ≫ ηs := by simp @[nolint synTaut] theorem evalComp_nil_nil {f g h : C} (α : f ⟶ g) (β : g ⟶ h) : α ≫ β = α ≫ β := by simp theorem evalComp_cons {f g h i j : C} (α : f ⟶ g) (η : g ⟶ h) {ηs : h ⟶ i} {θ : i ⟶ j} {ι : h ⟶ j} (pf_ι : ηs ≫ θ = ι) : (α ≫ η ≫ ηs) ≫ θ = α ≫ η ≫ ι := by simp [pf_ι] theorem eval_comp {η η' : f ⟶ g} {θ θ' : g ⟶ h} {ι : f ⟶ h} (pf_η : η = η') (pf_θ : θ = θ') (pf_ηθ : η' ≫ θ' = ι) : η ≫ θ = ι := by simp [pf_η, pf_θ, pf_ηθ] theorem eval_of (η : f ⟶ g) : η = 𝟙 _ ≫ η ≫ 𝟙 _ := by simp theorem eval_monoidalComp {η η' : f ⟶ g} {α : g ⟶ h} {θ θ' : h ⟶ i} {αθ : g ⟶ i} {ηαθ : f ⟶ i} (pf_η : η = η') (pf_θ : θ = θ') (pf_αθ : α ≫ θ' = αθ) (pf_ηαθ : η' ≫ αθ = ηαθ) : η ≫ α ≫ θ = ηαθ := by simp [pf_η, pf_θ, pf_αθ, pf_ηαθ] variable [MonoidalCategory C] @[nolint synTaut] theorem evalWhiskerLeft_nil (f : C) (α : g ⟶ h) : f ◁ α = f ◁ α := by simp theorem evalWhiskerLeft_of_cons (α : g ⟶ h) (η : h ⟶ i) {ηs : i ⟶ j} {θ : f ⊗ i ⟶ f ⊗ j} (pf_θ : f ◁ ηs = θ) : f ◁ (α ≫ η ≫ ηs) = f ◁ α ≫ f ◁ η ≫ θ := by simp [pf_θ] theorem evalWhiskerLeft_comp {η : h ⟶ i} {θ : g ⊗ h ⟶ g ⊗ i} {ι : f ⊗ g ⊗ h ⟶ f ⊗ g ⊗ i} {ι' : f ⊗ g ⊗ h ⟶ (f ⊗ g) ⊗ i} {ι'' : (f ⊗ g) ⊗ h ⟶ (f ⊗ g) ⊗ i} (pf_θ : g ◁ η = θ) (pf_ι : f ◁ θ = ι) (pf_ι' : ι ≫ (α_ _ _ _).inv = ι') (pf_ι'' : (α_ _ _ _).hom ≫ ι' = ι'') : (f ⊗ g) ◁ η = ι'' := by simp [pf_θ, pf_ι, pf_ι', pf_ι''] theorem evalWhiskerLeft_id {f g : C} {η : f ⟶ g} {η' : f ⟶ 𝟙_ C ⊗ g} {η'' : 𝟙_ C ⊗ f ⟶ 𝟙_ C ⊗ g} (pf_η' : η ≫ (λ_ _).inv = η') (pf_η'' : (λ_ _).hom ≫ η' = η'') : 𝟙_ C ◁ η = η'' := by simp [pf_η', pf_η''] theorem eval_whiskerLeft {η η' : g ⟶ h} {θ : f ⊗ g ⟶ f ⊗ h} (pf_η : η = η') (pf_θ : f ◁ η' = θ) : f ◁ η = θ := by simp [pf_η, pf_θ] theorem eval_whiskerRight {η η' : f ⟶ g} {θ : f ⊗ h ⟶ g ⊗ h} (pf_η : 
η = η') (pf_θ : η' ▷ h = θ) : η ▷ h = θ := by simp [pf_η, pf_θ] @[nolint synTaut] theorem evalWhiskerRight_nil (α : f ⟶ g) (h : C) : α ▷ h = α ▷ h := by simp theorem evalWhiskerRight_cons_of_of (α : f ⟶ g) (η : g ⟶ h) {ηs : h ⟶ i} {θ : h ⊗ j ⟶ i ⊗ j} (pf_θ : ηs ▷ j = θ) : (α ≫ η ≫ ηs) ▷ j = α ▷ j ≫ η ▷ j ≫ θ := by simp [pf_θ] theorem evalWhiskerRight_cons_whisker {α : g ⟶ f ⊗ h} {η : h ⟶ i} {ηs : f ⊗ i ⟶ j} {k : C} {η₁ : h ⊗ k ⟶ i ⊗ k} {η₂ : f ⊗ (h ⊗ k) ⟶ f ⊗ (i ⊗ k)} {ηs₁ : (f ⊗ i) ⊗ k ⟶ j ⊗ k} {ηs₂ : f ⊗ (i ⊗ k) ⟶ j ⊗ k} {η₃ : f ⊗ (h ⊗ k) ⟶ j ⊗ k} {η₄ : (f ⊗ h) ⊗ k ⟶ j ⊗ k} {η₅ : g ⊗ k ⟶ j ⊗ k} (pf_η₁ : (𝟙 _ ≫ η ≫ 𝟙 _ ) ▷ k = η₁) (pf_η₂ : f ◁ η₁ = η₂) (pf_ηs₁ : ηs ▷ k = ηs₁) (pf_ηs₂ : (α_ _ _ _).inv ≫ ηs₁ = ηs₂) (pf_η₃ : η₂ ≫ ηs₂ = η₃) (pf_η₄ : (α_ _ _ _).hom ≫ η₃ = η₄) (pf_η₅ : α ▷ k ≫ η₄ = η₅) : (α ≫ (f ◁ η) ≫ ηs) ▷ k = η₅ := by simp at pf_η₁ simp [pf_η₁, pf_η₂, pf_ηs₁, pf_ηs₂, pf_η₃, pf_η₄, pf_η₅] theorem evalWhiskerRight_comp {η : f ⟶ f'} {η₁ : f ⊗ g ⟶ f' ⊗ g} {η₂ : (f ⊗ g) ⊗ h ⟶ (f' ⊗ g) ⊗ h} {η₃ : (f ⊗ g) ⊗ h ⟶ f' ⊗ (g ⊗ h)} {η₄ : f ⊗ (g ⊗ h) ⟶ f' ⊗ (g ⊗ h)} (pf_η₁ : η ▷ g = η₁) (pf_η₂ : η₁ ▷ h = η₂) (pf_η₃ : η₂ ≫ (α_ _ _ _).hom = η₃) (pf_η₄ : (α_ _ _ _).inv ≫ η₃ = η₄) : η ▷ (g ⊗ h) = η₄ := by simp [pf_η₁, pf_η₂, pf_η₃, pf_η₄] theorem evalWhiskerRight_id {η : f ⟶ g} {η₁ : f ⟶ g ⊗ 𝟙_ C} {η₂ : f ⊗ 𝟙_ C ⟶ g ⊗ 𝟙_ C} (pf_η₁ : η ≫ (ρ_ _).inv = η₁) (pf_η₂ : (ρ_ _).hom ≫ η₁ = η₂) : η ▷ 𝟙_ C = η₂ := by simp [pf_η₁, pf_η₂] end /-- Extract a Lean expression from a `Mor₁` expression. -/ def Mor₁.e : Mor₁ → MonoidalM Expr | .id => do let ctx ← read mkAppOptM ``MonoidalCategoryStruct.tensorUnit #[ctx.C, none, none] | .comp f g => do mkAppM ``MonoidalCategoryStruct.tensorObj #[← Mor₁.e f, ← Mor₁.e g] | .of f => return f.e /-- Extract a Lean expression from a `StructuralAtom` expression. 
-/
def StructuralAtom.e : StructuralAtom → MonoidalM Expr
  | .associator f g h => do
    mkAppM ``Iso.hom #[← mkAppM ``MonoidalCategoryStruct.associator #[← f.e, ← g.e, ← h.e]]
  | .associatorInv f g h => do
    mkAppM ``Iso.inv #[← mkAppM ``MonoidalCategoryStruct.associator #[← f.e, ← g.e, ← h.e]]
  | .leftUnitor f => do
    mkAppM ``Iso.hom #[← mkAppM ``MonoidalCategoryStruct.leftUnitor #[← f.e]]
  | .leftUnitorInv f => do
    mkAppM ``Iso.inv #[← mkAppM ``MonoidalCategoryStruct.leftUnitor #[← f.e]]
  | .rightUnitor f => do
    mkAppM ``Iso.hom #[← mkAppM ``MonoidalCategoryStruct.rightUnitor #[← f.e]]
  | .rightUnitorInv f => do
    mkAppM ``Iso.inv #[← mkAppM ``MonoidalCategoryStruct.rightUnitor #[← f.e]]

/-- Extract a Lean expression from a `Structural` expression. -/
partial def Structural.e : Structural → MonoidalM Expr
  | .atom η => η.e
  | .id f => do mkAppM ``CategoryStruct.id #[← f.e]
  | .comp α β => do mkAppM ``CategoryStruct.comp #[← α.e, ← β.e]
  | .whiskerLeft f η => do mkAppM ``MonoidalCategoryStruct.whiskerLeft #[← f.e, ← η.e]
  | .whiskerRight η f => do mkAppM ``MonoidalCategoryStruct.whiskerRight #[← η.e, ← f.e]
  -- The stored instance term `e` is passed explicitly; the remaining (implicit) arguments
  -- of `MonoidalCoherence.hom` are filled in by elaboration.
  | .monoidalCoherence _ _ e => do mkAppOptM ``MonoidalCoherence.hom #[none, none, none, none, e]

/-- Extract a Lean expression from a `WhiskerRightExpr` expression. -/
def WhiskerRightExpr.e : WhiskerRightExpr → MonoidalM Expr
  | WhiskerRightExpr.of η => return η.e
  | WhiskerRightExpr.whisker η f => do
    mkAppM ``MonoidalCategoryStruct.whiskerRight #[← η.e, f.e]

/-- Extract a Lean expression from a `WhiskerLeftExpr` expression. -/
def WhiskerLeftExpr.e : WhiskerLeftExpr → MonoidalM Expr
  | WhiskerLeftExpr.of η => η.e
  | WhiskerLeftExpr.whisker f η => do
    mkAppM ``MonoidalCategoryStruct.whiskerLeft #[f.e, ← η.e]

/-- Extract a Lean expression from a `NormalExpr` expression.
-/
def NormalExpr.e : NormalExpr → MonoidalM Expr
  | NormalExpr.nil α => α.e
  | NormalExpr.cons α η θ => do
    -- Rebuild `α ≫ (η ≫ θ)`, keeping the composition right-associated.
    mkAppM ``CategoryStruct.comp #[← α.e, ← mkAppM ``CategoryStruct.comp #[← η.e, ← θ.e]]

/-- The result of evaluating an expression into normal form. -/
structure Result where
  /-- The normalized expression of the 2-morphism. -/
  expr : NormalExpr
  /-- The proof that the normalized expression is equal to the original expression. -/
  proof : Expr

/-- Evaluate the expression `η ≫ θ` into a normalized form. -/
partial def evalComp : NormalExpr → NormalExpr → MonoidalM Result
  | .nil α, .cons β η ηs => do
    -- `α ≫ (β ≫ η ≫ ηs) = (α ≫ β) ≫ η ≫ ηs`: absorb `α` into the head structural morphism.
    let η' := .cons (α.comp β) η ηs
    return ⟨η', ← mkAppM ``evalComp_nil_cons #[← α.e, ← β.e, ← η.e, ← ηs.e]⟩
  | .nil α, .nil α' => do
    return ⟨.nil (α.comp α'), ← mkAppM ``evalComp_nil_nil #[← α.e, ← α'.e]⟩
  | .cons α η ηs, θ => do
    -- Recurse along the tail of the left operand.
    let ⟨ι, pf_ι⟩ ← evalComp ηs θ
    let ι' := .cons α η ι
    return ⟨ι', ← mkAppM ``evalComp_cons #[← α.e, ← η.e, pf_ι]⟩

/-- Evaluate the expression `f ◁ η` into a normalized form.
-/
partial def evalWhiskerLeftExpr : Mor₁ → NormalExpr → MonoidalM Result
  | f, .nil α => do
    -- Whiskering a purely structural morphism stays structural.
    return ⟨.nil (.whiskerLeft f α), ← mkAppM ``evalWhiskerLeft_nil #[← f.e, ← α.e]⟩
  | .of f, .cons α η ηs => do
    -- Atomic `f`: push the whiskering into the head and recurse into the tail.
    let η' := WhiskerLeftExpr.whisker f η
    let ⟨θ, pf_θ⟩ ← evalWhiskerLeftExpr (.of f) ηs
    let η'' := .cons (.whiskerLeft (.of f) α) η' θ
    return ⟨η'', ← mkAppM ``evalWhiskerLeft_of_cons #[← α.e, ← η.e, pf_θ]⟩
  | .comp f g, η => do
    -- `(f ⊗ g) ◁ η`: whisker by `g`, then by `f`, and conjugate by associators
    -- (cf. `evalWhiskerLeft_comp`).
    let ⟨θ, pf_θ⟩ ← evalWhiskerLeftExpr g η
    let ⟨ι, pf_ι⟩ ← evalWhiskerLeftExpr f θ
    let h := η.src
    let h' := η.tgt
    let ⟨ι', pf_ι'⟩ ← evalComp ι (NormalExpr.associatorInv f g h')
    let ⟨ι'', pf_ι''⟩ ← evalComp (NormalExpr.associator f g h) ι'
    return ⟨ι'', ← mkAppM ``evalWhiskerLeft_comp #[pf_θ, pf_ι, pf_ι', pf_ι'']⟩
  | .id, η => do
    -- `𝟙_ C ◁ η`: conjugate by the left unitor (cf. `evalWhiskerLeft_id`).
    let f := η.src
    let g := η.tgt
    let ⟨η', pf_η'⟩ ← evalComp η (NormalExpr.leftUnitorInv g)
    let ⟨η'', pf_η''⟩ ← evalComp (NormalExpr.leftUnitor f) η'
    return ⟨η'', ← mkAppM ``evalWhiskerLeft_id #[pf_η', pf_η'']⟩

/-- Evaluate the expression `η ▷ f` into a normalized form.
-/
partial def evalWhiskerRightExpr : NormalExpr → Mor₁ → MonoidalM Result
  | .nil α, h => do
    -- Whiskering a purely structural morphism stays structural.
    return ⟨.nil (.whiskerRight α h), ← mkAppM ``evalWhiskerRight_nil #[← α.e, ← h.e]⟩
  | .cons α (.of η) ηs, .of f => do
    -- Head has no left whiskering: whisker the head atom directly and recurse into the tail.
    let ⟨θ, pf_θ⟩ ← evalWhiskerRightExpr ηs (.of f)
    let η' := .cons (.whiskerRight α (.of f)) (.of (.whisker η f)) θ
    return ⟨η', ← mkAppM ``evalWhiskerRight_cons_of_of #[← α.e, ← η.e, pf_θ]⟩
  | .cons α (.whisker f η) ηs, h => do
    -- Head is `f ◁ η`: whisker the bare `η`, re-apply the left whiskering, and glue the
    -- pieces with associators, as in `evalWhiskerRight_cons_whisker`.
    let g ← η.src
    let g' ← η.tgt
    let ⟨η₁, pf_η₁⟩ ← evalWhiskerRightExpr (.cons (.id g) η (.nil (.id g'))) h
    let ⟨η₂, pf_η₂⟩ ← evalWhiskerLeftExpr (.of f) η₁
    let ⟨ηs₁, pf_ηs₁⟩ ← evalWhiskerRightExpr ηs h
    let α' := .whiskerRight α h
    let ⟨ηs₂, pf_ηs₂⟩ ← evalComp (.associatorInv (.of f) g' h) ηs₁
    let ⟨η₃, pf_η₃⟩ ← evalComp η₂ ηs₂
    let ⟨η₄, pf_η₄⟩ ← evalComp (.associator (.of f) g h) η₃
    let ⟨η₅, pf_η₅⟩ ← evalComp (.nil α') η₄
    return ⟨η₅, ← mkAppM ``evalWhiskerRight_cons_whisker
      #[pf_η₁, pf_η₂, pf_ηs₁, pf_ηs₂, pf_η₃, pf_η₄, pf_η₅]⟩
  | η, .comp g h => do
    -- `η ▷ (g ⊗ h)`: whisker by `g`, then by `h`, and conjugate by associators.
    let ⟨η₁, pf_η₁⟩ ← evalWhiskerRightExpr η g
    let ⟨η₂, pf_η₂⟩ ← evalWhiskerRightExpr η₁ h
    let f := η.src
    let f' := η.tgt
    let ⟨η₃, pf_η₃⟩ ← evalComp η₂ (.associator f' g h)
    let ⟨η₄, pf_η₄⟩ ← evalComp (.associatorInv f g h) η₃
    return ⟨η₄, ← mkAppM ``evalWhiskerRight_comp #[pf_η₁, pf_η₂, pf_η₃, pf_η₄]⟩
  | η, .id => do
    -- `η ▷ 𝟙_ C`: conjugate by the right unitor (cf. `evalWhiskerRight_id`).
    let f := η.src
    let g := η.tgt
    let ⟨η₁, pf_η₁⟩ ← evalComp η (.rightUnitorInv g)
    let ⟨η₂, pf_η₂⟩ ← evalComp (.rightUnitor f) η₁
    return ⟨η₂, ← mkAppM ``evalWhiskerRight_id #[pf_η₁, pf_η₂]⟩

/-- Evaluate the expression of a 2-morphism into a normalized form. -/
partial def eval (e : Expr) : MonoidalM Result := do
  if let .some α ← structuralAtom?
      e then
    -- A lone structural atom is already in normal form.
    return ⟨.nil <| .atom α, ← mkEqRefl (← α.e)⟩
  else
    match e.getAppFnArgs with
    | (``CategoryStruct.id, #[_, _, f]) =>
      return ⟨.nil (.id (← toMor₁ f)), ← mkEqRefl (← mkAppM ``CategoryStruct.id #[f])⟩
    | (``CategoryStruct.comp, #[_, _, _, _, _, η, θ]) =>
      -- Normalize both factors, then combine them with `evalComp`.
      let ⟨η_e, pf_η⟩ ← eval η
      let ⟨θ_e, pf_θ⟩ ← eval θ
      let ⟨ηθ, pf⟩ ← evalComp η_e θ_e
      return ⟨ηθ, ← mkAppM ``eval_comp #[pf_η, pf_θ, pf]⟩
    | (``MonoidalCategoryStruct.whiskerLeft, #[_, _, _, f, _, _, η]) =>
      let ⟨η_e, pf_η⟩ ← eval η
      let ⟨θ, pf_θ⟩ ← evalWhiskerLeftExpr (← toMor₁ f) η_e
      return ⟨θ, ← mkAppM ``eval_whiskerLeft #[pf_η, pf_θ]⟩
    | (``MonoidalCategoryStruct.whiskerRight, #[_, _, _, _, _, η, h]) =>
      let ⟨η_e, pf_η⟩ ← eval η
      let ⟨θ, pf_θ⟩ ← evalWhiskerRightExpr η_e (← toMor₁ h)
      return ⟨θ, ← mkAppM ``eval_whiskerRight #[pf_η, pf_θ]⟩
    | (``monoidalComp, #[C, _, _, _, _, _, _, η, θ]) =>
      -- `η ⊗≫ θ` unfolds to `η ≫ α ≫ θ`; recover the structural `α` and recurse.
      let ⟨η_e, pf_η⟩ ← eval η
      let α₀ ← structuralOfMonoidalComp C e
      let α := NormalExpr.nil α₀
      let ⟨θ_e, pf_θ⟩ ← eval θ
      let ⟨αθ, pf_θα⟩ ← evalComp α θ_e
      let ⟨ηαθ, pf_ηαθ⟩ ← evalComp η_e αθ
      return ⟨ηαθ, ← mkAppM ``eval_monoidalComp #[pf_η, pf_θ, pf_θα, pf_ηαθ]⟩
    | _ =>
      -- Anything else is treated as an atomic (non-structural) 2-morphism.
      return ⟨← NormalExpr.ofExpr e, ← mkAppM ``eval_of #[e]⟩

/-- Convert a `NormalExpr` expression into a list of `WhiskerLeftExpr` expressions. -/
def NormalExpr.toList : NormalExpr → List WhiskerLeftExpr
  | NormalExpr.nil _ => []
  | NormalExpr.cons _ η ηs => η :: NormalExpr.toList ηs

end Mathlib.Tactic.Monoidal

open Mathlib.Tactic.Monoidal

/-- `normalize% η` is the normalization of the 2-morphism `η`.
1. The normalized 2-morphism is of the form `α₀ ≫ η₀ ≫ α₁ ≫ η₁ ≫ ... αₘ ≫ ηₘ ≫ αₘ₊₁` where
  each `αᵢ` is a structural 2-morphism (consisting of associators and unitors),
2. each `ηᵢ` is a non-structural 2-morphism of the form `f₁ ◁ ... ◁ fₘ ◁ θ`, and
3. `θ` is of the form `ι ▷ g₁ ▷ ... ▷ gₗ` -/
elab "normalize% " t:term:51 : term => do
  let e ← Lean.Elab.Term.elabTerm t none
  let some ctx ← mkContext? e
    | throwError "not a morphism"
  -- Note: only the normalized expression is returned; the equality proof is discarded.
  MonoidalM.run ctx do (← eval e).expr.e

theorem mk_eq {α : Type _} (a b a' b' : α) (ha : a = a') (hb : b = b') (h : a' = b') : a = b := by
  simp [h, ha, hb]

open Lean Elab Meta Tactic in
/-- Transform an equality between 2-morphisms into the equality between their normalizations. -/
def mkEq (e : Expr) : MetaM Expr := do
  let some (_, e₁, e₂) := (← whnfR <| e).eq?
    | throwError "monoidal_nf requires an equality goal"
  let some ctx ← mkContext? e₁
    | throwError "the lhs and rhs must be morphisms"
  MonoidalM.run ctx do
    let ⟨e₁', p₁⟩ ← eval e₁
    let ⟨e₂', p₂⟩ ← eval e₂
    -- `mk_eq` reduces `e₁ = e₂` to the equality of the two normal forms.
    mkAppM ``mk_eq #[e₁, e₂, ← e₁'.e, ← e₂'.e, p₁, p₂]

open Lean Elab Tactic in
/-- Normalize both sides of an equality. -/
elab "monoidal_nf" : tactic => withMainContext do
  let t ← getMainTarget
  let mvarIds ← (← getMainGoal).apply (← mkEq t)
  replaceMainGoal mvarIds
Tactic\CategoryTheory\MonoidalComp.lean
/- Copyright (c) 2022. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Yuma Mizuno, Oleksandr Manzyuk
-/
import Mathlib.CategoryTheory.Monoidal.Category

/-!
# Monoidal composition `⊗≫` (composition up to associators)

We provide `f ⊗≫ g`, the `monoidalComp` operation,
which automatically inserts associators and unitors as needed
to make the target of `f` match the source of `g`.

## Example

Suppose we have a braiding morphism `R X Y : X ⊗ Y ⟶ Y ⊗ X` in a monoidal category, and that we
want to define the morphism with the type `V₁ ⊗ V₂ ⊗ V₃ ⊗ V₄ ⊗ V₅ ⟶ V₁ ⊗ V₃ ⊗ V₂ ⊗ V₄ ⊗ V₅` that
transposes the second and third components by `R V₂ V₃`. How to do this? The first guess would be
to use the whiskering operators `◁` and `▷`, and define the morphism as `V₁ ◁ R V₂ V₃ ▷ V₄ ▷ V₅`.
However, this morphism has the type `V₁ ⊗ ((V₂ ⊗ V₃) ⊗ V₄) ⊗ V₅ ⟶ V₁ ⊗ ((V₃ ⊗ V₂) ⊗ V₄) ⊗ V₅`,
which is not what we need. We should insert suitable associators. The desired associators can,
in principle, be defined by using the primitive three-components associator
`α_ X Y Z : (X ⊗ Y) ⊗ Z ≅ X ⊗ (Y ⊗ Z)` as a building block, but writing down actual definitions
are quite tedious, and we usually don't want to see them.

The monoidal composition `⊗≫` is designed to solve such a problem. In this case, we can define the
desired morphism as `𝟙 _ ⊗≫ V₁ ◁ R V₂ V₃ ▷ V₄ ▷ V₅ ⊗≫ 𝟙 _`, where the first and the second `𝟙 _`
are completed as `𝟙 (V₁ ⊗ V₂ ⊗ V₃ ⊗ V₄ ⊗ V₅)` and `𝟙 (V₁ ⊗ V₃ ⊗ V₂ ⊗ V₄ ⊗ V₅)`, respectively.
-/

universe v u

open CategoryTheory MonoidalCategory

namespace CategoryTheory

variable {C : Type u} [Category.{v} C]

open scoped MonoidalCategory

/-- A typeclass carrying a choice of monoidal structural isomorphism between two objects.
Used by the `⊗≫` monoidal composition operator, and the `coherence` tactic. -/
-- We could likely turn this into a `Prop` valued existential if that proves useful.
class MonoidalCoherence (X Y : C) where
  /-- A monoidal structural isomorphism between two objects. -/
  hom : X ⟶ Y
  -- Stored as an instance-implicit field so `asIso` can upgrade `hom` to an isomorphism.
  [isIso : IsIso hom]

/-- Notation for identities up to unitors and associators. -/
scoped[CategoryTheory.MonoidalCategory] notation " ⊗𝟙 " =>
  MonoidalCoherence.hom -- type as \ot 𝟙

attribute [instance] MonoidalCoherence.isIso

noncomputable section

/-- Construct an isomorphism between two objects in a monoidal category
out of unitors and associators. -/
def monoidalIso (X Y : C) [MonoidalCoherence X Y] : X ≅ Y :=
  asIso ⊗𝟙

/-- Compose two morphisms in a monoidal category,
inserting unitors and associators between as necessary. -/
def monoidalComp {W X Y Z : C} [MonoidalCoherence X Y] (f : W ⟶ X) (g : Y ⟶ Z) : W ⟶ Z :=
  f ≫ ⊗𝟙 ≫ g

@[inherit_doc monoidalComp]
scoped[CategoryTheory.MonoidalCategory] infixr:80 " ⊗≫ " =>
  monoidalComp -- type as \ot \gg

/-- Compose two isomorphisms in a monoidal category,
inserting unitors and associators between as necessary. -/
def monoidalIsoComp {W X Y Z : C} [MonoidalCoherence X Y] (f : W ≅ X) (g : Y ≅ Z) : W ≅ Z :=
  f ≪≫ asIso ⊗𝟙 ≪≫ g

@[inherit_doc monoidalIsoComp]
scoped[CategoryTheory.MonoidalCategory] infixr:80 " ≪⊗≫ " =>
  monoidalIsoComp -- type as \ll \ot \gg

end

namespace MonoidalCoherence

variable [MonoidalCategory C]

/- The instances below let typeclass search assemble a `MonoidalCoherence X Y` instance by
structural recursion on `X` and `Y`, building the `hom` field out of unitors (`λ_`, `ρ_`)
and associators (`α_`). -/

@[simps]
instance refl (X : C) : MonoidalCoherence X X := ⟨𝟙 _⟩

@[simps]
instance whiskerLeft (X Y Z : C) [MonoidalCoherence Y Z] :
    MonoidalCoherence (X ⊗ Y) (X ⊗ Z) :=
  ⟨X ◁ ⊗𝟙⟩

@[simps]
instance whiskerRight (X Y Z : C) [MonoidalCoherence X Y] :
    MonoidalCoherence (X ⊗ Z) (Y ⊗ Z) :=
  ⟨⊗𝟙 ▷ Z⟩

@[simps]
instance tensor_right (X Y : C) [MonoidalCoherence (𝟙_ C) Y] :
    MonoidalCoherence X (X ⊗ Y) :=
  ⟨(ρ_ X).inv ≫ X ◁ ⊗𝟙⟩

@[simps]
instance tensor_right' (X Y : C) [MonoidalCoherence Y (𝟙_ C)] :
    MonoidalCoherence (X ⊗ Y) X :=
  ⟨X ◁ ⊗𝟙 ≫ (ρ_ X).hom⟩

@[simps]
instance left (X Y : C) [MonoidalCoherence X Y] :
    MonoidalCoherence (𝟙_ C ⊗ X) Y :=
  ⟨(λ_ X).hom ≫ ⊗𝟙⟩

@[simps]
instance left' (X Y : C) [MonoidalCoherence X Y] :
    MonoidalCoherence X (𝟙_ C ⊗ Y) :=
  ⟨⊗𝟙 ≫ (λ_ Y).inv⟩

@[simps]
instance right (X Y : C) [MonoidalCoherence X Y] :
    MonoidalCoherence (X ⊗ 𝟙_ C) Y :=
  ⟨(ρ_ X).hom ≫ ⊗𝟙⟩

@[simps]
instance right' (X Y : C) [MonoidalCoherence X Y] :
    MonoidalCoherence X (Y ⊗ 𝟙_ C) :=
  ⟨⊗𝟙 ≫ (ρ_ Y).inv⟩

@[simps]
instance assoc (X Y Z W : C) [MonoidalCoherence (X ⊗ (Y ⊗ Z)) W] :
    MonoidalCoherence ((X ⊗ Y) ⊗ Z) W :=
  ⟨(α_ X Y Z).hom ≫ ⊗𝟙⟩

@[simps]
instance assoc' (W X Y Z : C) [MonoidalCoherence W (X ⊗ (Y ⊗ Z))] :
    MonoidalCoherence W ((X ⊗ Y) ⊗ Z) :=
  ⟨⊗𝟙 ≫ (α_ X Y Z).inv⟩

end MonoidalCoherence

@[simp] lemma monoidalComp_refl {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) :
    f ⊗≫ g = f ≫ g := by
  simp [monoidalComp]

end CategoryTheory
Tactic\CategoryTheory\Reassoc.lean
/- Copyright (c) 2022 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.Functor.Basic
import Mathlib.Util.AddRelatedDecl

/-!
# The `reassoc` attribute

Adding `@[reassoc]` to a lemma named `F` of shape `∀ .., f = g`,
where `f g : X ⟶ Y` in some category
will create a new lemma named `F_assoc` of shape
`∀ .. {Z : C} (h : Y ⟶ Z), f ≫ h = g ≫ h`
but with the conclusions simplified using the axioms for a category
(`Category.comp_id`, `Category.id_comp`, and `Category.assoc`).

This is useful for generating lemmas which the simplifier can use even on expressions
that are already right associated.

There is also a term elaborator `reassoc_of% t` for use within proofs.
-/

open Lean Meta Elab Tactic
open Mathlib.Tactic

namespace CategoryTheory

variable {C : Type*} [Category C]

/-- A variant of `eq_whisker` with a more convenient argument order for use in tactics. -/
theorem eq_whisker' {X Y : C} {f g : X ⟶ Y} (w : f = g) {Z : C} (h : Y ⟶ Z) :
    f ≫ h = g ≫ h := by rw [w]

/-- Simplify an expression using only the axioms of a category. -/
def categorySimp (e : Expr) : MetaM Simp.Result :=
  simpOnlyNames [``Category.comp_id, ``Category.id_comp, ``Category.assoc,
    ``Functor.id_obj, ``Functor.id_map, ``Functor.comp_obj, ``Functor.comp_map] e
    (config := { decide := false })

/-- Given an equation `f = g` between morphisms `X ⟶ Y` in a category (possibly after a `∀` binder),
produce the equation `∀ {Z} (h : Y ⟶ Z), f ≫ h = g ≫ h`,
but with compositions fully right associated and identities removed. -/
def reassocExpr (e : Expr) : MetaM Expr := do
  mapForallTelescope (fun e => do simpType categorySimp (← mkAppM ``eq_whisker' #[e])) e

/--
Adding `@[reassoc]` to a lemma named `F` of shape `∀ .., f = g`,
where `f g : X ⟶ Y` are morphisms in some category,
will create a new lemma named `F_assoc` of shape
`∀ .. {Z : C} (h : Y ⟶ Z), f ≫ h = g ≫ h`
but with the conclusions simplified using the axioms for a category
(`Category.comp_id`, `Category.id_comp`, and `Category.assoc`).
So, for example, if the conclusion of `F` is `a ≫ b = g` then
the conclusion of `F_assoc` will be `a ≫ (b ≫ h) = g ≫ h` (note that `≫` reassociates
to the right so the brackets will not appear in the statement).

This attribute is useful for generating lemmas which the simplifier can use even on expressions
that are already right associated.

Note that if you want both the lemma and the reassociated lemma to be
`simp` lemmas, you should tag the lemma `@[reassoc (attr := simp)]`.
The variant `@[simp, reassoc]` on a lemma `F` will tag `F` with `@[simp]`,
but not `F_apply` (this is sometimes useful).
-/
syntax (name := reassoc) "reassoc" (" (" &"attr" ":=" Parser.Term.attrInstance,* ")")? : attr

initialize registerBuiltinAttribute {
  name := `reassoc
  -- Fix: previously `descr := ""`, so hover text and attribute listings showed nothing.
  descr := "Generate a companion lemma `F_assoc` postcomposing both sides of an equation \
    between morphisms with an arbitrary morphism, simplified using the category axioms."
  applicationTime := .afterCompilation
  add := fun src ref kind => match ref with
    | `(attr| reassoc $[(attr := $stx?,*)]?) => MetaM.run' do
      -- The generated declaration is added to the environment, so the attribute only
      -- makes sense globally (not `local`/`scoped`).
      if (kind != AttributeKind.global) then
        throwError "`reassoc` can only be used as a global attribute"
      addRelatedDecl src "_assoc" ref stx? fun type value levels => do
        pure (← reassocExpr (← mkExpectedTypeHint value type), levels)
    | _ => throwUnsupportedSyntax }

open Term in
/-- `reassoc_of% t`,
where `t` is
an equation `f = g` between morphisms `X ⟶ Y` in a category (possibly after a `∀` binder),
produce the equation `∀ {Z} (h : Y ⟶ Z), f ≫ h = g ≫ h`,
but with compositions fully right associated and identities removed. -/
elab "reassoc_of% " t:term : term => do
  reassocExpr (← elabTerm t none)

end CategoryTheory
Tactic\CategoryTheory\Slice.lean
/- Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.Category.Basic
import Mathlib.Tactic.Conv

/-!
# The `slice` tactic

Applies a tactic to an interval of terms from a term obtained by repeated application
of `Category.comp`.
-/

open CategoryTheory
open Lean Parser.Tactic Elab Command Elab.Tactic Meta

-- TODO someone might like to generalise this tactic to work with other associative structures.

/- Porting note: moved `repeat_with_results` to `repeat_count` to `Mathlib.Tactic.Core` -/

open Tactic
open Parser.Tactic.Conv

/-- `slice` is a conv tactic; if the current focus is a composition of several morphisms,
`slice a b` reassociates as needed, and zooms in on the `a`-th through `b`-th morphisms.

Thus if the current focus is `(a ≫ b) ≫ ((c ≫ d) ≫ e)`, then `slice 2 3` zooms to `b ≫ c`. -/
syntax (name := slice) "slice " num ppSpace num : conv

/--
`evalSlice`
- rewrites the target expression using `Category.assoc`, bringing it to right-associated
  normal form (the results of the rewrite loop are discarded; only the final state matters)
- uses `congr` to split off the first `a-1` terms and rotates to the `a`-th (last) term
- counts the number `k` of rewrites as it uses `←Category.assoc` to bring the target to
  left associated form; from the first step this is the total number of remaining terms from `C`
- it now splits off `b-a` terms from target using `congr` leaving the desired subterm
- finally, it rewrites it once more using `Category.assoc` to bring it to right associated
  normal form
-/
def evalSlice (a b : Nat) : TacticM Unit := do
  -- Step 1: fully right-associate the composition.
  let _ ← iterateUntilFailureWithResults do
    evalTactic (← `(conv| rw [Category.assoc]))
  -- Step 2: peel off the first `a-1` factors and focus on the remainder.
  iterateRange (a - 1) (a - 1) do
    evalTactic (← `(conv| congr))
    evalTactic (← `(tactic| rotate_left))
  -- Step 3: left-associate, counting rewrites to learn how many factors remain.
  let k ← iterateUntilFailureCount <| evalTactic (← `(conv| rw [← Category.assoc]))
  -- Step 4: split off everything past the `b`-th factor.
  let c := k+1+a-b
  iterateRange c c <| evalTactic (← `(conv| congr))
  -- Step 5: restore right-associated normal form on the focused subterm.
  let _ ← iterateUntilFailureWithResults do
    evalTactic (← `(conv| rw [Category.assoc]))

/-- `slice` is implemented by `evalSlice`. -/
elab "slice " a:num ppSpace b:num : conv => evalSlice a.getNat b.getNat

/--
`slice_lhs a b => tac` zooms to the left hand side, uses associativity for categorical
composition as needed, zooms in on the `a`-th through `b`-th morphisms, and invokes `tac`.
-/
syntax (name := sliceLHS) "slice_lhs " num ppSpace num " => " convSeq : tactic
macro_rules
  | `(tactic| slice_lhs $a $b => $seq) =>
    `(tactic| conv => lhs; slice $a $b; ($seq:convSeq))

/--
`slice_rhs a b => tac` zooms to the right hand side, uses associativity for categorical
composition as needed, zooms in on the `a`-th through `b`-th morphisms, and invokes `tac`.
-/
syntax (name := sliceRHS) "slice_rhs " num ppSpace num " => " convSeq : tactic
macro_rules
  | `(tactic| slice_rhs $a $b => $seq) =>
    `(tactic| conv => rhs; slice $a $b; ($seq:convSeq))

/- Porting note: update when `add_tactic_doc` is supported` -/
-- add_tactic_doc
--   { Name := "slice"
--     category := DocCategory.tactic
--     declNames := [`tactic.interactive.sliceLHS, `tactic.interactive.sliceRHS]
--     tags := ["category theory"] }
--
Tactic\CC\Addition.lean
/- Copyright (c) 2016 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura, Miyahara Kō -/ import Mathlib.Logic.Basic import Mathlib.Data.Option.Defs import Mathlib.Lean.Expr.Basic import Mathlib.Tactic.Relation.Rfl import Mathlib.Tactic.Relation.Symm import Mathlib.Tactic.CC.Datatypes import Mathlib.Tactic.CC.Lemmas import Batteries.Data.RBMap.Alter /-! # Process when an new equation is added to a congruence closure -/ universe u open Lean Meta Elab Tactic Std namespace Mathlib.Tactic.CC initialize registerTraceClass `Meta.Tactic.cc.merge registerTraceClass `Meta.Tactic.cc.failure registerTraceClass `Debug.Meta.Tactic.cc registerTraceClass `Debug.Meta.Tactic.cc.ac registerTraceClass `Debug.Meta.Tactic.cc.parentOccs /-- The monad for the `cc` tactic stores the current state of the tactic. -/ abbrev CCM := StateRefT CCStructure MetaM namespace CCM /-- Run a computation in the `CCM` monad. -/ @[inline] def run {α : Type} (x : CCM α) (c : CCStructure) : MetaM (α × CCStructure) := StateRefT'.run x c /-- Update the `todo` field of the state. -/ @[inline] def modifyTodo (f : Array TodoEntry → Array TodoEntry) : CCM Unit := modify fun cc => { cc with todo := f cc.todo } /-- Update the `acTodo` field of the state. -/ @[inline] def modifyACTodo (f : Array ACTodoEntry → Array ACTodoEntry) : CCM Unit := modify fun cc => { cc with acTodo := f cc.acTodo } /-- Update the `cache` field of the state. -/ @[inline] def modifyCache (f : CCCongrTheoremCache → CCCongrTheoremCache) : CCM Unit := modify fun cc => { cc with cache := f cc.cache } /-- Read the `todo` field of the state. -/ @[inline] def getTodo : CCM (Array TodoEntry) := do return (← get).todo /-- Read the `acTodo` field of the state. -/ @[inline] def getACTodo : CCM (Array ACTodoEntry) := do return (← get).acTodo /-- Read the `cache` field of the state. 
-/ @[inline] def getCache : CCM CCCongrTheoremCache := do return (← get).cache /-- Look up an entry associated with the given expression. -/ def getEntry (e : Expr) : CCM (Option Entry) := do return (← get).entries.find? e /-- Use the normalizer to normalize `e`. If no normalizer was configured, returns `e` itself. -/ def normalize (e : Expr) : CCM Expr := do if let some normalizer := (← get).normalizer then normalizer.normalize e else return e /-- Add a new entry to the end of the todo list. See also `pushEq`, `pushHEq` and `pushReflEq`. -/ def pushTodo (lhs rhs : Expr) (H : EntryExpr) (heqProof : Bool) : CCM Unit := do modifyTodo fun todo => todo.push (lhs, rhs, H, heqProof) /-- Add the equality proof `H : lhs = rhs` to the end of the todo list. -/ @[inline] def pushEq (lhs rhs : Expr) (H : EntryExpr) : CCM Unit := pushTodo lhs rhs H false /-- Add the heterogeneous equality proof `H : HEq lhs rhs` to the end of the todo list. -/ @[inline] def pushHEq (lhs rhs : Expr) (H : EntryExpr) : CCM Unit := pushTodo lhs rhs H true /-- Add `rfl : lhs = rhs` to the todo list. -/ @[inline] def pushReflEq (lhs rhs : Expr) : CCM Unit := pushEq lhs rhs .refl /-- Return the root expression of the expression's congruence class. -/ def getRoot (e : Expr) : CCM Expr := do return (← get).root e /-- Is `e` the root of its congruence class? -/ def isCgRoot (e : Expr) : CCM Bool := do return (← get).isCgRoot e /-- Update the `child` so its parent becomes `parent`. -/ def addOccurrence (parent child : Expr) (symmTable : Bool) : CCM Unit := do let childRoot ← getRoot child modify fun ccs => { ccs with parents := ccs.parents.alter childRoot fun ps? => let ps := ps?.getD ∅ ps.insert { expr := parent, symmTable } } /-- Return true iff the given function application are congruent `e₁` should have the form `f a` and `e₂` the form `g b`. See paper: Congruence Closure for Intensional Type Theory. 
-/ partial def isCongruent (e₁ e₂ : Expr) : CCM Bool := do let .app f a := e₁ | failure let .app g b := e₂ | failure -- If they are non-dependent functions, then we can compare all arguments at once. if (← getEntry e₁).any Entry.fo then e₁.withApp fun f₁ args₁ => e₂.withApp fun f₂ args₂ => do if ha : args₁.size = args₂.size then for hi : i in [:args₁.size] do if (← getRoot (args₁[i]'hi.2)) != (← getRoot (args₂[i]'(ha.symm ▸ hi.2))) then return false if f₁ == f₂ then return true else if (← getRoot f₁) != (← getRoot f₂) then -- `f₁` and `f₂` are not equivalent return false else if ← pureIsDefEq (← inferType f₁) (← inferType f₂) then return true else return false else return false else -- Given `e₁ := f a`, `e₂ := g b` if (← getRoot a) != (← getRoot b) then -- `a` and `b` are not equivalent return false else if (← getRoot f) != (← getRoot g) then -- `f` and `g` are not equivalent return false else if ← pureIsDefEq (← inferType f) (← inferType g) then /- Case 1: `f` and `g` have the same type, then we can create a congruence proof for `HEq (f a) (g b)` -/ return true else if f.isApp && g.isApp then -- Case 2: `f` and `g` are congruent isCongruent f g else /- f and g are not congruent nor they have the same type. We can't generate a congruence proof in this case because the following lemma `hcongr : HEq f₁ f₂ → HEq a₁ a₂ → HEq (f₁ a₁) (f₂ a₂)` is not provable. Remark: it is also not provable in MLTT, Coq and Agda (even if we assume UIP). -/ return false /-- Return the `CongruencesKey` associated with an expression of the form `f a`. -/ def mkCongruencesKey (e : Expr) : CCM CongruencesKey := do let .app f a := e | failure if (← getEntry e).any Entry.fo then -- first-order case, where we do not consider all partial applications e.withApp fun fn args => do return .fo (← getRoot fn) (← args.mapM getRoot) else return .ho (← getRoot f) (← getRoot a) /-- Return the `SymmCongruencesKey` associated with the equality `lhs = rhs`. 
-/ def mkSymmCongruencesKey (lhs rhs : Expr) : CCM SymmCongruencesKey := do let lhs ← getRoot lhs let rhs ← getRoot rhs if hash lhs > hash rhs then return { h₁ := rhs, h₂ := lhs } else return { h₁ := lhs, h₂ := rhs } /-- Try to find a congruence theorem for an application of `fn` with `nargs` arguments, with support for `HEq`. -/ def mkCCHCongrTheorem (fn : Expr) (nargs : Nat) : CCM (Option CCCongrTheorem) := do let cache ← getCache -- Check if `{ fn, nargs }` is in the cache let key₁ : CCCongrTheoremKey := { fn, nargs } if let some it₁ := cache.findEntry? key₁ then return it₁.2 -- Try automatically generated congruence lemma with support for heterogeneous equality. let lemm ← mkCCHCongrWithArity fn nargs if let some lemm := lemm then modifyCache fun ccc => ccc.insert key₁ (some lemm) return lemm -- cache failure modifyCache fun ccc => ccc.insert key₁ none return none /-- Try to find a congruence theorem for the expression `e` with support for `HEq`. -/ def mkCCCongrTheorem (e : Expr) : CCM (Option CCCongrTheorem) := do let fn := e.getAppFn let nargs := e.getAppNumArgs mkCCHCongrTheorem fn nargs /-- Record the instance `e` and add it to the set of known defeq instances. -/ def propagateInstImplicit (e : Expr) : CCM Unit := do let type ← inferType e let type ← normalize type match (← get).instImplicitReprs.find? type with | some l => for e' in l do if ← pureIsDefEq e e' then pushReflEq e e' return modify fun ccs => { ccs with instImplicitReprs := ccs.instImplicitReprs.insert type (e :: l) } | none => modify fun ccs => { ccs with instImplicitReprs := ccs.instImplicitReprs.insert type [e] } /-- Treat the entry associated with `e` as a first-order function. -/ def setFO (e : Expr) : CCM Unit := modify fun ccs => { ccs with entries := ccs.entries.modify e fun d => { d with fo := true } } /-- Update the modification time of the congruence class of `e`. -/ partial def updateMT (e : Expr) : CCM Unit := do let r ← getRoot e let some ps := (← get).parents.find? 
r | return for p in ps do let some it ← getEntry p.expr | failure let gmt := (← get).gmt if it.mt < gmt then let newIt := { it with mt := gmt } modify fun ccs => { ccs with entries := ccs.entries.insert p.expr newIt } updateMT p.expr /-- Does the congruence class with root `root` have any `HEq` proofs? -/ def hasHEqProofs (root : Expr) : CCM Bool := do let some n ← getEntry root | failure guard (n.root == root) return n.heqProofs /-- Apply symmetry to `H`, which is an `Eq` or a `HEq`. * If `heqProofs` is true, ensure the result is a `HEq` (otherwise it is assumed to be `Eq`). * If `flipped` is true, apply `symm`, otherwise keep the same direction. -/ def flipProofCore (H : Expr) (flipped heqProofs : Bool) : CCM Expr := do let mut newH := H if ← liftM <| pure heqProofs <&&> Expr.isEq <$> (inferType H >>= whnf) then newH ← mkAppM ``heq_of_eq #[H] if !flipped then return newH else if heqProofs then mkHEqSymm newH else mkEqSymm newH /-- In a delayed way, apply symmetry to `H`, which is an `Eq` or a `HEq`. * If `heqProofs` is true, ensure the result is a `HEq` (otherwise it is assumed to be `Eq`). * If `flipped` is true, apply `symm`, otherwise keep the same direction. -/ def flipDelayedProofCore (H : DelayedExpr) (flipped heqProofs : Bool) : CCM DelayedExpr := do let mut newH := H if heqProofs then newH := .heqOfEq H if !flipped then return newH else if heqProofs then return .heqSymm newH else return .eqSymm newH /-- Apply symmetry to `H`, which is an `Eq` or a `HEq`. * If `heqProofs` is true, ensure the result is a `HEq` (otherwise it is assumed to be `Eq`). * If `flipped` is true, apply `symm`, otherwise keep the same direction. -/ def flipProof (H : EntryExpr) (flipped heqProofs : Bool) : CCM EntryExpr := match H with | .ofExpr H => EntryExpr.ofExpr <$> flipProofCore H flipped heqProofs | .ofDExpr H => EntryExpr.ofDExpr <$> flipDelayedProofCore H flipped heqProofs | _ => return H /-- Are `e₁` and `e₂` known to be in the same equivalence class? 
-/ def isEqv (e₁ e₂ : Expr) : CCM Bool := do let some n₁ ← getEntry e₁ | return false let some n₂ ← getEntry e₂ | return false return n₁.root == n₂.root /-- Is `e₁ ≠ e₂` known to be true? Note that this is stronger than `not (isEqv e₁ e₂)`: only if we can prove they are distinct this returns `true`. -/ def isNotEqv (e₁ e₂ : Expr) : CCM Bool := do let tmp ← mkEq e₁ e₂ if ← isEqv tmp (.const ``False []) then return true let tmp ← mkHEq e₁ e₂ isEqv tmp (.const ``False []) /-- Is the proposition `e` known to be true? -/ @[inline] def isEqTrue (e : Expr) : CCM Bool := isEqv e (.const ``True []) /-- Is the proposition `e` known to be false? -/ @[inline] def isEqFalse (e : Expr) : CCM Bool := isEqv e (.const ``False []) /-- Apply transitivity to `H₁` and `H₂`, which are both `Eq` or `HEq` depending on `heqProofs`. -/ def mkTrans (H₁ H₂ : Expr) (heqProofs : Bool) : MetaM Expr := if heqProofs then mkHEqTrans H₁ H₂ else mkEqTrans H₁ H₂ /-- Apply transitivity to `H₁?` and `H₂`, which are both `Eq` or `HEq` depending on `heqProofs`. If `H₁?` is `none`, return `H₂` instead. -/ def mkTransOpt (H₁? : Option Expr) (H₂ : Expr) (heqProofs : Bool) : MetaM Expr := match H₁? with | some H₁ => mkTrans H₁ H₂ heqProofs | none => pure H₂ mutual /-- Use congruence on arguments to prove `lhs = rhs`. That is, tries to prove that `lhsFn lhsArgs[0] ... lhsArgs[n-1] = lhsFn rhsArgs[0] ... rhsArgs[n-1]` by showing that `lhsArgs[i] = rhsArgs[i]` for all `i`. Fails if the head function of `lhs` is not that of `rhs`. -/ partial def mkCongrProofCore (lhs rhs : Expr) (heqProofs : Bool) : CCM Expr := do let mut lhsArgsRev : Array Expr := #[] let mut rhsArgsRev : Array Expr := #[] let mut lhsIt := lhs let mut rhsIt := rhs -- Collect the arguments to `lhs` and `rhs`. -- As an optimization, we stop collecting arguments as soon as the functions are defeq, -- so `lhsFn` and `rhsFn` might end up still of the form `(f x y z)` and `(f x' y' z')`. 
if lhs != rhs then repeat let .app lhsItFn lhsItArg := lhsIt | failure let .app rhsItFn rhsItArg := rhsIt | failure lhsArgsRev := lhsArgsRev.push lhsItArg rhsArgsRev := rhsArgsRev.push rhsItArg lhsIt := lhsItFn rhsIt := rhsItFn if lhsIt == rhsIt then break if ← pureIsDefEq lhsIt rhsIt then break if ← isEqv lhsIt rhsIt <&&> inferType lhsIt >>= fun i₁ => inferType rhsIt >>= fun i₂ => pureIsDefEq i₁ i₂ then break -- If we collect no arguments, the expressions themselves are defeq; return `rfl`. if lhsArgsRev.isEmpty then if heqProofs then return (← mkHEqRefl lhs) else return (← mkEqRefl lhs) let lhsArgs := lhsArgsRev.reverse let rhsArgs := rhsArgsRev.reverse -- Ensure that `lhsFn = rhsFn`, they have the same type and the same list of arguments. let PLift.up ha ← if ha : lhsArgs.size = rhsArgs.size then pure (PLift.up ha) else failure let lhsFn := lhsIt let rhsFn := rhsIt guard (← isEqv lhsFn rhsFn <||> pureIsDefEq lhsFn rhsFn) guard (← pureIsDefEq (← inferType lhsFn) (← inferType rhsFn)) /- Create `r`, a proof for `lhsFn lhsArgs[0] ... lhsArgs[n-1] = lhsFn rhsArgs[0] ... rhsArgs[n-1]` where `n := lhsArgs.size` -/ let some specLemma ← mkCCHCongrTheorem lhsFn lhsArgs.size | failure let mut kindsIt := specLemma.argKinds let mut lemmaArgs : Array Expr := #[] for hi : i in [:lhsArgs.size] do guard !kindsIt.isEmpty lemmaArgs := lemmaArgs.push (lhsArgs[i]'hi.2) |>.push (rhsArgs[i]'(ha.symm ▸ hi.2)) if kindsIt[0]! matches CongrArgKind.heq then let some p ← getHEqProof (lhsArgs[i]'hi.2) (rhsArgs[i]'(ha.symm ▸ hi.2)) | failure lemmaArgs := lemmaArgs.push p else guard (kindsIt[0]! 
matches .eq) let some p ← getEqProof (lhsArgs[i]'hi.2) (rhsArgs[i]'(ha.symm ▸ hi.2)) | failure lemmaArgs := lemmaArgs.push p kindsIt := kindsIt.eraseIdx 0 let mut r := mkAppN specLemma.proof lemmaArgs if specLemma.heqResult && !heqProofs then r ← mkAppM ``eq_of_heq #[r] else if !specLemma.heqResult && heqProofs then r ← mkAppM ``heq_of_eq #[r] if ← pureIsDefEq lhsFn rhsFn then return r /- Convert `r` into a proof of `lhs = rhs` using `Eq.rec` and the proof that `lhsFn = rhsFn` -/ let some lhsFnEqRhsFn ← getEqProof lhsFn rhsFn | failure let motive ← withLocalDeclD `x (← inferType lhsFn) fun x => do let motiveRhs := mkAppN x rhsArgs let motive ← if heqProofs then mkHEq lhs motiveRhs else mkEq lhs motiveRhs let hType ← mkEq lhsFn x withLocalDeclD `h hType fun h => mkLambdaFVars #[x, h] motive mkEqRec motive r lhsFnEqRhsFn /-- If `e₁ : R lhs₁ rhs₁`, `e₂ : R lhs₂ rhs₂` and `lhs₁ = rhs₂`, where `R` is a symmetric relation, prove `R lhs₁ rhs₁` is equivalent to `R lhs₂ rhs₂`. * if `lhs₁` is known to equal `lhs₂`, return `none` * if `lhs₁` is not known to equal `rhs₂`, fail. -/ partial def mkSymmCongrProof (e₁ e₂ : Expr) (heqProofs : Bool) : CCM (Option Expr) := do let some (R₁, lhs₁, rhs₁) ← e₁.relSidesIfSymm? | return none let some (R₂, lhs₂, rhs₂) ← e₂.relSidesIfSymm? | return none if R₁ != R₂ then return none if (← isEqv lhs₁ lhs₂) then return none guard (← isEqv lhs₁ rhs₂) /- We must apply symmetry. The symm congruence table is implicitly using symmetry. 
That is, we have `e₁ := lhs₁ ~R₁~ rhs₁` and `e2 := lhs₂ ~R₁~ rhs₂` But, `lhs₁ ~R₁~ rhs₂` and `rhs₁ ~R₁~ lhs₂` -/ /- Given `e₁ := lhs₁ ~R₁~ rhs₁`, create proof for `lhs₁ ~R₁~ rhs₁` = `rhs₁ ~R₁~ lhs₁` -/ let newE₁ ← mkRel R₁ rhs₁ lhs₁ let e₁IffNewE₁ ← withLocalDeclD `h₁ e₁ fun h₁ => withLocalDeclD `h₂ newE₁ fun h₂ => do mkAppM ``Iff.intro #[← mkLambdaFVars #[h₁] (← h₁.applySymm), ← mkLambdaFVars #[h₂] (← h₂.applySymm)] let mut e₁EqNewE₁ := mkApp3 (.const ``propext []) e₁ newE₁ e₁IffNewE₁ let newE₁EqE₂ ← mkCongrProofCore newE₁ e₂ heqProofs if heqProofs then e₁EqNewE₁ ← mkAppM ``heq_of_eq #[e₁EqNewE₁] return some (← mkTrans e₁EqNewE₁ newE₁EqE₂ heqProofs) /-- Use congruence on arguments to prove `e₁ = e₂`. Special case: if `e₁` and `e₂` have the form `R lhs₁ rhs₁` and `R lhs₂ rhs₂` such that `R` is symmetric and `lhs₁ = rhs₂`, then use those facts instead. -/ partial def mkCongrProof (e₁ e₂ : Expr) (heqProofs : Bool) : CCM Expr := do if let some r ← mkSymmCongrProof e₁ e₂ heqProofs then return r else mkCongrProofCore e₁ e₂ heqProofs /-- Turn a delayed proof into an actual proof term. 
-/ partial def mkDelayedProof (H : DelayedExpr) : CCM Expr := do match H with | .ofExpr H => return H | .eqProof lhs rhs => liftOption (← getEqProof lhs rhs) | .congrArg f h => mkCongrArg f (← mkDelayedProof h) | .congrFun h a => mkCongrFun (← mkDelayedProof h) (← liftOption a.toExpr) | .eqSymm h => mkEqSymm (← mkDelayedProof h) | .eqSymmOpt a₁ a₂ h => mkAppOptM ``Eq.symm #[none, ← liftOption a₁.toExpr, ← liftOption a₂.toExpr, ← mkDelayedProof h] | .eqTrans h₁ h₂ => mkEqTrans (← mkDelayedProof h₁) (← mkDelayedProof h₂) | .eqTransOpt a₁ a₂ a₃ h₁ h₂ => mkAppOptM ``Eq.trans #[none, ← liftOption a₁.toExpr, ← liftOption a₂.toExpr, ← liftOption a₃.toExpr, ← mkDelayedProof h₁, ← mkDelayedProof h₂] | .heqOfEq h => mkAppM ``heq_of_eq #[← mkDelayedProof h] | .heqSymm h => mkHEqSymm (← mkDelayedProof h) /-- Use the format of `H` to try and construct a proof or `lhs = rhs`: * If `H = .congr`, then use congruence. * If `H = .eqTrue`, try to prove `lhs = True` or `rhs = True`, if they have the format `R a b`, by proving `a = b`. * Otherwise, return the (delayed) proof encoded by `H` itself. -/ partial def mkProof (lhs rhs : Expr) (H : EntryExpr) (heqProofs : Bool) : CCM Expr := do match H with | .congr => mkCongrProof lhs rhs heqProofs | .eqTrue => let (flip, some (R, a, b)) ← if lhs == .const ``True [] then ((true, ·)) <$> rhs.relSidesIfRefl? else ((false, ·)) <$> lhs.relSidesIfRefl? | failure let aRb ← if R == ``Eq then getEqProof a b >>= liftOption else if R == ``HEq then getHEqProof a b >>= liftOption else -- TODO(Leo): the following code assumes R is homogeneous. -- We should add support arbitrary heterogenous reflexive relations. 
getEqProof a b >>= liftOption >>= fun aEqb => liftM (liftFromEq R aEqb) let aRbEqTrue ← mkEqTrue aRb if flip then mkEqSymm aRbEqTrue else return aRbEqTrue | .refl => let type ← if heqProofs then mkHEq lhs rhs else mkEq lhs rhs let proof ← if heqProofs then mkHEqRefl lhs else mkEqRefl lhs mkExpectedTypeHint proof type | .ofExpr H => return H | .ofDExpr H => mkDelayedProof H /-- If `asHEq` is `true`, then build a proof for `HEq e₁ e₂`. Otherwise, build a proof for `e₁ = e₂`. The result is `none` if `e₁` and `e₂` are not in the same equivalence class. -/ partial def getEqProofCore (e₁ e₂ : Expr) (asHEq : Bool) : CCM (Option Expr) := do if e₁.hasExprMVar || e₂.hasExprMVar then return none if ← pureIsDefEq e₁ e₂ then if asHEq then return some (← mkHEqRefl e₁) else return some (← mkEqRefl e₁) let some n₁ ← getEntry e₁ | return none let some n₂ ← getEntry e₂ | return none if n₁.root != n₂.root then return none let heqProofs ← hasHEqProofs n₁.root -- 1. Retrieve "path" from `e₁` to `root` let mut path₁ : Array Expr := #[] let mut Hs₁ : Array EntryExpr := #[] let mut visited : RBExprSet := ∅ let mut it₁ := e₁ repeat visited := visited.insert it₁ let some it₁N ← getEntry it₁ | failure let some t := it₁N.target | break path₁ := path₁.push t let some p := it₁N.proof | failure Hs₁ := Hs₁.push (← flipProof p it₁N.flipped heqProofs) it₁ := t guard (it₁ == n₁.root) -- 2. The path from `e₂` to root must have at least one element `c` in visited -- Retrieve "path" from `e₂` to `c` let mut path₂ : Array Expr := #[] let mut Hs₂ : Array EntryExpr := #[] let mut it₂ := e₂ repeat if visited.contains it₂ then break -- found common let some it₂N ← getEntry it₂ | failure let some t := it₂N.target | failure path₂ := path₂.push it₂ let some p := it₂N.proof | failure Hs₂ := Hs₂.push (← flipProof p (!it₂N.flipped) heqProofs) it₂ := t -- `it₂` is the common element... -- 3. 
Shrink `path₁`/`Hs₁` until we find `it₂` (the common element) repeat if path₁.isEmpty then guard (it₂ == e₁) break if path₁.back == it₂ then -- found it! break path₁ := path₁.pop Hs₁ := Hs₁.pop -- 4. Build transitivity proof let mut pr? : Option Expr := none let mut lhs := e₁ for i in [:path₁.size] do pr? ← some <$> mkTransOpt pr? (← mkProof lhs path₁[i]! Hs₁[i]! heqProofs) heqProofs lhs := path₁[i]! let mut i := Hs₂.size while i > 0 do i := i - 1 pr? ← some <$> mkTransOpt pr? (← mkProof lhs path₂[i]! Hs₂[i]! heqProofs) heqProofs lhs := path₂[i]! let mut some pr := pr? | failure if heqProofs && !asHEq then pr ← mkAppM ``eq_of_heq #[pr] else if !heqProofs && asHEq then pr ← mkAppM ``heq_of_eq #[pr] return pr /-- Build a proof for `e₁ = e₂`. The result is `none` if `e₁` and `e₂` are not in the same equivalence class. -/ @[inline] partial def getEqProof (e₁ e₂ : Expr) : CCM (Option Expr) := getEqProofCore e₁ e₂ false /-- Build a proof for `HEq e₁ e₂`. The result is `none` if `e₁` and `e₂` are not in the same equivalence class. -/ @[inline] partial def getHEqProof (e₁ e₂ : Expr) : CCM (Option Expr) := getEqProofCore e₁ e₂ true end /-- Build a proof for `e = True`. Fails if `e` is not known to be true. -/ def getEqTrueProof (e : Expr) : CCM Expr := do guard (← isEqTrue e) let some p ← getEqProof e (.const ``True []) | failure return p /-- Build a proof for `e = False`. Fails if `e` is not known to be false. -/ def getEqFalseProof (e : Expr) : CCM Expr := do guard (← isEqFalse e) let some p ← getEqProof e (.const ``False []) | failure return p /-- Build a proof for `a = b`. Fails if `a` and `b` are not known to be equal. -/ def getPropEqProof (a b : Expr) : CCM Expr := do guard (← isEqv a b) let some p ← getEqProof a b | failure return p /-- Build a proof of `False` if the context is inconsistent. Returns `none` if `False` is not known to be true. 
-/ def getInconsistencyProof : CCM (Option Expr) := do guard !(← get).frozePartitions if let some p ← getEqProof (.const ``True []) (.const ``False []) then return some (← mkAppM ``false_of_true_eq_false #[p]) else return none /-- Auxiliary function for comparing `lhs₁ ~ rhs₁` and `lhs₂ ~ rhs₂`, when `~` is symmetric/commutative. It returns `true` (equal) for `a ~ b` `b ~ a`-/ def compareSymmAux (lhs₁ rhs₁ lhs₂ rhs₂ : Expr) : CCM Bool := do let lhs₁ ← getRoot lhs₁ let rhs₁ ← getRoot rhs₁ let lhs₂ ← getRoot lhs₂ let rhs₂ ← getRoot rhs₂ let (lhs₁, rhs₁) := if rhs₁.lt lhs₁ then (rhs₁, lhs₁) else (lhs₁, rhs₁) let (lhs₂, rhs₂) := if rhs₂.lt lhs₂ then (rhs₂, lhs₂) else (lhs₂, rhs₂) return lhs₁ == lhs₂ && rhs₁ == rhs₂ /-- Given ``k₁ := (R₁ lhs₁ rhs₁, `R₁)`` and ``k₂ := (R₂ lhs₂ rhs₂, `R₂)``, return `true` if `R₁ lhs₁ rhs₁` is equivalent to `R₂ lhs₂ rhs₂` modulo the symmetry of `R₁` and `R₂`. -/ def compareSymm : (k₁ k₂ : Expr × Name) → CCM Bool | (e₁, n₁), (e₂, n₂) => do if n₁ != n₂ then return false if n₁ == ``Eq || n₁ == ``Iff then compareSymmAux e₁.appFn!.appArg! e₁.appArg! e₂.appFn!.appArg! e₂.appArg! else let some (_, lhs₁, rhs₁) ← e₁.relSidesIfSymm? | failure let some (_, lhs₂, rhs₂) ← e₂.relSidesIfSymm? | failure compareSymmAux lhs₁ rhs₁ lhs₂ rhs₂ /-- Given `e := R lhs rhs`, if `R` is a reflexive relation and `lhs` is equivalent to `rhs`, add equality `e = True`. -/ def checkEqTrue (e : Expr) : CCM Unit := do let some (_, lhs, rhs) ← e.relSidesIfRefl? | return if ← isEqv e (.const ``True []) then return -- it is already equivalent to `True` let lhsR ← getRoot lhs let rhsR ← getRoot rhs if lhsR != rhsR then return -- Add `e = True` pushEq e (.const ``True []) .eqTrue /-- If the congruence table (`congruences` field) has congruent expression to `e`, add the equality to the todo list. If not, add `e` to the congruence table. -/ def addCongruenceTable (e : Expr) : CCM Unit := do guard e.isApp let k ← mkCongruencesKey e if let some es := (← get).congruences.find? 
k then for oldE in es do if ← isCongruent e oldE then -- Found new equivalence: `e ~ oldE` -- 1. Update `cgRoot` field for `e` let some currEntry ← getEntry e | failure let newEntry := { currEntry with cgRoot := oldE } modify fun ccs => { ccs with entries := ccs.entries.insert e newEntry } -- 2. Put new equivalence in the todo queue -- TODO(Leo): check if the following line is a bottleneck let heqProof ← (!·) <$> pureIsDefEq (← inferType e) (← inferType oldE) pushTodo e oldE .congr heqProof return modify fun ccs => { ccs with congruences := ccs.congruences.insert k (e :: es) } else modify fun ccs => { ccs with congruences := ccs.congruences.insert k [e] } /-- If the symm congruence table (`symmCongruences` field) has congruent expression to `e`, add the equality to the todo list. If not, add `e` to the symm congruence table. -/ def addSymmCongruenceTable (e : Expr) : CCM Unit := do let some (rel, lhs, rhs) ← e.relSidesIfSymm? | failure let k ← mkSymmCongruencesKey lhs rhs let newP := (e, rel) if let some ps := (← get).symmCongruences.find? k then for p in ps do if ← compareSymm newP p then -- Found new equivalence: `e ~ p.1` -- 1. Update `cgRoot` field for `e` let some currEntry ← getEntry e | failure let newEntry := { currEntry with cgRoot := p.1 } modify fun ccs => { ccs with entries := ccs.entries.insert e newEntry } -- 2. Put new equivalence in the TODO queue -- NOTE(gabriel): support for symmetric relations is pretty much broken, -- since it ignores all arguments except the last two ones. -- e.g. this would claim that `ModEq n a b` and `ModEq m a b` are equivalent. 
        -- Whitelist some relations to contain breakage:
        if rel == ``Eq || e.getAppNumArgs == 2 then
          pushEq e p.1 .congr
        checkEqTrue e
        return
    modify fun ccs =>
      { ccs with symmCongruences := ccs.symmCongruences.insert k (newP :: ps) }
    checkEqTrue e
  else
    modify fun ccs =>
      { ccs with symmCongruences := ccs.symmCongruences.insert k [newP] }
    checkEqTrue e

/-- Given subsingleton elements `a` and `b` which are not necessarily of the same type, if the
types of `a` and `b` are equivalent, add the (heterogeneous) equality proof between `a` and `b` to
the todo list. -/
def pushSubsingletonEq (a b : Expr) : CCM Unit := do
  -- Remark: we must normalize here because we have to do so before
  -- internalizing the types of `a` and `b`.
  let A ← normalize (← inferType a)
  let B ← normalize (← inferType b)
  -- TODO(Leo): check if the following test is a performance bottleneck
  if ← pureIsDefEq A B then
    -- Same type: a homogeneous equality via `Subsingleton.elim` suffices.
    -- TODO(Leo): to improve performance we can create the following proof lazily
    let proof ← mkAppM ``Subsingleton.elim #[a, b]
    pushEq a b proof
  else
    -- Different (but equivalent) types: build a heterogeneous equality from the type equality.
    let some AEqB ← getEqProof A B | failure
    let proof ← mkAppM ``Subsingleton.helim #[AEqB, a, b]
    pushHEq a b proof

/-- Given the equivalent expressions `oldRoot` and `newRoot` such that the root of `oldRoot` is
`newRoot`, if `oldRoot` has a subsingleton representative, push the equality proof between the
representatives of `oldRoot` and `newRoot` to the todo list, or, if `newRoot` has no
representative yet, record `oldRoot`'s representative as the representative for `newRoot`. -/
def checkNewSubsingletonEq (oldRoot newRoot : Expr) : CCM Unit := do
  guard (← isEqv oldRoot newRoot)
  guard ((← getRoot oldRoot) == newRoot)
  let some it₁ := (← get).subsingletonReprs.find? oldRoot | return
  if let some it₂ := (← get).subsingletonReprs.find? newRoot then
    pushSubsingletonEq it₁ it₂
  else
    modify fun ccs =>
      { ccs with subsingletonReprs := ccs.subsingletonReprs.insert newRoot it₁ }

/-- Get all lambda expressions in the equivalence class of `e` and append to `r`.

`e` must be the root of its equivalence class.
-/ def getEqcLambdas (e : Expr) (r : Array Expr := #[]) : CCM (Array Expr) := do guard ((← getRoot e) == e) let mut r := r let some ee ← getEntry e | failure unless ee.hasLambdas do return r let mut it := e repeat if it.isLambda then r := r.push it let some itN ← getEntry it | failure it := itN.next until it == e return r /-- Remove `fn` and expressions whose type isn't def-eq to `fn`'s type out from `lambdas`, return the remaining lambdas applied to the reversed arguments. -/ def propagateBeta (fn : Expr) (revArgs : Array Expr) (lambdas : Array Expr) (newLambdaApps : Array Expr := #[]) : CCM (Array Expr) := do let mut newLambdaApps := newLambdaApps for lambda in lambdas do guard lambda.isLambda if fn != lambda then if ← pureIsDefEq (← inferType fn) (← inferType lambda) then let newApp := mkAppRev lambda revArgs newLambdaApps := newLambdaApps.push newApp return newLambdaApps /-- Given `a`, `a₁` and `a₁NeB : a₁ ≠ b`, return a proof of `a ≠ b` if `a` and `a₁` are in the same equivalence class. -/ def mkNeOfEqOfNe (a a₁ a₁NeB : Expr) : CCM (Option Expr) := do guard (← isEqv a a₁) if a == a₁ then return some a₁NeB let aEqA₁ ← getEqProof a a₁ match aEqA₁ with | none => return none -- failed to build proof | some aEqA₁ => mkAppM ``ne_of_eq_of_ne #[aEqA₁, a₁NeB] /-- Given `aNeB₁ : a ≠ b₁`, `b₁` and `b`, return a proof of `a ≠ b` if `b` and `b₁` are in the same equivalence class. -/ def mkNeOfNeOfEq (aNeB₁ b₁ b : Expr) : CCM (Option Expr) := do guard (← isEqv b b₁) if b == b₁ then return some aNeB₁ let b₁EqB ← getEqProof b b₁ match b₁EqB with | none => return none -- failed to build proof | some b₁EqB => mkAppM ``ne_of_ne_of_eq #[aNeB₁, b₁EqB] /-- If `e` is of the form `op e₁ e₂` where `op` is an associative and commutative binary operator, return the canonical form of `op`. -/ def isAC (e : Expr) : CCM (Option Expr) := do let .app (.app op _) _ := e | return none let ccs ← get if let some cop := ccs.canOps.find? op then let some b := ccs.opInfo.find? 
cop | throwError "opInfo should contain all canonical operators in canOps" return bif b then some cop else none for (cop, b) in ccs.opInfo do if ← pureIsDefEq op cop then modify fun _ => { ccs with canOps := ccs.canOps.insert op cop } return bif b then some cop else none let b ← try let aop ← mkAppM ``Std.Associative #[op] let some _ ← synthInstance? aop | failure let cop ← mkAppM ``Std.Commutative #[op] let some _ ← synthInstance? cop | failure pure true catch _ => pure false modify fun _ => { ccs with canOps := ccs.canOps.insert op op opInfo := ccs.opInfo.insert op b } return bif b then some op else none open MessageData in /-- Given `lhs`, `rhs`, and `header := "my header:"`, Trace `my header: lhs = rhs`. -/ def dbgTraceACEq (header : String) (lhs rhs : ACApps) : CCM Unit := do let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group (ofFormat (header ++ .line) ++ ccs.ppACApps lhs ++ ofFormat (.line ++ "=" ++ .line) ++ ccs.ppACApps rhs) open MessageData in /-- Trace the state of AC module. -/ def dbgTraceACState : CCM Unit := do let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group ("state: " ++ nest 6 ccs.ppAC) /-- Return the proof of `e₁ = e₂` using `ac_rfl` tactic. -/ def mkACProof (e₁ e₂ : Expr) : MetaM Expr := do let eq ← mkEq e₁ e₂ let .mvar m ← mkFreshExprSyntheticOpaqueMVar eq | failure AC.rewriteUnnormalized m let pr ← instantiateMVars (.mvar m) mkExpectedTypeHint pr eq /-- Given `tr := t*r` `sr := s*r` `tEqs : t = s`, return a proof for `tr = sr` We use `a*b` to denote an AC application. That is, `(a*b)*(c*a)` is the term `a*a*b*c`. 
-/
def mkACSimpProof (tr t s r sr : ACApps) (tEqs : DelayedExpr) : MetaM DelayedExpr := do
  if tr == t then
    -- `r` is empty: the rewrite applies to the whole term.
    return tEqs
  else if tr == sr then
    -- Rewriting changes nothing: reflexivity.
    let some tre := tr.toExpr | failure
    DelayedExpr.ofExpr <$> mkEqRefl tre
  else
    let .apps op _ := tr | failure
    let some re := r.toExpr | failure
    let some te := t.toExpr | failure
    let some se := s.toExpr | failure
    let some tre := tr.toExpr | failure
    let some sre := sr.toExpr | failure
    let opr := op.app re -- `(*) r`
    let rt := mkApp2 op re te -- `r * t`
    let rs := mkApp2 op re se -- `r * s`
    let rtEqrs := DelayedExpr.congrArg opr tEqs -- `r * t = r * s`, from `t = s`
    let trEqrt ← mkACProof tre rt -- `t * r = r * t`, by `ac_rfl`
    let rsEqsr ← mkACProof rs sre -- `r * s = s * r`, by `ac_rfl`
    -- Chain: `tr = r * t = r * s = sr`.
    return .eqTrans (.eqTrans trEqrt rtEqrs) rsEqsr

/-- Given `ra := a*r` `sb := b*s` `ts := t*s` `tr := t*r` `tsEqa : t*s = a` `trEqb : t*r = b`,
return a proof for `ra = sb`.

We use `a*b` to denote an AC application. That is, `(a*b)*(c*a)` is the term `a*a*b*c`. -/
def mkACSuperposeProof (ra sb a b r s ts tr : ACApps) (tsEqa trEqb : DelayedExpr) :
    MetaM DelayedExpr := do
  let .apps _ _ := tr | failure
  let .apps op _ := ts | failure
  let some tse := ts.toExpr | failure
  let some re := r.toExpr | failure
  let some tre := tr.toExpr | failure
  let some se := s.toExpr | failure
  let some ae := a.toExpr | failure
  let some be := b.toExpr | failure
  let some rae := ra.toExpr | failure
  let some sbe := sb.toExpr | failure
  let tsrEqar := DelayedExpr.congrFun (.congrArg op tsEqa) r -- `(t * s) * r = a * r`
  let trsEqbs := DelayedExpr.congrFun (.congrArg op trEqb) s -- `(t * r) * s = b * s`
  let tsr := mkApp2 op tse re -- `(t * s) * r`
  let trs := mkApp2 op tre se -- `(t * r) * s`
  let ar := mkApp2 op ae re -- `a * r`
  let bs := mkApp2 op be se -- `b * s`
  let tsrEqtrs ← mkACProof tsr trs -- `(t * s) * r = (t * r) * s`
  let raEqar ← mkACProof rae ar -- `r * a = a * r`
  let bsEqsb ← mkACProof bs sbe -- `b * s = s * b`
  -- Chain: `ra = a * r = (t * s) * r = (t * r) * s = b * s = sb`.
  return .eqTrans raEqar (.eqTrans (.eqSymm tsrEqar) (.eqTrans tsrEqtrs (.eqTrans trsEqbs bsEqsb)))

/-- Given `e := lhs * r` and `H : lhs = rhs`, return `rhs *
r` and the proof of `e = rhs * r`. -/ def simplifyACCore (e lhs rhs : ACApps) (H : DelayedExpr) : CCM (ACApps × DelayedExpr) := do guard (lhs.isSubset e) if e == lhs then return (rhs, H) else let .apps op _ := e | failure let newArgs := e.diff lhs let r : ACApps := if newArgs.isEmpty then default else .mkApps op newArgs let newArgs := ACApps.append op rhs newArgs let newE := ACApps.mkApps op newArgs let some true := (← get).opInfo.find? op | failure let newPr ← mkACSimpProof e lhs rhs r newE H return (newE, newPr) /-- The single step of `simplifyAC`. Simplifies an expression `e` by either simplifying one argument to the AC operator, or the whole expression. -/ def simplifyACStep (e : ACApps) : CCM (Option (ACApps × DelayedExpr)) := do if let .apps _ args := e then for h : i in [:args.size] do if i == 0 || (args[i]'h.2) != (args[i - 1]'(Nat.lt_of_le_of_lt (i.sub_le 1) h.2)) then let some ae := (← get).acEntries.find? (args[i]'h.2) | failure let occs := ae.RLHSOccs let mut Rlhs? : Option ACApps := none for Rlhs in occs do if Rlhs.isSubset e then Rlhs? := some Rlhs break if let some Rlhs := Rlhs? then let some (Rrhs, H) := (← get).acR.find? Rlhs | failure return (some <| ← simplifyACCore e Rlhs Rrhs H) else if let some p := (← get).acR.find? e then return some p return none /-- If `e` can be simplified by the AC module, return the simplified term and the proof term of the equality. -/ def simplifyAC (e : ACApps) : CCM (Option (ACApps × DelayedExpr)) := do let mut some (curr, pr) ← simplifyACStep e | return none repeat let some (newCurr, newPr) ← simplifyACStep curr | break pr := .eqTransOpt e curr newCurr pr newPr curr := newCurr return some (curr, pr) /-- Insert or erase `lhs` to the occurrences of `arg` on an equality in `acR`. -/ def insertEraseROcc (arg : Expr) (lhs : ACApps) (inLHS isInsert : Bool) : CCM Unit := do let some entry := (← get).acEntries.find? 
arg | failure
  let occs := entry.ROccs inLHS
  let newOccs := if isInsert then occs.insert lhs else occs.erase (compare lhs)
  let newEntry :=
    if inLHS then { entry with RLHSOccs := newOccs } else { entry with RRHSOccs := newOccs }
  modify fun ccs => { ccs with acEntries := ccs.acEntries.insert arg newEntry }

/-- Insert or erase `lhs` into/from the occurrences of the arguments of `e` on an equality in
`acR`. -/
def insertEraseROccs (e lhs : ACApps) (inLHS isInsert : Bool) : CCM Unit := do
  match e with
  | .apps _ args =>
    insertEraseROcc args[0]! lhs inLHS isInsert
    -- Arguments are sorted, so skipping an argument equal to its predecessor
    -- visits each distinct argument exactly once.
    for i in [1:args.size] do
      if args[i]! != args[i - 1]! then
        insertEraseROcc args[i]! lhs inLHS isInsert
  | .ofExpr e => insertEraseROcc e lhs inLHS isInsert

/-- Insert `lhs` into the occurrences of the arguments of `e` on an equality in `acR`. -/
@[inline]
def insertROccs (e lhs : ACApps) (inLHS : Bool) : CCM Unit :=
  insertEraseROccs e lhs inLHS true

/-- Erase `lhs` from the occurrences of the arguments of `e` on an equality in `acR`. -/
@[inline]
def eraseROccs (e lhs : ACApps) (inLHS : Bool) : CCM Unit :=
  insertEraseROccs e lhs inLHS false

/-- Insert `lhs` into the occurrences on an equality in `acR` corresponding to the equality
`lhs := rhs`. -/
@[inline]
def insertRBHSOccs (lhs rhs : ACApps) : CCM Unit := do
  insertROccs lhs lhs true
  insertROccs rhs lhs false

/-- Erase `lhs` from the occurrences on an equality in `acR` corresponding to the equality
`lhs := rhs`. -/
@[inline]
def eraseRBHSOccs (lhs rhs : ACApps) : CCM Unit := do
  eraseROccs lhs lhs true
  eraseROccs rhs lhs false

/-- Insert `lhs` into the occurrences of the arguments of `e` on the right hand side of an
equality in `acR`. -/
@[inline]
def insertRRHSOccs (e lhs : ACApps) : CCM Unit :=
  insertROccs e lhs false

/-- Erase `lhs` from the occurrences of the arguments of `e` on the right hand side of an
equality in `acR`.
-/ @[inline] def eraseRRHSOccs (e lhs : ACApps) : CCM Unit := eraseROccs e lhs false open MessageData in /-- Try to simplify the right hand sides of equalities in `acR` by `H : lhs = rhs`. -/ def composeAC (lhs rhs : ACApps) (H : DelayedExpr) : CCM Unit := do let some x := (← get).getVarWithLeastRHSOccs lhs | failure let some ent := (← get).acEntries.find? x | failure let occs := ent.RRHSOccs for Rlhs in occs do let some (Rrhs, RH) := (← get).acR.find? Rlhs | failure if lhs.isSubset Rrhs then let (newRrhs, RrhsEqNewRrhs) ← simplifyACCore Rrhs lhs rhs H let newRH := DelayedExpr.eqTransOpt Rlhs Rrhs newRrhs RH RrhsEqNewRrhs modify fun ccs => { ccs with acR := ccs.acR.insert Rlhs (newRrhs, newRH) } eraseRRHSOccs Rrhs Rlhs insertRRHSOccs newRrhs Rlhs let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group <| let oldRw := paren (ccs.ppACApps Rlhs ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps Rrhs) let newRw := paren (ccs.ppACApps lhs ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps rhs) "compose: " ++ nest 9 (group (oldRw ++ ofFormat (Format.line ++ "with" ++ .line) ++ newRw) ++ ofFormat (Format.line ++ ":=" ++ .line) ++ ccs.ppACApps newRrhs) open MessageData in /-- Try to simplify the left hand sides of equalities in `acR` by `H : lhs = rhs`. -/ def collapseAC (lhs rhs : ACApps) (H : DelayedExpr) : CCM Unit := do let some x := (← get).getVarWithLeastLHSOccs lhs | failure let some ent := (← get).acEntries.find? x | failure let occs := ent.RLHSOccs for Rlhs in occs do if lhs.isSubset Rlhs then let some (Rrhs, RH) := (← get).acR.find? 
Rlhs | failure eraseRBHSOccs Rlhs Rrhs modify fun ccs => { ccs with acR := ccs.acR.erase Rlhs } let (newRlhs, RlhsEqNewRlhs) ← simplifyACCore Rlhs lhs rhs H let newRlhsEqRlhs := DelayedExpr.eqSymmOpt Rlhs newRlhs RlhsEqNewRlhs let newRH := DelayedExpr.eqTransOpt newRlhs Rlhs Rrhs newRlhsEqRlhs RH modifyACTodo fun todo => todo.push (newRlhs, Rrhs, newRH) let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group <| let newRw := paren (ccs.ppACApps lhs ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps rhs) let oldRw := paren (ccs.ppACApps Rrhs ++ ofFormat (Format.line ++ "<--" ++ .line) ++ ccs.ppACApps Rlhs) "collapse: " ++ nest 10 (group (newRw ++ ofFormat (Format.line ++ "at" ++ .line) ++ oldRw) ++ ofFormat (Format.line ++ ":=" ++ .line) ++ ccs.ppACApps newRlhs) open MessageData in /-- Given `tsEqa : ts = a`, for each equality `trEqb : tr = b` in `acR` where the intersection `t` of `ts` and `tr` is nonempty, let `ts = t*s` and `tr := t*r`, add a new equality `r*a = s*b`. -/ def superposeAC (ts a : ACApps) (tsEqa : DelayedExpr) : CCM Unit := do let .apps op args := ts | return for hi : i in [:args.size] do if i == 0 || (args[i]'hi.2) != (args[i - 1]'(Nat.lt_of_le_of_lt (i.sub_le 1) hi.2)) then let some ent := (← get).acEntries.find? (args[i]'hi.2) | failure let occs := ent.RLHSOccs for tr in occs do let .apps optr _ := tr | continue unless optr == op do continue let some (b, trEqb) := (← get).acR.find? tr | failure let tArgs := ts.intersection tr guard !tArgs.isEmpty let t := ACApps.mkApps op tArgs let sArgs := ts.diff t guard !sArgs.isEmpty let rArgs := tr.diff t guard !rArgs.isEmpty let s := ACApps.mkApps op sArgs let r := ACApps.mkApps op rArgs let ra := ACApps.mkFlatApps op r a let sb := ACApps.mkFlatApps op s b let some true := (← get).opInfo.find? 
op | failure let raEqsb ← mkACSuperposeProof ra sb a b r s ts tr tsEqa trEqb modifyACTodo fun todo => todo.push (ra, sb, raEqsb) let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group <| let rw₁ := paren (ccs.ppACApps ts ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps a) let rw₂ := paren (ccs.ppACApps tr ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps b) let eq := paren (ccs.ppACApps ra ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps sb) "superpose: " ++ nest 11 (group (rw₁ ++ ofFormat (Format.line ++ "with" ++ .line) ++ rw₂) ++ ofFormat (Format.line ++ ":=" ++ .line) ++ eq) open MessageData in /-- Process the tasks in the `acTodo` field. -/ def processAC : CCM Unit := do repeat let acTodo ← getACTodo let mut some (lhs, rhs, H) := acTodo.back? | break modifyACTodo fun _ => acTodo.pop let lhs₀ := lhs let rhs₀ := rhs dbgTraceACEq "process eq:" lhs rhs -- Forward simplification lhs/rhs if let some p ← simplifyAC lhs then H := .eqTransOpt p.1 lhs rhs (.eqSymmOpt lhs p.1 p.2) H lhs := p.1 if let some p ← simplifyAC rhs then H := .eqTransOpt lhs rhs p.1 H p.2 rhs := p.1 if lhs != lhs₀ || rhs != rhs₀ then dbgTraceACEq "after simp:" lhs rhs -- Skip propagation if the equality is trivial. 
if lhs == rhs then trace[Debug.Meta.Tactic.cc.ac] "trivial" continue -- Propagate new equality to congruence closure module if let .ofExpr lhse := lhs then if let .ofExpr rhse := rhs then if (← getRoot lhse) != (← getRoot rhse) then pushEq lhse rhse (.ofDExpr H) -- Orient if compare lhs rhs == .lt then H := .eqSymmOpt lhs rhs H (lhs, rhs) := (rhs, lhs) -- Backward simplification composeAC lhs rhs H collapseAC lhs rhs H -- Superposition superposeAC lhs rhs H -- Update acR modify fun ccs => { ccs with acR := ccs.acR.insert lhs (rhs, H) } insertRBHSOccs lhs rhs let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group <| "new rw: " ++ group (ccs.ppACApps lhs ++ ofFormat (Format.line ++ "-->" ++ .line) ++ ccs.ppACApps rhs) /-- Given AC variables `e₁` and `e₂` which are in the same equivalence class, add the proof of `e₁ = e₂` to the AC module. -/ def addACEq (e₁ e₂ : Expr) : CCM Unit := do dbgTraceACEq "cc eq:" e₁ e₂ modifyACTodo fun acTodo => acTodo.push (e₁, e₂, .eqProof e₁ e₂) processAC dbgTraceACState /-- If the root expression of `e` is AC variable, add equality to AC module. If not, register the AC variable to the root entry. -/ def setACVar (e : Expr) : CCM Unit := do let eRoot ← getRoot e let some rootEntry ← getEntry eRoot | failure if let some acVar := rootEntry.acVar then addACEq acVar e else let newRootEntry := { rootEntry with acVar := some e } modify fun ccs => { ccs with entries := ccs.entries.insert eRoot newRootEntry } /-- If `e` isn't an AC variable, set `e` as an new AC variable. -/ def internalizeACVar (e : Expr) : CCM Bool := do let ccs ← get if ccs.acEntries.contains e then return false modify fun _ => { ccs with acEntries := ccs.acEntries.insert e { idx := ccs.acEntries.size } } setACVar e return true /-- Given `e := op₁ (op₂ a₁ a₂) (op₃ a₃ a₄)` where `opₙ`s are canonicalized to `op`, internalize `aₙ`s as AC variables and return `(op (op a₁ a₂) (op a₃ a₄), args ++ #[a₁, a₂, a₃, a₄])`. 
-/ partial def convertAC (op e : Expr) (args : Array Expr := #[]) : CCM (Array Expr × Expr) := do if let some currOp ← isAC e then if op == currOp then let (args, arg₁) ← convertAC op e.appFn!.appArg! args let (args, arg₂) ← convertAC op e.appArg! args return (args, mkApp2 op arg₁ arg₂) let _ ← internalizeACVar e return (args.push e, e) open MessageData in /-- Internalize `e` so that the AC module can deal with the given expression. If the expression does not contain an AC operator, or the parent expression is already processed by `internalizeAC`, this operation does nothing. -/ def internalizeAC (e : Expr) (parent? : Option Expr) : CCM Unit := do let some op ← isAC e | return let parentOp? ← parent?.casesOn (pure none) isAC if parentOp?.any (· == op) then return unless (← internalizeACVar e) do return let (args, norme) ← convertAC op e let rep := ACApps.mkApps op args let some true := (← get).opInfo.find? op | failure let some repe := rep.toExpr | failure let pr ← mkACProof norme repe let ccs ← get trace[Debug.Meta.Tactic.cc.ac] group <| let d := paren (ccs.ppACApps e ++ ofFormat (" :=" ++ Format.line) ++ ofExpr e) "new term: " ++ d ++ ofFormat (Format.line ++ "===>" ++ .line) ++ ccs.ppACApps rep modifyACTodo fun todo => todo.push (e, rep, pr) processAC dbgTraceACState mutual /-- The specialized `internalizeCore` for applications or literals. -/ partial def internalizeAppLit (e : Expr) : CCM Unit := do if ← isInterpretedValue e then mkEntry e true if (← get).values then return -- we treat values as atomic symbols else mkEntry e false if (← get).values && isValue e then return -- we treat values as atomic symbols -- At this point we should have handled a literal; otherwise we fail. unless e.isApp do return if let some (_, lhs, rhs) ← e.relSidesIfSymm? 
then
    internalizeCore lhs (some e)
    internalizeCore rhs (some e)
    addOccurrence e lhs true
    addOccurrence e rhs true
    addSymmCongruenceTable e
  else if (← mkCCCongrTheorem e).isSome then
    let fn := e.getAppFn
    let apps := e.getAppApps
    guard (apps.size > 0)
    guard (apps.back == e)
    let mut pinfo : List ParamInfo := []
    let state ← get
    if state.ignoreInstances then
      pinfo := (← getFunInfoNArgs fn apps.size).paramInfo.toList
    if state.hoFns.isSome && fn.isConst && !(state.hoFns.iget.contains fn.constName) then
      -- First-order case: only the full application is tracked in the congruence table.
      for h : i in [:apps.size] do
        let arg := (apps[i]'h.2).appArg!
        addOccurrence e arg false
        if pinfo.head?.any ParamInfo.isInstImplicit then
          -- We do not recurse on instances when `(← get).config.ignoreInstances` is `true`.
          mkEntry arg false
          propagateInstImplicit arg
        else
          internalizeCore arg (some e)
        unless pinfo.isEmpty do
          pinfo := pinfo.tail
      internalizeCore fn (some e)
      addOccurrence e fn false
      setFO e
      addCongruenceTable e
    else
      -- Expensive case where we store a quadratic number of occurrences,
      -- as described in the paper "Congruence Closure in Intensional Type Theory"
      for h : i in [:apps.size] do
        let curr := apps[i]'h.2
        let .app currFn currArg := curr | unreachable!
        if i < apps.size - 1 then
          mkEntry curr false
        for h : j in [i:apps.size] do
          addOccurrence (apps[j]'h.2) currArg false
          addOccurrence (apps[j]'h.2) currFn false
        if pinfo.head?.any ParamInfo.isInstImplicit then
          -- We do not recurse on instances when `(← get).config.ignoreInstances` is `true`.
          mkEntry currArg false
          mkEntry currFn false
          propagateInstImplicit currArg
        else
          internalizeCore currArg (some e)
          mkEntry currFn false
        unless pinfo.isEmpty do
          pinfo := pinfo.tail
        addCongruenceTable curr
  applySimpleEqvs e

/-- Internalize `e` so that the congruence closure can deal with the given expression.

Don't forget to process the tasks in the `todo` field later. -/
partial def internalizeCore (e : Expr) (parent? : Option Expr) : CCM Unit := do
  guard !e.hasLooseBVars
  /- We allow metavariables after partitions have been frozen.
-/ if e.hasExprMVar && !(← get).frozePartitions then return /- Check whether `e` has already been internalized. -/ if (← getEntry e).isNone then match e with | .bvar _ => unreachable! | .sort _ => pure () | .const _ _ | .mvar _ => mkEntry e false | .lam _ _ _ _ | .letE _ _ _ _ _ => mkEntry e false | .fvar f => mkEntry e false if let some v ← f.getValue? then pushReflEq e v | .mdata _ e' => mkEntry e false internalizeCore e' e addOccurrence e e' false pushReflEq e e' | .forallE _ t b _ => if e.isArrow then if ← isProp t <&&> isProp b then internalizeCore t e internalizeCore b e addOccurrence e t false addOccurrence e b false propagateImpUp e if ← isProp e then mkEntry e false | .app _ _ | .lit _ => internalizeAppLit e | .proj sn i pe => mkEntry e false let some fn := (getStructureFields (← getEnv) sn)[i]? | failure let e' ← pe.mkDirectProjection fn internalizeAppLit e' pushReflEq e e' /- Remark: if should invoke `internalizeAC` even if the test `(← getEntry e).isNone` above failed. Reason, the first time `e` was visited, it may have been visited with a different parent. -/ if (← get).ac then internalizeAC e parent? /-- Propagate equality from `a` and `b` to `a ↔ b`. -/ partial def propagateIffUp (e : Expr) : CCM Unit := do let some (a, b) := e.iff? | failure if ← isEqTrue a then -- `a = True → (Iff a b) = b` pushEq e b (mkApp3 (.const ``iff_eq_of_eq_true_left []) a b (← getEqTrueProof a)) else if ← isEqTrue b then -- `b = True → (Iff a b) = a` pushEq e a (mkApp3 (.const ``iff_eq_of_eq_true_right []) a b (← getEqTrueProof b)) else if ← isEqv a b then -- `a = b → (Iff a b) = True` pushEq e (.const ``True []) (mkApp3 (.const ``iff_eq_true_of_eq []) a b (← getPropEqProof a b)) /-- Propagate equality from `a` and `b` to `a ∧ b`. -/ partial def propagateAndUp (e : Expr) : CCM Unit := do let some (a, b) := e.and? 
| failure if ← isEqTrue a then -- `a = True → (And a b) = b` pushEq e b (mkApp3 (.const ``and_eq_of_eq_true_left []) a b (← getEqTrueProof a)) else if ← isEqTrue b then -- `b = True → (And a b) = a` pushEq e a (mkApp3 (.const ``and_eq_of_eq_true_right []) a b (← getEqTrueProof b)) else if ← isEqFalse a then -- `a = False → (And a b) = False` pushEq e (.const ``False []) (mkApp3 (.const ``and_eq_of_eq_false_left []) a b (← getEqFalseProof a)) else if ← isEqFalse b then -- `b = False → (And a b) = False` pushEq e (.const ``False []) (mkApp3 (.const ``and_eq_of_eq_false_right []) a b (← getEqFalseProof b)) else if ← isEqv a b then -- `a = b → (And a b) = a` pushEq e a (mkApp3 (.const ``and_eq_of_eq []) a b (← getPropEqProof a b)) -- We may also add `a = Not b -> (And a b) = False` /-- Propagate equality from `a` and `b` to `a ∨ b`. -/ partial def propagateOrUp (e : Expr) : CCM Unit := do let some (a, b) := e.app2? ``Or | failure if ← isEqTrue a then -- `a = True → (Or a b) = True` pushEq e (.const ``True []) (mkApp3 (.const ``or_eq_of_eq_true_left []) a b (← getEqTrueProof a)) else if ← isEqTrue b then -- `b = True → (Or a b) = True` pushEq e (.const ``True []) (mkApp3 (.const ``or_eq_of_eq_true_right []) a b (← getEqTrueProof b)) else if ← isEqFalse a then -- `a = False → (Or a b) = b` pushEq e b (mkApp3 (.const ``or_eq_of_eq_false_left []) a b (← getEqFalseProof a)) else if ← isEqFalse b then -- `b = False → (Or a b) = a` pushEq e a (mkApp3 (.const ``or_eq_of_eq_false_right []) a b (← getEqFalseProof b)) else if ← isEqv a b then -- `a = b → (Or a b) = a` pushEq e a (mkApp3 (.const ``or_eq_of_eq []) a b (← getPropEqProof a b)) -- We may also add `a = Not b -> (Or a b) = True` /-- Propagate equality from `a` to `¬a`. -/ partial def propagateNotUp (e : Expr) : CCM Unit := do let some a := e.not? 
| failure if ← isEqTrue a then -- `a = True → Not a = False` pushEq e (.const ``False []) (mkApp2 (.const ``not_eq_of_eq_true []) a (← getEqTrueProof a)) else if ← isEqFalse a then -- `a = False → Not a = True` pushEq e (.const ``True []) (mkApp2 (.const ``not_eq_of_eq_false []) a (← getEqFalseProof a)) else if ← isEqv a e then let falsePr := mkApp2 (.const ``false_of_a_eq_not_a []) a (← getPropEqProof a e) let H := Expr.app (.const ``true_eq_false_of_false []) falsePr pushEq (.const ``True []) (.const ``False []) H /-- Propagate equality from `a` and `b` to `a → b`. -/ partial def propagateImpUp (e : Expr) : CCM Unit := do guard e.isArrow let .forallE _ a b _ := e | unreachable! if ← isEqTrue a then -- `a = True → (a → b) = b` pushEq e b (mkApp3 (.const ``imp_eq_of_eq_true_left []) a b (← getEqTrueProof a)) else if ← isEqFalse a then -- `a = False → (a → b) = True` pushEq e (.const ``True []) (mkApp3 (.const ``imp_eq_of_eq_false_left []) a b (← getEqFalseProof a)) else if ← isEqTrue b then -- `b = True → (a → b) = True` pushEq e (.const ``True []) (mkApp3 (.const ``imp_eq_of_eq_true_right []) a b (← getEqTrueProof b)) else if ← isEqFalse b then let isNot : Expr → Bool × Expr | .app (.const ``Not []) a => (true, a) | .forallE _ a (.const ``False []) _ => (true, a) | e => (false, e) if let (true, arg) := isNot a then if (← get).em then -- `b = False → (Not a → b) = a` pushEq e arg (mkApp3 (.const ``not_imp_eq_of_eq_false_right []) arg b (← getEqFalseProof b)) else -- `b = False → (a → b) = Not a` let notA := mkApp (.const ``Not []) a internalizeCore notA none pushEq e notA (mkApp3 (.const ``imp_eq_of_eq_false_right []) a b (← getEqFalseProof b)) else if ← isEqv a b then pushEq e (.const ``True []) (mkApp3 (.const ``imp_eq_true_of_eq []) a b (← getPropEqProof a b)) /-- Propagate equality from `p`, `a` and `b` to `if p then a else b`. 
-/ partial def propagateIteUp (e : Expr) : CCM Unit := do let .app (.app (.app (.app (.app (.const ``ite [lvl]) A) c) d) a) b := e | failure if ← isEqTrue c then -- `c = True → (ite c a b) = a` pushEq e a (mkApp6 (.const ``if_eq_of_eq_true [lvl]) c d A a b (← getEqTrueProof c)) else if ← isEqFalse c then -- `c = False → (ite c a b) = b` pushEq e b (mkApp6 (.const ``if_eq_of_eq_false [lvl]) c d A a b (← getEqFalseProof c)) else if ← isEqv a b then -- `a = b → (ite c a b) = a` pushEq e a (mkApp6 (.const ``if_eq_of_eq [lvl]) c d A a b (← getPropEqProof a b)) /-- Propagate equality from `a` and `b` to *disprove* `a = b`. -/ partial def propagateEqUp (e : Expr) : CCM Unit := do -- Remark: the positive case is implemented at `checkEqTrue` for any reflexive relation. let some (_, a, b) := e.eq? | failure let ra ← getRoot a let rb ← getRoot b if ra != rb then let mut raNeRb : Option Expr := none /- We disprove inequality for interpreted values here. The possible types of interpreted values are in `{String, Char, Int, Nat}`. 1- `String` `ra` & `rb` are string literals, so if `ra != rb`, `ra.int?.isNone` is `true` and we can prove `$ra ≠ $rb`. 2- `Char` `ra` & `rb` are the form of `Char.ofNat (nat_lit n)`, so if `ra != rb`, `ra.int?.isNone` is `true` and we can prove `$ra ≠ $rb` (assuming that `n` is not pathological value, i.e. `n.isValidChar`). 3- `Int`, `Nat` `ra` & `rb` are the form of `@OfNat.ofNat ℤ (nat_lit n) i` or `@Neg.neg ℤ i' (@OfNat.ofNat ℤ (nat_lit n) i)`, so even if `ra != rb`, `$ra ≠ $rb` can be false when `i` or `i'` in `ra` & `rb` are not alpha-equivalent but def-eq. If `ra.int? != rb.int?`, we can prove `$ra ≠ $rb` (assuming that `i` & `i'` are not pathological instances). -/ if ← isInterpretedValue ra <&&> isInterpretedValue rb <&&> pure (ra.int?.isNone || ra.int? != rb.int?) then raNeRb := some (Expr.app (.proj ``Iff 0 (← mkAppM ``bne_iff_ne #[ra, rb])) (← mkEqRefl (.const ``true []))) else if let some c₁ ← isConstructorApp? 
ra then if let some c₂ ← isConstructorApp? rb then if c₁.name != c₂.name then raNeRb ← withLocalDeclD `h (← mkEq ra rb) fun h => do mkLambdaFVars #[h] (← mkNoConfusion (.const ``False []) h) if let some raNeRb' := raNeRb then if let some aNeRb ← mkNeOfEqOfNe a ra raNeRb' then if let some aNeB ← mkNeOfNeOfEq aNeRb rb b then pushEq e (.const ``False []) (← mkEqFalse aNeB) /-- Propagate equality from subexpressions of `e` to `e`. -/ partial def propagateUp (e : Expr) : CCM Unit := do if (← get).inconsistent then return if e.isAppOfArity ``Iff 2 then propagateIffUp e else if e.isAppOfArity ``And 2 then propagateAndUp e else if e.isAppOfArity ``Or 2 then propagateOrUp e else if e.isAppOfArity ``Not 1 then propagateNotUp e else if e.isArrow then propagateImpUp e else if e.isIte then propagateIteUp e else if e.isEq then propagateEqUp e /-- This method is invoked during internalization and eagerly apply basic equivalences for term `e` Examples: - If `e := cast H e'`, then it merges the equivalence classes of `cast H e'` and `e'` In principle, we could mark theorems such as `cast_eq` as simplification rules, but this created problems with the builtin support for cast-introduction in the ematching module in Lean 3. TODO: check if this is now possible in Lean 4. Eagerly merging the equivalence classes is also more efficient. 
-/ partial def applySimpleEqvs (e : Expr) : CCM Unit := do if let .app (.app (.app (.app (.const ``cast [l₁]) A) B) H) a := e then /- ``` HEq (cast H a) a theorem cast_heq.{l₁} : ∀ {A B : Sort l₁} (H : A = B) (a : A), HEq (@cast.{l₁} A B H a) a ``` -/ let proof := mkApp4 (.const ``cast_heq [l₁]) A B H a pushHEq e a proof if let .app (.app (.app (.app (.app (.app (.const ``Eq.rec [l₁, l₂]) A) a) P) p) a') H := e then /- ``` HEq (t ▸ p) p theorem eqRec_heq'.{l₁, l₂} : ∀ {A : Sort l₂} {a : A} {P : (a' : A) → a = a' → Sort l₁} (p : P a) {a' : A} (H : a = a'), HEq (@Eq.rec.{l₁ l₂} A a P p a' H) p ``` -/ let proof := mkApp6 (.const ``eqRec_heq' [l₁, l₂]) A a P p a' H pushHEq e p proof if let .app (.app (.app (.const ``Ne [l₁]) α) a) b := e then -- `(a ≠ b) = (Not (a = b))` let newE := Expr.app (.const ``Not []) (mkApp3 (.const ``Eq [l₁]) α a b) internalizeCore newE none pushReflEq e newE if let some r ← e.reduceProjStruct? then pushReflEq e r let fn := e.getAppFn if fn.isLambda then let reducedE := e.headBeta if let some phandler := (← get).phandler then phandler.newAuxCCTerm reducedE internalizeCore reducedE none pushReflEq e reducedE let mut revArgs : Array Expr := #[] let mut it := e while it.isApp do revArgs := revArgs.push it.appArg! let fn := it.appFn! let rootFn ← getRoot fn let en ← getEntry rootFn if en.any Entry.hasLambdas then let lambdas ← getEqcLambdas rootFn let newLambdaApps ← propagateBeta fn revArgs lambdas for newApp in newLambdaApps do internalizeCore newApp none it := fn propagateUp e /-- If `e` is a subsingleton element, push the equality proof between `e` and its canonical form to the todo list or register `e` as the canonical form of itself. -/ partial def processSubsingletonElem (e : Expr) : CCM Unit := do let type ← inferType e -- TODO: this is likely to become a bottleneck. See e.g. -- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/convert.20is.20often.20slow/near/433830798 let ss ← synthInstance? 
      (← mkAppM ``Subsingleton #[type])
  if ss.isNone then return -- type is not a subsingleton
  let type ← normalize type
  -- Make sure type has been internalized
  internalizeCore type none
  -- Try to find representative
  if let some it := (← get).subsingletonReprs.find? type then
    pushSubsingletonEq e it
  else
    modify fun ccs =>
      { ccs with subsingletonReprs := ccs.subsingletonReprs.insert type e }
  let typeRoot ← getRoot type
  if typeRoot == type then return
  if let some it2 := (← get).subsingletonReprs.find? typeRoot then
    pushSubsingletonEq e it2
  else
    modify fun ccs =>
      { ccs with subsingletonReprs := ccs.subsingletonReprs.insert typeRoot e }

/-- Add a new entry for `e` to the congruence closure. -/
partial def mkEntry (e : Expr) (interpreted : Bool) : CCM Unit := do
  if (← getEntry e).isSome then return
  let constructor ← isConstructorApp e
  modify fun ccs =>
    { ccs with toCCState := ccs.toCCState.mkEntryCore e interpreted constructor }
  processSubsingletonElem e
end

/-- Can we propagate equality from subexpressions of `e` to `e`? -/
def mayPropagate (e : Expr) : Bool :=
  e.isAppOfArity ``Iff 2 || e.isAppOfArity ``And 2 || e.isAppOfArity ``Or 2 ||
    e.isAppOfArity ``Not 1 || e.isArrow || e.isIte

/-- Remove parents of `e` from the congruence table and the symm congruence table, and append
the parents for which equality may be propagated (see `mayPropagate`) to `parentsToPropagate`.
Returns the new value of `parentsToPropagate`. -/
def removeParents (e : Expr) (parentsToPropagate : Array Expr := #[]) : CCM (Array Expr) := do
  let some ps := (← get).parents.find? e | return parentsToPropagate
  let mut parentsToPropagate := parentsToPropagate
  for pocc in ps do
    let p := pocc.expr
    trace[Debug.Meta.Tactic.cc] "remove parent: {p}"
    if mayPropagate p then
      parentsToPropagate := parentsToPropagate.push p
    if p.isApp then
      if pocc.symmTable then
        let some (rel, lhs, rhs) ← p.relSidesIfSymm? | failure
        let k' ← mkSymmCongruencesKey lhs rhs
        if let some lst := (← get).symmCongruences.find?
k' then let k := (p, rel) let newLst ← lst.filterM fun k₂ => (!·) <$> compareSymm k k₂ if !newLst.isEmpty then modify fun ccs => { ccs with symmCongruences := ccs.symmCongruences.insert k' newLst } else modify fun ccs => { ccs with symmCongruences := ccs.symmCongruences.erase k' } else let k' ← mkCongruencesKey p if let some es := (← get).congruences.find? k' then let newEs := es.erase p if !newEs.isEmpty then modify fun ccs => { ccs with congruences := ccs.congruences.insert k' newEs } else modify fun ccs => { ccs with congruences := ccs.congruences.erase k' } return parentsToPropagate /-- The fields `target` and `proof` in `e`'s entry are encoding a transitivity proof Let `e.rootTarget` and `e.rootProof` denote these fields. ```lean e = e.rootTarget := e.rootProof _ = e.rootTarget.rootTarget := e.rootTarget.rootProof ... _ = e.root := ... ``` The transitivity proof eventually reaches the root of the equivalence class. This method "inverts" the proof. That is, the `target` goes from `e.root` to e after we execute it. -/ partial def invertTrans (e : Expr) (newFlipped : Bool := false) (newTarget : Option Expr := none) (newProof : Option EntryExpr := none) : CCM Unit := do let some n ← getEntry e | failure if let some t := n.target then invertTrans t (!n.flipped) (some e) n.proof let newN : Entry := { n with flipped := newFlipped target := newTarget proof := newProof } modify fun ccs => { ccs with entries := ccs.entries.insert e newN } /-- Traverse the `root`'s equivalence class, and for each function application, collect the function's equivalence class root. 
-/
def collectFnRoots (root : Expr) (fnRoots : Array Expr := #[]) : CCM (Array Expr) := do
  guard ((← getRoot root) == root)
  let mut fnRoots : Array Expr := fnRoots
  let mut visited : RBExprSet := ∅
  let mut it := root
  repeat
    let fnRoot ← getRoot (it.getAppFn)
    if !visited.contains fnRoot then
      visited := visited.insert fnRoot
      fnRoots := fnRoots.push fnRoot
    let some itN ← getEntry it | failure
    it := itN.next
  until it == root
  return fnRoots

/-- Reinsert parents of `e` to the congruence table and the symm congruence table.

Together with `removeParents`, this allows modifying parents of an expression. -/
def reinsertParents (e : Expr) : CCM Unit := do
  let some ps := (← get).parents.find? e | return
  for p in ps do
    trace[Debug.Meta.Tactic.cc] "reinsert parent: {p.expr}"
    if p.expr.isApp then
      if p.symmTable then
        addSymmCongruenceTable p.expr
      else
        addCongruenceTable p.expr

/-- Check for integrity of the `CCStructure`. -/
def checkInvariant : CCM Unit := do
  guard (← get).checkInvariant

/-- For each `fnRoot` in `fnRoots`, traverse its parents and look for a parent prefix whose
function part is in the same equivalence class as the given lambdas.

Remark: all expressions in `lambdas` are assumed to be in the same equivalence class
(this is checked by the `guard` below). -/
def propagateBetaToEqc (fnRoots lambdas : Array Expr) (newLambdaApps : Array Expr := #[]) :
    CCM (Array Expr) := do
  if lambdas.isEmpty then return newLambdaApps
  let mut newLambdaApps := newLambdaApps
  let lambdaRoot ← getRoot lambdas.back
  guard (← lambdas.allM fun l => pure l.isLambda <&&> (· == lambdaRoot) <$> getRoot l)
  for fnRoot in fnRoots do
    if let some ps := (← get).parents.find? fnRoot then
      for { expr := p, .. } in ps do
        let mut revArgs : Array Expr := #[]
        let mut it₂ := p
        while it₂.isApp do
          let fn := it₂.appFn!
          revArgs := revArgs.push it₂.appArg!
          if (← getRoot fn) == lambdaRoot then
            -- found it
            newLambdaApps ← propagateBeta fn revArgs lambdas newLambdaApps
            break
          it₂ := it₂.appFn!
return newLambdaApps /-- Given `c` a constructor application, if `p` is a projection application (not `.proj _ _ _`, but `.app (.const projName _) _`) such that major premise is equal to `c`, then propagate new equality. Example: if `p` is of the form `b.fst`, `c` is of the form `(x, y)`, and `b = c`, we add the equality `(x, y).fst = x` -/ def propagateProjectionConstructor (p c : Expr) : CCM Unit := do guard (← isConstructorApp c) p.withApp fun pFn pArgs => do let some pFnN := pFn.constName? | return let some info ← getProjectionFnInfo? pFnN | return let mkidx := info.numParams if h : mkidx < pArgs.size then unless ← isEqv (pArgs[mkidx]'h) c do return unless ← pureIsDefEq (← inferType (pArgs[mkidx]'h)) (← inferType c) do return /- Create new projection application using c (e.g., `(x, y).fst`), and internalize it. The internalizer will add the new equality. -/ let pArgs := pArgs.set ⟨mkidx, h⟩ c let newP := mkAppN pFn pArgs internalizeCore newP none else return /-- Given a new equality `e₁ = e₂`, where `e₁` and `e₂` are constructor applications. Implement the following implications: ```lean c a₁ ... aₙ = c b₁ ... bₙ => a₁ = b₁, ..., aₙ = bₙ c₁ ... = c₂ ... => False ``` where `c`, `c₁` and `c₂` are constructors -/ partial def propagateConstructorEq (e₁ e₂ : Expr) : CCM Unit := do let env ← getEnv let some c₁ ← isConstructorApp? e₁ | failure let some c₂ ← isConstructorApp? e₂ | failure unless ← pureIsDefEq (← inferType e₁) (← inferType e₂) do -- The implications above only hold if the types are equal. -- TODO(Leo): if the types are different, we may still propagate by searching the equivalence -- classes of `e₁` and `e₂` for other constructors that may have compatible types. 
return let some h ← getEqProof e₁ e₂ | failure if c₁.name == c₂.name then if 0 < c₁.numFields then let name := mkInjectiveTheoremNameFor c₁.name if env.contains name then let rec /-- Given an injective theorem `val : type`, whose `type` is the form of `a₁ = a₂ ∧ HEq b₁ b₂ ∧ ..`, destruct `val` and push equality proofs to the todo list. -/ go (type val : Expr) : CCM Unit := do let push (type val : Expr) : CCM Unit := match type.eq? with | some (_, lhs, rhs) => pushEq lhs rhs val | none => match type.heq? with | some (_, _, lhs, rhs) => pushHEq lhs rhs val | none => failure match type.and? with | some (l, r) => push l (.proj ``And 0 val) go r (.proj ``And 1 val) | none => push type val let val ← mkAppM name #[h] let type ← inferType val go type val else let falsePr ← mkNoConfusion (.const ``False []) h let H := Expr.app (.const ``true_eq_false_of_false []) falsePr pushEq (.const ``True []) (.const ``False []) H /-- Derive contradiction if we can get equality between different values. -/ def propagateValueInconsistency (e₁ e₂ : Expr) : CCM Unit := do guard (← isInterpretedValue e₁) guard (← isInterpretedValue e₂) let some eqProof ← getEqProof e₁ e₂ | failure let trueEqFalse ← mkEq (.const ``True []) (.const ``False []) let neProof := Expr.app (.proj ``Iff 0 (← mkAppM ``bne_iff_ne #[e₁, e₂])) (← mkEqRefl (.const ``true [])) let H ← mkAbsurd trueEqFalse eqProof neProof pushEq (.const ``True []) (.const ``False []) H /-- Propagate equality from `a ∧ b = True` to `a = True` and `b = True`. -/ def propagateAndDown (e : Expr) : CCM Unit := do if ← isEqTrue e then let some (a, b) := e.and? | failure let h ← getEqTrueProof e pushEq a (.const ``True []) (mkApp3 (.const ``eq_true_of_and_eq_true_left []) a b h) pushEq b (.const ``True []) (mkApp3 (.const ``eq_true_of_and_eq_true_right []) a b h) /-- Propagate equality from `a ∨ b = False` to `a = False` and `b = False`. -/ def propagateOrDown (e : Expr) : CCM Unit := do if ← isEqFalse e then let some (a, b) := e.app2? 
``Or | failure let h ← getEqFalseProof e pushEq a (.const ``False []) (mkApp3 (.const ``eq_false_of_or_eq_false_left []) a b h) pushEq b (.const ``False []) (mkApp3 (.const ``eq_false_of_or_eq_false_right []) a b h) /-- Propagate equality from `¬a` to `a`. -/ def propagateNotDown (e : Expr) : CCM Unit := do if ← isEqTrue e then let some a := e.not? | failure pushEq a (.const ``False []) (mkApp2 (.const ``eq_false_of_not_eq_true []) a (← getEqTrueProof e)) else if ← (·.em) <$> get <&&> isEqFalse e then let some a := e.not? | failure pushEq a (.const ``True []) (mkApp2 (.const ``eq_true_of_not_eq_false []) a (← getEqFalseProof e)) /-- Propagate equality from `(a = b) = True` to `a = b`. -/ def propagateEqDown (e : Expr) : CCM Unit := do if ← isEqTrue e then let some (a, b) := e.eqOrIff? | failure pushEq a b (← mkAppM ``of_eq_true #[← getEqTrueProof e]) /-- Propagate equality from `¬∃ x, p x` to `∀ x, ¬p x`. -/ def propagateExistsDown (e : Expr) : CCM Unit := do if ← isEqFalse e then let hNotE ← mkAppM ``not_of_eq_false #[← getEqFalseProof e] let (all, hAll) ← e.forallNot_of_notExists hNotE internalizeCore all none pushEq all (.const ``True []) (← mkEqTrue hAll) /-- Propagate equality from `e` to subexpressions of `e`. -/ def propagateDown (e : Expr) : CCM Unit := do if e.isAppOfArity ``And 2 then propagateAndDown e else if e.isAppOfArity ``Or 2 then propagateOrDown e else if e.isAppOfArity ``Not 1 then propagateNotDown e else if e.isEq || e.isAppOfArity ``Iff 2 then propagateEqDown e else if e.isAppOfArity ``Exists 2 then propagateExistsDown e /-- Performs one step in the process when the new equation is added. Here, `H` contains the proof that `e₁ = e₂` (if `heqProof` is false) or `HEq e₁ e₂` (if `heqProof` is true). 
-/
def addEqvStep (e₁ e₂ : Expr) (H : EntryExpr) (heqProof : Bool) : CCM Unit := do
  let some n₁ ← getEntry e₁ | return -- `e₁` has not been internalized
  let some n₂ ← getEntry e₂ | return -- `e₂` has not been internalized
  if n₁.root == n₂.root then return -- they are already in the same equivalence class.
  let some r₁ ← getEntry n₁.root | failure
  let some r₂ ← getEntry n₂.root | failure
  -- We want `r₂` to be the root of the combined class.
  /-
    We swap `(e₁,n₁,r₁)` with `(e₂,n₂,r₂)` when
    1- `r₁.interpreted && !r₂.interpreted`.
       Reason: to decide when to propagate we check whether the root of the equivalence class
       is `True`/`False`. So, this condition is to make sure if `True`/`False` is in an
       equivalence class, then one of them is the root. If both are, it doesn't matter, since
       the state is inconsistent anyway.
    2- `r₁.constructor && !r₂.interpreted && !r₂.constructor`
       Reason: we want constructors to be the representative of their equivalence classes.
    3- `r₁.size > r₂.size && !r₂.interpreted && !r₂.constructor`
       Reason: performance.
  -/
  if (r₁.interpreted && !r₂.interpreted) ||
      (r₁.constructor && !r₂.interpreted && !r₂.constructor) ||
      (decide (r₁.size > r₂.size) && !r₂.interpreted && !r₂.constructor) then
    go e₂ e₁ n₂ n₁ r₂ r₁ true H heqProof
  else
    go e₁ e₂ n₁ n₂ r₁ r₂ false H heqProof
where
  /-- The auxiliary definition for `addEqvStep` to flip the input. -/
  go (e₁ e₂ : Expr) (n₁ n₂ r₁ r₂ : Entry) (flipped : Bool) (H : EntryExpr) (heqProof : Bool) :
      CCM Unit := do
    -- Interpreted values are already in the correct equivalence class,
    -- so merging two different classes means we found an inconsistency.
    let mut valueInconsistency := false
    if r₁.interpreted && r₂.interpreted then
      if n₁.root.isConstOf ``True || n₂.root.isConstOf ``True then
        modify fun ccs => { ccs with inconsistent := true }
      else if n₁.root.int?.isSome && n₂.root.int?.isSome then
        valueInconsistency := n₁.root.int? != n₂.root.int?
else valueInconsistency := true let e₁Root := n₁.root let e₂Root := n₂.root trace[Debug.Meta.Tactic.cc] "merging\n{e₁} ==> {e₁Root}\nwith\n{e₂Root} <== {e₂}" /- Following target/proof we have `e₁ → ... → r₁` `e₂ → ... → r₂` We want `r₁ → ... → e₁ → e₂ → ... → r₂` -/ invertTrans e₁ let newN₁ : Entry := { n₁ with target := e₂ proof := H flipped } modify fun ccs => { ccs with entries := ccs.entries.insert e₁ newN₁ } -- The hash code for the parents is going to change let parentsToPropagate ← removeParents e₁Root let lambdas₁ ← getEqcLambdas e₁Root let lambdas₂ ← getEqcLambdas e₂Root let fnRoots₂ ← if !lambdas₁.isEmpty then collectFnRoots e₂Root else pure #[] let fnRoots₁ ← if !lambdas₂.isEmpty then collectFnRoots e₁Root else pure #[] -- force all `root` fields in `e₁` equivalence class to point to `e₂Root` let propagate := e₂Root.isConstOf ``True || e₂Root.isConstOf ``False let mut toPropagate : Array Expr := #[] let mut it := e₁ repeat let some itN ← getEntry it | failure if propagate then toPropagate := toPropagate.push it let newItN : Entry := { itN with root := e₂Root } modify fun ccs => { ccs with entries := ccs.entries.insert it newItN } it := newItN.next until it == e₁ reinsertParents e₁Root -- update next of `e₁Root` and `e₂Root`, ac representative, and size of `e₂Root` let some r₁ ← getEntry e₁Root | failure let some r₂ ← getEntry e₂Root | failure guard (r₁.root == e₂Root) let acVar?₁ := r₁.acVar let acVar?₂ := r₂.acVar let newR₁ : Entry := { r₁ with next := r₂.next } let newR₂ : Entry := { r₂ with next := r₁.next size := r₂.size + r₁.size hasLambdas := r₂.hasLambdas || r₁.hasLambdas heqProofs := r₂.heqProofs || heqProof acVar := acVar?₂ <|> acVar?₁ } modify fun ccs => { ccs with entries := ccs.entries.insert e₁Root newR₁ |>.insert e₂Root newR₂ } checkInvariant let lambdaAppsToInternalize ← propagateBetaToEqc fnRoots₂ lambdas₁ let lambdaAppsToInternalize ← propagateBetaToEqc fnRoots₁ lambdas₂ lambdaAppsToInternalize -- copy `e₁Root` parents to `e₂Root` let 
constructorEq := r₁.constructor && r₂.constructor if let some ps₁ := (← get).parents.find? e₁Root then let mut ps₂ : ParentOccSet := ∅ if let some it' := (← get).parents.find? e₂Root then ps₂ := it' for p in ps₁ do if ← pure p.expr.isApp <||> isCgRoot p.expr then if !constructorEq && r₂.constructor then propagateProjectionConstructor p.expr e₂Root ps₂ := ps₂.insert p modify fun ccs => { ccs with parents := ccs.parents.erase e₁Root |>.insert e₂Root ps₂ } if !(← get).inconsistent then if let some acVar₁ := acVar?₁ then if let some acVar₂ := acVar?₂ then addACEq acVar₁ acVar₂ if !(← get).inconsistent && constructorEq then propagateConstructorEq e₁Root e₂Root if !(← get).inconsistent && valueInconsistency then propagateValueInconsistency e₁Root e₂Root if !(← get).inconsistent then updateMT e₂Root checkNewSubsingletonEq e₁Root e₂Root if !(← get).inconsistent then for p in parentsToPropagate do propagateUp p if !(← get).inconsistent && !toPropagate.isEmpty then for e in toPropagate do propagateDown e if let some phandler := (← get).phandler then phandler.propagated toPropagate if !(← get).inconsistent then for e in lambdaAppsToInternalize do internalizeCore e none let ccs ← get trace[Meta.Tactic.cc.merge] "{e₁Root} = {e₂Root}" trace[Debug.Meta.Tactic.cc] "merged: {e₁Root} = {e₂Root}\n{ccs.ppEqcs}" trace[Debug.Meta.Tactic.cc.parentOccs] ccs.ppParentOccs /-- Process the tasks in the `todo` field. -/ def processTodo : CCM Unit := do repeat let todo ← getTodo let some (lhs, rhs, H, heqProof) := todo.back? | return if (← get).inconsistent then modifyTodo fun _ => #[] return modifyTodo Array.pop addEqvStep lhs rhs H heqProof /-- Internalize `e` so that the congruence closure can deal with the given expression. -/ def internalize (e : Expr) : CCM Unit := do internalizeCore e none processTodo /-- Add `H : lhs = rhs` or `H : HEq lhs rhs` to the congruence closure. Don't forget to internalize `lhs` and `rhs` beforehand. 
-/ def addEqvCore (lhs rhs H : Expr) (heqProof : Bool) : CCM Unit := do pushTodo lhs rhs H heqProof processTodo /-- Add `proof : type` to the congruence closure. -/ def add (type : Expr) (proof : Expr) : CCM Unit := do if (← get).inconsistent then return modifyTodo fun _ => #[] let (isNeg, p) := match type with | .app (.const ``Not []) a => (true, a) | .forallE _ a (.const ``False []) _ => (true, a) | .app (.app (.app (.const ``Ne [u]) α) lhs) rhs => (true, .app (.app (.app (.const ``Eq [u]) α) lhs) rhs) | e => (false, e) match p with | .app (.app (.app (.const ``Eq _) _) lhs) rhs => if isNeg then internalizeCore p none addEqvCore p (.const ``False []) (← mkEqFalse proof) false else internalizeCore lhs none internalizeCore rhs none addEqvCore lhs rhs proof false | .app (.app (.app (.app (.const ``HEq _) _) lhs) _) rhs => if isNeg then internalizeCore p none addEqvCore p (.const ``False []) (← mkEqFalse proof) false else internalizeCore lhs none internalizeCore rhs none addEqvCore lhs rhs proof true | .app (.app (.const ``Iff _) lhs) rhs => if isNeg then let neqProof ← mkAppM ``neq_of_not_iff #[proof] internalizeCore p none addEqvCore p (.const ``False []) (← mkEqFalse neqProof) false else internalizeCore lhs none internalizeCore rhs none addEqvCore lhs rhs (mkApp3 (.const ``propext []) lhs rhs proof) false | _ => if ← pure isNeg <||> isProp p then internalizeCore p none if isNeg then addEqvCore p (.const ``False []) (← mkEqFalse proof) false else addEqvCore p (.const ``True []) (← mkEqTrue proof) false end CCM end Mathlib.Tactic.CC
-- Tactic/CC/Datatypes.lean
/- Copyright (c) 2016 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura, Miyahara Kō -/ import Lean.Meta.CongrTheorems import Lean.Meta.Tactic.Rfl import Batteries.Data.HashMap.Basic import Batteries.Data.RBMap.Basic import Mathlib.Lean.Meta.Basic /-! # Datatypes for `cc` Some of the data structures here are used in multiple parts of the tactic. We split them into their own file. ## TODO This file is ported from C++ code, so many declarations lack documents. -/ universe u open Lean Meta Elab Tactic namespace Mathlib.Tactic.CC /-- Return true if `e` represents a constant value (numeral, character, or string). -/ def isValue (e : Expr) : Bool := e.int?.isSome || e.isCharLit || e.isStringLit /-- Return true if `e` represents a value (nat/int numeral, character, or string). In addition to the conditions in `Mathlib.Tactic.CC.isValue`, this also checks that kernel computation can compare the values for equality. -/ def isInterpretedValue (e : Expr) : MetaM Bool := do if e.isCharLit || e.isStringLit then return true else if e.int?.isSome then let type ← inferType e pureIsDefEq type (.const ``Nat []) <||> pureIsDefEq type (.const ``Int []) else return false /-- Given a reflexive relation `R`, and a proof `H : a = b`, build a proof for `R a b` -/ def liftFromEq (R : Name) (H : Expr) : MetaM Expr := do if R == ``Eq then return H let HType ← whnf (← inferType H) -- `HType : @Eq A a _` let some (A, a, _) := HType.eq? | throwError "failed to build liftFromEq equality proof expected: {H}" -- `motive : (x : _) → a = x → Prop := fun x h => R a x` let motive ← withLocalDeclD `x A fun x => do let hType ← mkEq a x withLocalDeclD `h hType fun h => mkRel R a x >>= mkLambdaFVars #[x, h] -- `minor : R a a := by rfl` let minor ← do let mt ← mkRel R a a let m ← mkFreshExprSyntheticOpaqueMVar mt m.mvarId!.applyRfl instantiateMVars m mkEqRec motive minor H /-- Ordering on `Expr`. 
-/
scoped instance : Ord Expr where
  compare a b := bif Expr.lt a b then .lt else bif Expr.eqv b a then .eq else .gt

/-- Red-black maps whose keys are `Expr`s.

TODO: the choice between `RBMap` and `HashMap` is not obvious:
the current version follows the Lean 3 C++ implementation.
Once the `cc` tactic is used a lot in Mathlib, we should profile and see
if `HashMap` could be more optimal. -/
abbrev RBExprMap (α : Type u) := Batteries.RBMap Expr α compare

/-- Red-black sets of `Expr`s.

TODO: the choice between `RBSet` and `HashSet` is not obvious:
the current version follows the Lean 3 C++ implementation.
Once the `cc` tactic is used a lot in Mathlib, we should profile and see
if `HashSet` could be more optimal. -/
abbrev RBExprSet := Batteries.RBSet Expr compare

/-- `CongrTheorem`s equipped with additional info used by congruence closure modules. -/
structure CCCongrTheorem extends CongrTheorem where
  /-- If `heqResult` is true, then the lemma is based on heterogeneous equality
  and the conclusion is a heterogeneous equality. -/
  heqResult : Bool := false
  /-- If `hcongrTheorem` is true, then the lemma was created using `mkHCongrWithArity`. -/
  hcongrTheorem : Bool := false

/-- Automatically generated congruence lemma based on heterogeneous equality.

This returns an annotated version of the result from `Lean.Meta.mkHCongrWithArity`. -/
def mkCCHCongrWithArity (fn : Expr) (nargs : Nat) : MetaM (Option CCCongrTheorem) := do
  let eqCongr ← try mkHCongrWithArity fn nargs catch _ => return none
  return some { eqCongr with
    heqResult := true
    hcongrTheorem := true }

/-- Keys used to find corresponding `CCCongrTheorem`s. -/
structure CCCongrTheoremKey where
  /-- The function of the given `CCCongrTheorem`. -/
  fn : Expr
  /-- The number of arguments of `fn`. -/
  nargs : Nat
  deriving BEq, Hashable

/-- Caches used to find corresponding `CCCongrTheorem`s. -/
abbrev CCCongrTheoremCache := Batteries.HashMap CCCongrTheoremKey (Option CCCongrTheorem)

/-- Configs used in congruence closure modules.
-/ structure CCConfig where /-- If `true`, congruence closure will treat implicit instance arguments as constants. This means that setting `ignoreInstances := false` will fail to unify two definitionally equal instances of the same class. -/ ignoreInstances : Bool := true /-- If `true`, congruence closure modulo Associativity and Commutativity. -/ ac : Bool := true /-- If `hoFns` is `some fns`, then full (and more expensive) support for higher-order functions is *only* considered for the functions in fns and local functions. The performance overhead is described in the paper "Congruence Closure in Intensional Type Theory". If `hoFns` is `none`, then full support is provided for *all* constants. -/ hoFns : Option (List Name) := none /-- If `true`, then use excluded middle -/ em : Bool := true /-- If `true`, we treat values as atomic symbols -/ values : Bool := false deriving Inhabited /-- An `ACApps` represents either just an `Expr` or applications of an associative and commutative binary operator. -/ inductive ACApps where /-- An `ACApps` of just an `Expr`. -/ | ofExpr (e : Expr) : ACApps /-- An `ACApps` of applications of a binary operator. `args` are assumed to be sorted. See also `ACApps.mkApps` if `args` are not yet sorted. -/ | apps (op : Expr) (args : Array Expr) : ACApps deriving Inhabited, BEq instance : Coe Expr ACApps := ⟨ACApps.ofExpr⟩ attribute [coe] ACApps.ofExpr /-- Ordering on `ACApps` sorts `.ofExpr` before `.apps`, and sorts `.apps` by function symbol, then by shortlex order. -/ scoped instance : Ord ACApps where compare | .ofExpr a, .ofExpr b => compare a b | .ofExpr _, .apps _ _ => .lt | .apps _ _, .ofExpr _ => .gt | .apps op₁ args₁, .apps op₂ args₂ => compare op₁ op₂ |>.then <| compare args₁.size args₂.size |>.then <| Id.run do for i in [:args₁.size] do let o := compare args₁[i]! args₂[i]! if o != .eq then return o return .eq /-- Return true iff `e₁` is a "subset" of `e₂`. 
Example: The result is `true` for `e₁ := a*a*a*b*d` and `e₂ := a*a*a*a*b*b*c*d*d`. The result is also `true` for `e₁ := a` and `e₂ := a*a*a*b*c`. -/ def ACApps.isSubset : (e₁ e₂ : ACApps) → Bool | .ofExpr a, .ofExpr b => a == b | .ofExpr a, .apps _ args => args.contains a | .apps _ _, .ofExpr _ => false | .apps op₁ args₁, .apps op₂ args₂ => if op₁ == op₂ then if args₁.size ≤ args₂.size then Id.run do let mut i₁ := 0 let mut i₂ := 0 while i₁ < args₁.size ∧ i₂ < args₂.size do if args₁[i₁]! == args₂[i₂]! then i₁ := i₁ + 1 i₂ := i₂ + 1 else if Expr.lt args₂[i₂]! args₁[i₁]! then i₂ := i₂ + 1 else return false return i₁ == args₁.size else false else false /-- Appends elements of the set difference `e₁ \ e₂` to `r`. Example: given `e₁ := a*a*a*a*b*b*c*d*d*d` and `e₂ := a*a*a*b*b*d`, the result is `#[a, c, d, d]` Precondition: `e₂.isSubset e₁` -/ def ACApps.diff (e₁ e₂ : ACApps) (r : Array Expr := #[]) : Array Expr := match e₁ with | .apps op₁ args₁ => Id.run do let mut r := r match e₂ with | .apps op₂ args₂ => if op₁ == op₂ then let mut i₂ := 0 for i₁ in [:args₁.size] do if i₂ == args₂.size then r := r.push args₁[i₁]! else if args₁[i₁]! == args₂[i₂]! then i₂ := i₂ + 1 else r := r.push args₁[i₁]! | .ofExpr e₂ => let mut found := false for i in [:args₁.size] do if !found && args₁[i]! == e₂ then found := true else r := r.push args₁[i]! return r | .ofExpr e => if e₂ == e then r else r.push e /-- Appends arguments of `e` to `r`. -/ def ACApps.append (op : Expr) (e : ACApps) (r : Array Expr := #[]) : Array Expr := match e with | .apps op' args => if op' == op then r ++ args else r | .ofExpr e => r.push e /-- Appends elements in the intersection of `e₁` and `e₂` to `r`. -/ def ACApps.intersection (e₁ e₂ : ACApps) (r : Array Expr := #[]) : Array Expr := match e₁, e₂ with | .apps _ args₁, .apps _ args₂ => Id.run do let mut r := r let mut i₁ := 0 let mut i₂ := 0 while i₁ < args₁.size ∧ i₂ < args₂.size do if args₁[i₁]! == args₂[i₂]! then r := r.push args₁[i₁]! 
i₁ := i₁ + 1 i₂ := i₂ + 1 else if Expr.lt args₂[i₂]! args₁[i₁]! then i₂ := i₂ + 1 else i₁ := i₁ + 1 return r | _, _ => r /-- Sorts `args` and applies them to `ACApps.apps`. -/ def ACApps.mkApps (op : Expr) (args : Array Expr) : ACApps := .apps op (args.qsort Expr.lt) /-- Flattens given two `ACApps`. -/ def ACApps.mkFlatApps (op : Expr) (e₁ e₂ : ACApps) : ACApps := let newArgs := ACApps.append op e₁ let newArgs := ACApps.append op e₂ newArgs -- TODO: this does a full sort but `newArgs` consists of two sorted subarrays, -- so if we want to optimize this, some form of merge sort might be faster. ACApps.mkApps op newArgs /-- Converts an `ACApps` to an `Expr`. This returns `none` when the empty applications are given. -/ def ACApps.toExpr : ACApps → Option Expr | .apps _ ⟨[]⟩ => none | .apps op ⟨arg₀ :: args⟩ => some <| args.foldl (fun e arg => mkApp2 op e arg) arg₀ | .ofExpr e => some e /-- Red-black maps whose keys are `ACApps`es. TODO: the choice between `RBMap` and `HashMap` is not obvious: the current version follows the Lean 3 C++ implementation. Once the `cc` tactic is used a lot in Mathlib, we should profile and see if `HashMap` could be more optimal. -/ abbrev RBACAppsMap (α : Type u) := Batteries.RBMap ACApps α compare /-- Red-black sets of `ACApps`es. TODO: the choice between `RBSet` and `HashSet` is not obvious: the current version follows the Lean 3 C++ implementation. Once the `cc` tactic is used a lot in Mathlib, we should profile and see if `HashSet` could be more optimal. -/ abbrev RBACAppsSet := Batteries.RBSet ACApps compare /-- For proof terms generated by AC congruence closure modules, we want a placeholder as an equality proof between given two terms which will be generated by non-AC congruence closure modules later. `DelayedExpr` represents it using `eqProof`. -/ inductive DelayedExpr where /-- A `DelayedExpr` of just an `Expr`. 
-/ | ofExpr (e : Expr) : DelayedExpr /-- A placeholder as an equality proof between given two terms which will be generated by non-AC congruence closure modules later. -/ | eqProof (lhs rhs : Expr) : DelayedExpr /-- Will be applied to `congr_arg`. -/ | congrArg (f : Expr) (h : DelayedExpr) : DelayedExpr /-- Will be applied to `congr_fun`. -/ | congrFun (h : DelayedExpr) (a : ACApps) : DelayedExpr /-- Will be applied to `Eq.symm`. -/ | eqSymm (h : DelayedExpr) : DelayedExpr /-- Will be applied to `Eq.symm`. -/ | eqSymmOpt (a₁ a₂ : ACApps) (h : DelayedExpr) : DelayedExpr /-- Will be applied to `Eq.trans`. -/ | eqTrans (h₁ h₂ : DelayedExpr) : DelayedExpr /-- Will be applied to `Eq.trans`. -/ | eqTransOpt (a₁ a₂ a₃ : ACApps) (h₁ h₂ : DelayedExpr) : DelayedExpr /-- Will be applied to `heq_of_eq`. -/ | heqOfEq (h : DelayedExpr) : DelayedExpr /-- Will be applied to `HEq.symm`. -/ | heqSymm (h : DelayedExpr) : DelayedExpr deriving Inhabited instance : Coe Expr DelayedExpr := ⟨DelayedExpr.ofExpr⟩ attribute [coe] DelayedExpr.ofExpr /-- This is used as a proof term in `Entry`s instead of `Expr`. -/ inductive EntryExpr /-- An `EntryExpr` of just an `Expr`. -/ | ofExpr (e : Expr) : EntryExpr /-- dummy congruence proof, it is just a placeholder. -/ | congr : EntryExpr /-- dummy eq_true proof, it is just a placeholder -/ | eqTrue : EntryExpr /-- dummy refl proof, it is just a placeholder. -/ | refl : EntryExpr /-- An `EntryExpr` of a `DelayedExpr`. -/ | ofDExpr (e : DelayedExpr) : EntryExpr deriving Inhabited instance : ToMessageData EntryExpr where toMessageData | .ofExpr e => toMessageData e | .congr => m!"[congruence proof]" | .eqTrue => m!"[eq_true proof]" | .refl => m!"[refl proof]" | .ofDExpr _ => m!"[delayed expression]" instance : Coe Expr EntryExpr := ⟨EntryExpr.ofExpr⟩ attribute [coe] EntryExpr.ofExpr /-- Equivalence class data associated with an expression `e`. -/ structure Entry where /-- next element in the equivalence class. 
-/
  next : Expr
  /-- root (aka canonical) representative of the equivalence class. -/
  root : Expr
  /-- root of the congruence class, it is meaningless if `e` is not an application. -/
  cgRoot : Expr
  /-- When `e` was added to this equivalence class because of an equality `(H : e = tgt)`, then
  we store `tgt` at `target`, and `H` at `proof`. Both fields are none if `e == root` -/
  target : Option Expr := none
  /-- When `e` was added to this equivalence class because of an equality `(H : e = tgt)`, then
  we store `tgt` at `target`, and `H` at `proof`. Both fields are none if `e == root` -/
  proof : Option EntryExpr := none
  /-- Variable in the AC theory. -/
  acVar : Option Expr := none
  /-- `true` if the proof has been flipped. -/
  flipped : Bool
  /-- `true` if the node should be viewed as an abstract value -/
  interpreted : Bool
  /-- `true` if head symbol is a constructor -/
  constructor : Bool
  /-- `true` if equivalence class contains lambda expressions -/
  hasLambdas : Bool
  /-- `heqProofs == true` iff some proofs in the equivalence class are based on heterogeneous
  equality. We represent equality and heterogeneous equality in a single equivalence class. -/
  heqProofs : Bool
  /-- If `fo == true`, then the expression associated with this entry is an application, and
  we are using first-order approximation to encode it. That is, we ignore its partial
  applications. -/
  fo : Bool
  /-- number of elements in the equivalence class, it is meaningless if `e != root` -/
  size : Nat
  /-- The field `mt` is used to implement the mod-time optimization introduced by the Simplify
  theorem prover. The basic idea is to introduce a counter `gmt` that records the number of
  heuristic instantiations that have occurred in the current branch. It is incremented after
  each round of heuristic instantiation. The field `mt` records the last time any proper
  descendant of this entry was involved in a merge. -/
  mt : Nat
  deriving Inhabited

/-- Stores equivalence class data associated with an expression `e`.
-/ abbrev Entries := RBExprMap Entry /-- Equivalence class data associated with an expression `e` used by AC congruence closure modules. -/ structure ACEntry where /-- Natural number associated to an expression. -/ idx : Nat /-- AC variables that occur on the left hand side of an equality which `e` occurs as the left hand side of in `CCState.acR`. -/ RLHSOccs : RBACAppsSet := ∅ /-- AC variables that occur on the **left** hand side of an equality which `e` occurs as the right hand side of in `CCState.acR`. Don't confuse. -/ RRHSOccs : RBACAppsSet := ∅ deriving Inhabited /-- Returns the occurrences of this entry in either the LHS or RHS. -/ def ACEntry.ROccs (ent : ACEntry) : (inLHS : Bool) → RBACAppsSet | true => ent.RLHSOccs | false => ent.RRHSOccs /-- Used to record when an expression processed by `cc` occurs in another expression. -/ structure ParentOcc where expr : Expr /-- If `symmTable` is true, then we should use the `symmCongruences`, otherwise `congruences`. Remark: this information is redundant, it can be inferred from `expr`. We use store it for performance reasons. -/ symmTable : Bool /-- Red-black sets of `ParentOcc`s. -/ abbrev ParentOccSet := Batteries.RBSet ParentOcc (Ordering.byKey ParentOcc.expr compare) /-- Used to map an expression `e` to another expression that contains `e`. When `e` is normalized, its parents should also change. -/ abbrev Parents := RBExprMap ParentOccSet inductive CongruencesKey /-- `fn` is First-Order: we do not consider all partial applications. -/ | fo (fn : Expr) (args : Array Expr) : CongruencesKey /-- `fn` is Higher-Order. -/ | ho (fn : Expr) (arg : Expr) : CongruencesKey deriving BEq, Hashable /-- Maps each expression (via `mkCongruenceKey`) to expressions it might be congruent to. -/ abbrev Congruences := Batteries.HashMap CongruencesKey (List Expr) structure SymmCongruencesKey where (h₁ h₂ : Expr) deriving BEq, Hashable /-- The symmetric variant of `Congruences`. 
The `Name` identifies which relation the congruence is considered for. Note that this only works for two-argument relations: `ModEq n` and `ModEq m` are considered the same. -/ abbrev SymmCongruences := Batteries.HashMap SymmCongruencesKey (List (Expr × Name)) /-- Stores the root representatives of subsingletons. -/ abbrev SubsingletonReprs := RBExprMap Expr /-- Stores the root representatives of `.instImplicit` arguments. -/ abbrev InstImplicitReprs := RBExprMap (List Expr) abbrev TodoEntry := Expr × Expr × EntryExpr × Bool abbrev ACTodoEntry := ACApps × ACApps × DelayedExpr /-- Congruence closure state. This may be considered to be a set of expressions and an equivalence class over this set. The equivalence class is generated by the equational rules that are added to the `CCState` and congruence, that is, if `a = b` then `f(a) = f(b)` and so on. -/ structure CCState extends CCConfig where /-- Maps known expressions to their equivalence class data. -/ entries : Entries := ∅ /-- Maps an expression `e` to the expressions `e` occurs in. -/ parents : Parents := ∅ /-- Maps each expression to a set of expressions it might be congruent to. -/ congruences : Congruences := ∅ /-- Maps each expression to a set of expressions it might be congruent to, via the symmetrical relation. -/ symmCongruences : SymmCongruences := ∅ subsingletonReprs : SubsingletonReprs := ∅ /-- Records which instances of the same class are defeq. -/ instImplicitReprs : InstImplicitReprs := ∅ /-- The congruence closure module has a mode where the root of each equivalence class is marked as an interpreted/abstract value. Moreover, in this mode proof production is disabled. This capability is useful for heuristic instantiation. -/ frozePartitions : Bool := false /-- Mapping from operators occurring in terms and their canonical representation in this module -/ canOps : RBExprMap Expr := ∅ /-- Whether the canonical operator is suppoted by AC. 
-/ opInfo : RBExprMap Bool := ∅ /-- Extra `Entry` information used by the AC part of the tactic. -/ acEntries : RBExprMap ACEntry := ∅ /-- Records equality between `ACApps`. -/ acR : RBACAppsMap (ACApps × DelayedExpr) := ∅ /-- Returns true if the `CCState` is inconsistent. For example if it had both `a = b` and `a ≠ b` in it.-/ inconsistent : Bool := false /-- "Global Modification Time". gmt is a number stored on the `CCState`, it is compared with the modification time of a cc_entry in e-matching. See `CCState.mt`. -/ gmt : Nat := 0 deriving Inhabited attribute [inherit_doc SubsingletonReprs] CCState.subsingletonReprs /-- Update the `CCState` by constructing and inserting a new `Entry`. -/ def CCState.mkEntryCore (ccs : CCState) (e : Expr) (interpreted : Bool) (constructor : Bool) : CCState := assert! ccs.entries.find? e |>.isNone let n : Entry := { next := e root := e cgRoot := e size := 1 flipped := false interpreted constructor hasLambdas := e.isLambda heqProofs := false mt := ccs.gmt fo := false } { ccs with entries := ccs.entries.insert e n } namespace CCState /-- Get the root representative of the given expression. -/ def root (ccs : CCState) (e : Expr) : Expr := match ccs.entries.find? e with | some n => n.root | none => e /-- Get the next element in the equivalence class. Note that if the given `Expr` `e` is not in the graph then it will just return `e`. -/ def next (ccs : CCState) (e : Expr) : Expr := match ccs.entries.find? e with | some n => n.next | none => e /-- Check if `e` is the root of the congruence class. -/ def isCgRoot (ccs : CCState) (e : Expr) : Bool := match ccs.entries.find? e with | some n => e == n.cgRoot | none => true /-- "Modification Time". The field `mt` is used to implement the mod-time optimization introduced by the Simplify theorem prover. The basic idea is to introduce a counter `gmt` that records the number of heuristic instantiation that have occurred in the current branch. 
It is incremented after each round of heuristic instantiation. The field `mt` records the last time any proper descendant of of thie entry was involved in a merge. -/ def mt (ccs : CCState) (e : Expr) : Nat := match ccs.entries.find? e with | some n => n.mt | none => ccs.gmt /-- Is the expression in an equivalence class with only one element (namely, itself)? -/ def inSingletonEqc (ccs : CCState) (e : Expr) : Bool := match ccs.entries.find? e with | some it => it.next == e | none => true /-- Append to `roots` all the roots of equivalence classes in `ccs`. If `nonsingletonOnly` is true, we skip all the singleton equivalence classes. -/ def getRoots (ccs : CCState) (roots : Array Expr) (nonsingletonOnly : Bool) : Array Expr := Id.run do let mut roots := roots for (k, n) in ccs.entries do if k == n.root && (!nonsingletonOnly || !ccs.inSingletonEqc k) then roots := roots.push k return roots /-- Check for integrity of the `CCState`. -/ def checkEqc (ccs : CCState) (e : Expr) : Bool := toBool <| Id.run <| OptionT.run do let root := ccs.root e let mut size : Nat := 0 let mut it := e repeat let some itN := ccs.entries.find? it | failure guard (itN.root == root) let mut it₂ := it -- following `target` fields should lead to root repeat let it₂N := ccs.entries.find? it₂ match it₂N.bind Entry.target with | some it₃ => it₂ := it₃ | none => break guard (it₂ == root) it := itN.next size := size + 1 until it == e guard (ccs.entries.find? root |>.any (·.size == size)) /-- Check for integrity of the `CCState`. -/ def checkInvariant (ccs : CCState) : Bool := ccs.entries.all fun k n => k != n.root || checkEqc ccs k def getNumROccs (ccs : CCState) (e : Expr) (inLHS : Bool) : Nat := match ccs.acEntries.find? e with | some ent => (ent.ROccs inLHS).size | none => 0 /-- Search for the AC-variable (`Entry.acVar`) with the least occurrences in the state. 
-/ def getVarWithLeastOccs (ccs : CCState) (e : ACApps) (inLHS : Bool) : Option Expr := match e with | .apps _ args => Id.run do let mut r := args[0]? let mut numOccs := r.casesOn 0 fun r' => ccs.getNumROccs r' inLHS for hi : i in [1:args.size] do if (args[i]'hi.2) != (args[i - 1]'(Nat.lt_of_le_of_lt (i.sub_le 1) hi.2)) then let currOccs := ccs.getNumROccs (args[i]'hi.2) inLHS if currOccs < numOccs then r := (args[i]'hi.2) numOccs := currOccs return r | .ofExpr e => e def getVarWithLeastLHSOccs (ccs : CCState) (e : ACApps) : Option Expr := ccs.getVarWithLeastOccs e true def getVarWithLeastRHSOccs (ccs : CCState) (e : ACApps) : Option Expr := ccs.getVarWithLeastOccs e false open MessageData /-- Pretty print the entry associated with the given expression. -/ def ppEqc (ccs : CCState) (e : Expr) : MessageData := Id.run do let mut lr : List MessageData := [] let mut it := e repeat let some itN := ccs.entries.find? it | break let mdIt : MessageData := if it.isForall || it.isLambda || it.isLet then paren (ofExpr it) else ofExpr it lr := mdIt :: lr it := itN.next until it == e let l := lr.reverse return bracket "{" (group <| joinSep l (ofFormat ("," ++ .line))) "}" /-- Pretty print the entire cc graph. If the `nonSingleton` argument is set to `true` then singleton equivalence classes will be omitted. -/ def ppEqcs (ccs : CCState) (nonSingleton : Bool := true) : MessageData := let roots := ccs.getRoots #[] nonSingleton let a := roots.map (fun root => ccs.ppEqc root) let l := a.toList bracket "{" (group <| joinSep l (ofFormat ("," ++ .line))) "}" def ppParentOccsAux (ccs : CCState) (e : Expr) : MessageData := match ccs.parents.find? 
e with | some poccs => let r := ofExpr e ++ ofFormat (.line ++ ":=" ++ .line) let ps := poccs.toList.map fun o => ofExpr o.expr group (r ++ bracket "{" (group <| joinSep ps (ofFormat ("," ++ .line))) "}") | none => ofFormat .nil def ppParentOccs (ccs : CCState) : MessageData := let r := ccs.parents.toList.map fun (k, _) => ccs.ppParentOccsAux k bracket "{" (group <| joinSep r (ofFormat ("," ++ .line))) "}" def ppACDecl (ccs : CCState) (e : Expr) : MessageData := match ccs.acEntries.find? e with | some it => group (ofFormat (s!"x_{it.idx}" ++ .line ++ ":=" ++ .line) ++ ofExpr e) | none => nil def ppACDecls (ccs : CCState) : MessageData := let r := ccs.acEntries.toList.map fun (k, _) => ccs.ppACDecl k bracket "{" (joinSep r (ofFormat ("," ++ .line))) "}" def ppACExpr (ccs : CCState) (e : Expr) : MessageData := if let some it := ccs.acEntries.find? e then s!"x_{it.idx}" else ofExpr e partial def ppACApps (ccs : CCState) : ACApps → MessageData | .apps op args => let r := ofExpr op :: args.toList.map fun arg => ccs.ppACExpr arg sbracket (joinSep r (ofFormat .line)) | .ofExpr e => ccs.ppACExpr e def ppACR (ccs : CCState) : MessageData := let r := ccs.acR.toList.map fun (k, p, _) => group <| ccs.ppACApps k ++ ofFormat (Format.line ++ "--> ") ++ nest 4 (Format.line ++ ccs.ppACApps p) bracket "{" (joinSep r (ofFormat ("," ++ .line))) "}" def ppAC (ccs : CCState) : MessageData := sbracket (ccs.ppACDecls ++ ofFormat ("," ++ .line) ++ ccs.ppACR) end CCState /-- The congruence closure module (optionally) uses a normalizer. The idea is to use it (if available) to normalize auxiliary expressions produced by internal propagation rules (e.g., subsingleton propagator). -/ structure CCNormalizer where normalize : Expr → MetaM Expr attribute [inherit_doc CCNormalizer] CCNormalizer.normalize structure CCPropagationHandler where propagated : Array Expr → MetaM Unit /-- Congruence closure module invokes the following method when a new auxiliary term is created during propagation. 
-/ newAuxCCTerm : Expr → MetaM Unit /-- `CCStructure` extends `CCState` (which records a set of facts derived by congruence closure) by recording which steps still need to be taken to solve the goal. -/ structure CCStructure extends CCState where /-- Equalities that have been discovered but not processed. -/ todo : Array TodoEntry := #[] /-- AC-equalities that have been discovered but not processed. -/ acTodo : Array ACTodoEntry := #[] normalizer : Option CCNormalizer := none phandler : Option CCPropagationHandler := none cache : CCCongrTheoremCache := ∅ deriving Inhabited end Mathlib.Tactic.CC
Tactic\CC\Lemmas.lean
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Mathlib.Init.Logic

/-!
Lemmas used by the congruence closure module.

Each lemma rewrites a propositional connective applied to arguments whose truth
value (or mutual equality) is already known, into a simpler proposition. The
congruence closure module uses these as propagation rules.
-/

namespace Mathlib.Tactic.CC

-- Rewriting `↔` when one side is known to be `True`, or both sides are equal.
theorem iff_eq_of_eq_true_left {a b : Prop} (h : a = True) : (a ↔ b) = b :=
  h.symm ▸ propext true_iff_iff

theorem iff_eq_of_eq_true_right {a b : Prop} (h : b = True) : (a ↔ b) = a :=
  h.symm ▸ propext iff_true_iff

theorem iff_eq_true_of_eq {a b : Prop} (h : a = b) : (a ↔ b) = True :=
  h ▸ propext (iff_self_iff _)

-- Rewriting `∧` by the known truth value of one conjunct.
theorem and_eq_of_eq_true_left {a b : Prop} (h : a = True) : (a ∧ b) = b :=
  h.symm ▸ propext (true_and_iff _)

theorem and_eq_of_eq_true_right {a b : Prop} (h : b = True) : (a ∧ b) = a :=
  h.symm ▸ propext (and_true_iff _)

theorem and_eq_of_eq_false_left {a b : Prop} (h : a = False) : (a ∧ b) = False :=
  h.symm ▸ propext (false_and_iff _)

theorem and_eq_of_eq_false_right {a b : Prop} (h : b = False) : (a ∧ b) = False :=
  h.symm ▸ propext (and_false_iff _)

theorem and_eq_of_eq {a b : Prop} (h : a = b) : (a ∧ b) = a :=
  h ▸ propext and_self_iff

-- Rewriting `∨` by the known truth value of one disjunct.
theorem or_eq_of_eq_true_left {a b : Prop} (h : a = True) : (a ∨ b) = True :=
  h.symm ▸ propext (true_or_iff _)

theorem or_eq_of_eq_true_right {a b : Prop} (h : b = True) : (a ∨ b) = True :=
  h.symm ▸ propext (or_true_iff _)

theorem or_eq_of_eq_false_left {a b : Prop} (h : a = False) : (a ∨ b) = b :=
  h.symm ▸ propext (false_or_iff _)

theorem or_eq_of_eq_false_right {a b : Prop} (h : b = False) : (a ∨ b) = a :=
  h.symm ▸ propext (or_false_iff _)

theorem or_eq_of_eq {a b : Prop} (h : a = b) : (a ∨ b) = a :=
  h ▸ propext or_self_iff

-- Rewriting `→` by the known truth value of one side.
theorem imp_eq_of_eq_true_left {a b : Prop} (h : a = True) : (a → b) = b :=
  h.symm ▸ propext ⟨fun h ↦ h trivial, fun h₁ _ ↦ h₁⟩

theorem imp_eq_of_eq_true_right {a b : Prop} (h : b = True) : (a → b) = True :=
  h.symm ▸ propext ⟨fun _ ↦ trivial, fun h₁ _ ↦ h₁⟩

theorem imp_eq_of_eq_false_left {a b : Prop} (h : a = False) : (a → b) = True :=
  h.symm ▸ propext ⟨fun _ ↦ trivial, fun _ h₂ ↦ False.elim h₂⟩

theorem imp_eq_of_eq_false_right {a b : Prop} (h : b = False) : (a → b) = Not a :=
  h.symm ▸ propext ⟨fun h ↦ h, fun hna ha ↦ hna ha⟩

/- Remark: the congruence closure module will only use the following lemma if
`CCConfig.em` is `true` (the proof is classical). -/
theorem not_imp_eq_of_eq_false_right {a b : Prop} (h : b = False) : (Not a → b) = a :=
  h.symm ▸ propext (Iff.intro ( fun h' ↦ Classical.byContradiction fun hna ↦ h' hna)
    fun ha hna ↦ hna ha)

theorem imp_eq_true_of_eq {a b : Prop} (h : a = b) : (a → b) = True :=
  h ▸ propext ⟨fun _ ↦ trivial, fun _ ha ↦ ha⟩

-- Rewriting `Not` by the known truth value of its argument.
theorem not_eq_of_eq_true {a : Prop} (h : a = True) : Not a = False :=
  h.symm ▸ propext not_true

theorem not_eq_of_eq_false {a : Prop} (h : a = False) : Not a = True :=
  h.symm ▸ propext not_false_iff

/-- A proposition can never be equal to its own negation. -/
theorem false_of_a_eq_not_a {a : Prop} (h : a = Not a) : False :=
  have : Not a := fun ha ↦ absurd ha (Eq.mp h ha)
  absurd (Eq.mpr h this) this

universe u

-- Rewriting `ite` by the known truth value of the condition (or equality of the branches).
theorem if_eq_of_eq_true {c : Prop} [d : Decidable c] {α : Sort u} (t e : α) (h : c = True) :
    @ite α c d t e = t :=
  if_pos (of_eq_true h)

theorem if_eq_of_eq_false {c : Prop} [d : Decidable c] {α : Sort u} (t e : α) (h : c = False) :
    @ite α c d t e = e :=
  if_neg (not_of_eq_false h)

theorem if_eq_of_eq (c : Prop) [d : Decidable c] {α : Sort u} {t e : α} (h : t = e) :
    @ite α c d t e = t :=
  match d with
  | isTrue _ => rfl
  | isFalse _ => Eq.symm h

-- Propagation in the other direction: from the truth value of a compound
-- proposition to the truth values of its components.
theorem eq_true_of_and_eq_true_left {a b : Prop} (h : (a ∧ b) = True) : a = True :=
  eq_true (And.left (of_eq_true h))

theorem eq_true_of_and_eq_true_right {a b : Prop} (h : (a ∧ b) = True) : b = True :=
  eq_true (And.right (of_eq_true h))

theorem eq_false_of_or_eq_false_left {a b : Prop} (h : (a ∨ b) = False) : a = False :=
  eq_false fun ha ↦ False.elim (Eq.mp h (Or.inl ha))

theorem eq_false_of_or_eq_false_right {a b : Prop} (h : (a ∨ b) = False) : b = False :=
  eq_false fun hb ↦ False.elim (Eq.mp h (Or.inr hb))

theorem eq_false_of_not_eq_true {a : Prop} (h : Not a = True) : a = False :=
  eq_false fun ha ↦ absurd ha (Eq.mpr h trivial)

/- Remark: the congruence closure module will only use the following lemma if
`CCConfig.em` is `true` (the proof is classical). -/
theorem eq_true_of_not_eq_false {a : Prop} (h : Not a = False) : a = True :=
  eq_true (Classical.byContradiction fun hna ↦ Eq.mp h hna)

end Mathlib.Tactic.CC
Tactic\Continuity\Init.lean
/- Copyright (c) 2023 Jannis Limperg. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Jannis Limperg -/ import Aesop /-! # Continuity Rule Set This module defines the `Continuous` Aesop rule set which is used by the `continuity` tactic. Aesop rule sets only become visible once the file in which they're declared is imported, so we must put this declaration into its own file. -/ declare_aesop_rule_sets [Continuous]
Tactic\Explode\Datatypes.lean
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Evgenia Karunus, Kyle Miller
-/
import Lean.Util.Trace

/-!
# Explode command: datatypes

This file contains datatypes used by the `#explode` command and their associated methods.
-/

open Lean

namespace Mathlib.Explode

initialize registerTraceClass `explode

/-- How to display pipes (`│`) for this entry in the Fitch table. -/
inductive Status where
  /-- `├` Start intro (top-level) -/
  | sintro : Status
  /-- `Entry.depth` * `│` + `┌` Normal intro -/
  | intro : Status
  /-- `Entry.depth` * `│` + `├` Continuation intro -/
  | cintro : Status
  /-- `Entry.depth` * `│` -/
  | lam : Status
  /-- `Entry.depth` * `│` -/
  | reg : Status
  deriving Inhabited

/-- The row in the Fitch table. -/
structure Entry where
  /-- A type of this expression as a `MessageData`. Make sure to use `addMessageContext`. -/
  type : MessageData
  /-- The row number, starting from `0`. This is set by `Entries.add`. -/
  line : Option Nat := none
  /-- How many `if`s (aka lambda-abstractions) this row is nested under. -/
  depth : Nat
  /-- What `Status` this entry has - this only affects how `│`s are displayed. -/
  status : Status
  /-- What to display in the "theorem applied" column.
  Make sure to use `addMessageContext` if needed. -/
  thm : MessageData
  /-- Which other lines (aka rows) this row depends on. `none` means that
  the dependency has been filtered out of the table. -/
  deps : List (Option Nat)
  /-- Whether or not to use this in future deps lists. Generally controlled by the `select`
  function passed to `explodeCore`. Exception: `∀I` may ignore this for introduced hypotheses. -/
  useAsDep : Bool

/-- Get the `line` for an `Entry` that has been added to the `Entries` structure.
Panics (via `Option.get!`) if the entry was never added through `Entries.add`. -/
def Entry.line! (entry : Entry) : Nat := entry.line.get!

/-- Instead of simply keeping a list of entries (`List Entry`), we create a datatype `Entries`
that allows us to compare expressions faster. The map and the list are kept in sync by
`Entries.add`/`Entries.addSynonym`. -/
structure Entries : Type where
  /-- Map from expressions to their rows; allows us to compare `Expr`s fast. -/
  s : ExprMap Entry
  /-- The rows themselves, most recently added first (see `Entries.add`). -/
  l : List Entry
  deriving Inhabited

/-- Find the row associated to the expression `e`, if there is one. -/
def Entries.find? (es : Entries) (e : Expr) : Option Entry :=
  es.s.find? e

/-- Number of expressions registered in the map. -/
def Entries.size (es : Entries) : Nat :=
  es.s.size

/-- Add the entry unless it already exists. Sets the `line` field to the next available value.
Returns the entry that is actually stored (the pre-existing one if `expr` was already added). -/
def Entries.add (entries : Entries) (expr : Expr) (entry : Entry) : Entry × Entries :=
  if let some entry' := entries.find? expr then
    (entry', entries)
  else
    -- Rows are numbered in insertion order, so the current map size is the next line number.
    let entry := { entry with line := entries.size }
    (entry, ⟨entries.s.insert expr entry, entry :: entries.l⟩)

/-- Add a pre-existing entry to the `ExprMap` for an additional expression.
This is used by `let` bindings where `expr` is an fvar. Note the row list `l`
is left unchanged: no new row is created for the synonym. -/
def Entries.addSynonym (entries : Entries) (expr : Expr) (entry : Entry) : Entries :=
  ⟨entries.s.insert expr entry, entries.l⟩
Tactic\Explode\Pretty.lean
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Evgenia Karunus, Kyle Miller
-/
import Lean.Meta.Basic
import Mathlib.Tactic.Explode.Datatypes

/-!
# Explode command: pretty

This file contains UI code to render the Fitch table.
-/

open Lean

namespace Mathlib.Explode

/-- Given a list of `MessageData`s, make them of equal length.
We need this in order to form columns in our Fitch table.
```lean
padRight ["hi", "hello"] = ["hi   ", "hello"]
```
-/
def padRight (mds : List MessageData) : MetaM (List MessageData) := do
  -- 1. Find the max length of the word in a list
  let mut maxLength := 0
  for md in mds do
    maxLength := max maxLength (← md.toString).length
  -- 2. Pad all words in a list with " "
  let pad (md : MessageData) : MetaM MessageData := do
    let padWidth : Nat := maxLength - (← md.toString).length
    return md ++ "".pushn ' ' padWidth
  mds.mapM pad

/-- Render a particular row of the Fitch table.

The four lists (line numbers, dependencies, theorem names, entries) are walked
in lockstep; rendering stops as soon as any of them is exhausted. -/
def rowToMessageData :
    List MessageData → List MessageData → List MessageData → List Entry → MetaM MessageData
  | line :: lines, dep :: deps, thm :: thms, en :: es => do
    let pipes := String.join (List.replicate en.depth "│ ")
    -- `sintro` ignores the depth-derived pipes; the other statuses prefix them.
    let pipes := match en.status with
      | Status.sintro => s!"├ "
      | Status.intro => s!"│ {pipes}┌ "
      | Status.cintro => s!"│ {pipes}├ "
      | Status.lam => s!"│ {pipes}"
      | Status.reg => s!"│ {pipes}"
    let row := m!"{line}│{dep}│ {thm} {pipes}{en.type}\n"
    -- The remaining rows are rendered first and the current row composed after them.
    return (← rowToMessageData lines deps thms es).compose row
  | _, _, _, _ => return MessageData.nil

/-- Given all `Entries`, return the entire Fitch table.

NOTE(review): `entries.l` stores rows most-recently-added first (see
`Entries.add`); `rowToMessageData` composes the recursive tail before each row,
which presumably restores ascending line order in the output — confirm against
`MessageData.compose` semantics. -/
def entriesToMessageData (entries : Entries) : MetaM MessageData := do
  -- ['1', '2', '3']
  let paddedLines ← padRight <| entries.l.map fun entry => m!"{entry.line!}"
  -- ['   ', '1,2', '1  ']
  let paddedDeps ← padRight <| entries.l.map fun entry =>
    String.intercalate "," <| entry.deps.map (fun dep => (dep.map toString).getD "_")
  -- ['p  ', 'hP ', '∀I ']
  let paddedThms ← padRight <| entries.l.map (·.thm)
  rowToMessageData paddedLines paddedDeps paddedThms entries.l
Tactic\FunProp\AEMeasurable.lean
/- Copyright (c) 2024 Tomáš Skřivan. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Tomáš Skřivan -/ import Mathlib.MeasureTheory.Measure.AEMeasurable /-! ## `fun_prop` minimal setup for AEMeasurable -/ open Mathlib -- lambda rules: these two are currently missing -- attribute [fun_prop] AEMeasurable_apply AEMeasurable_pi
Tactic\FunProp\Attr.lean
/- Copyright (c) 2024 Tomas Skrivan. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Tomas Skrivan -/ import Lean import Mathlib.Tactic.FunProp.Decl import Mathlib.Tactic.FunProp.Theorems /-! ## `funProp` attribute -/ namespace Mathlib open Lean Meta namespace Meta.FunProp -- TODO: add support for specifying priority and discharger -- open Lean.Parser.Tactic -- syntax (name:=Attr.funProp) "funProp" (prio)? (discharger)? : attr private def funPropHelpString : String := "`funProp` tactic to prove function properties like `Continuous`, `Differentiable`, `IsLinearMap`" /-- Initialization of `funProp` attribute -/ initialize funPropAttr : Unit ← registerBuiltinAttribute { name := `fun_prop descr := funPropHelpString applicationTime := AttributeApplicationTime.afterCompilation add := fun declName _stx attrKind => discard <| MetaM.run do let info ← getConstInfo declName forallTelescope info.type fun _ b => do if b.isProp then addFunPropDecl declName else addTheorem declName attrKind erase := fun _declName => throwError "can't remove `funProp` attribute (not implemented yet)" }
Tactic\FunProp\ContDiff.lean
/-
Copyright (c) 2024 Tomáš Skřivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomáš Skřivan
-/
import Mathlib.Analysis.Calculus.IteratedDeriv.Lemmas
import Mathlib.Analysis.SpecialFunctions.ExpDeriv
import Mathlib.Analysis.SpecialFunctions.Log.Deriv
import Mathlib.Tactic.FunProp
import Mathlib.Tactic.FunProp.Differentiable

/-!
## `funProp` minimal setup for ContDiff(At/On)

Restated versions of Mathlib lemmas (with lambdas written out explicitly, as
`fun_prop` requires) followed by the `@[fun_prop]` registrations.
-/

section Missing

section lambda_rules

variable {K : Type*} [NontriviallyNormedField K]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace K E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace K F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace K G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace K G']
variable {f f₀ f₁ g : E → F} {x} {s t} {n}

theorem contDiff_id' : ContDiff K n (fun x : E => x) := contDiff_id

theorem contDiffAt_id' : ContDiffAt K n (fun x : E => x) x := contDiffAt_id

theorem contDiffOn_id' : ContDiffOn K n (fun x : E => x) s := contDiff_id.contDiffOn

theorem ContDiff.comp' {g : F → G} (hg : ContDiff K n g) (hf : ContDiff K n f) :
    ContDiff K n (fun x => g (f x)) := ContDiff.comp hg hf

theorem ContDiffAt.comp' {f : E → F} {g : F → G} (hg : ContDiffAt K n g (f x))
    (hf : ContDiffAt K n f x) : ContDiffAt K n (fun x => g (f x)) x := ContDiffAt.comp x hg hf

-- theorem ContDiffOn.comp'' {g : F → G} {t : Set F} (hg : ContDiffOn K n g t)
--     (hf : ContDiffOn K n f s) (st : Set.MapsTo f s t) : ContDiffOn K n (fun x => g (f x)) s :=

variable {ι ι' : Type*} [Fintype ι] [Fintype ι'] {F' : ι → Type*}
  [∀ i, NormedAddCommGroup (F' i)] [∀ i, NormedSpace K (F' i)]
  {φ : ∀ i, E → F' i} {Φ : E → ∀ i, F' i}

theorem contDiff_pi' (hΦ : ∀ i, ContDiff K n fun x => Φ x i) : ContDiff K n Φ :=
  contDiff_pi.2 hΦ

theorem contDiffOn_pi' (hΦ : ∀ i, ContDiffOn K n (fun x => Φ x i) s) : ContDiffOn K n Φ s :=
  contDiffOn_pi.2 hΦ

theorem contDiffAt_pi' (hΦ : ∀ i, ContDiffAt K n (fun x => Φ x i) x) : ContDiffAt K n Φ x :=
  contDiffAt_pi.2 hΦ

end lambda_rules

section div

variable {K : Type*} [NontriviallyNormedField K]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace K E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace K F]
variable {f f₀ f₁ g : E → F} {x} {s t} {n}

theorem ContDiffOn.div' [CompleteSpace K] {f g : E → K} {n} (hf : ContDiffOn K n f s)
    (hg : ContDiffOn K n g s) (h₀ : ∀ x ∈ s, g x ≠ 0) :
    ContDiffOn K n (fun x => f x / g x) s := ContDiffOn.div hf hg h₀

end div

section deriv

variable {K : Type*} [NontriviallyNormedField K]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace K E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace K F]

/-- The original version `ContDiff.differentiable_iteratedDeriv` introduces a new variable
`(n : ℕ∞)` and `funProp` can't work with such a theorem. The theorem should be stated where `n`
is explicitly the smallest possible value, i.e. `n = m + 1`. In conjunction with
`ContDiff.of_le` we can recover the full power of the original theorem. -/
theorem ContDiff.differentiable_iteratedDeriv' {m : ℕ} {f : K → F} (hf : ContDiff K (m+1) f) :
    Differentiable K (iteratedDeriv m f) :=
  ContDiff.differentiable_iteratedDeriv m hf (Nat.cast_lt.mpr m.lt_succ_self)

end deriv

end Missing

-- mark definition
attribute [fun_prop]
  ContDiff
  ContDiffAt
  ContDiffOn

-- lambda rules
attribute [fun_prop]
  contDiff_id'
  contDiff_const
  ContDiff.comp'
  contDiff_apply
  contDiff_pi'

  contDiffAt_id'
  contDiffAt_const
  ContDiffAt.comp'
  -- contDiffAt_apply -- missing
  contDiffAt_pi'

  contDiffOn_id'
  contDiffOn_const
  ContDiffOn.comp'
  -- contDiffOn_apply -- missing
  contDiffOn_pi'

-- product
attribute [fun_prop]
  ContDiff.prod ContDiff.fst ContDiff.snd
  ContDiffAt.prod ContDiffAt.fst ContDiffAt.snd
  ContDiffOn.prod ContDiffOn.fst ContDiffOn.snd

-- transitions
attribute [fun_prop]
  ContDiff.contDiffAt
  ContDiff.contDiffOn
  ContDiffAt.differentiableAt
  ContDiffOn.differentiableOn
  ContDiffAt.continuousAt
  ContDiffOn.continuousOn
  ContDiff.of_le

-- algebra
attribute [fun_prop]
  ContDiff.add ContDiff.sub ContDiff.neg ContDiff.mul ContDiff.smul ContDiff.div ContDiff.inv
  ContDiffAt.add ContDiffAt.sub ContDiffAt.neg ContDiffAt.mul ContDiffAt.smul ContDiffAt.div
  ContDiffAt.inv
  ContDiffOn.add ContDiffOn.sub ContDiffOn.neg ContDiffOn.mul ContDiffOn.smul ContDiffOn.div'
  ContDiffOn.inv

-- special functions
attribute [fun_prop]
  ContDiff.exp ContDiff.log ContDiff.pow
  ContDiffAt.exp ContDiffAt.log ContDiffAt.pow
  ContDiffOn.exp ContDiffOn.log ContDiffOn.pow
  ContDiff.differentiable_iteratedDeriv'
Tactic\FunProp\Core.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean
import Mathlib.Tactic.FunProp.Theorems
import Mathlib.Tactic.FunProp.ToBatteries
import Mathlib.Tactic.FunProp.Types
import Mathlib.Lean.Expr.Basic
import Batteries.Tactic.Exact

/-!
## `funProp` core tactic algorithm
-/

namespace Mathlib
open Lean Meta Qq

namespace Meta.FunProp

/-- Synthesize instance of type `type` and
1. assign it to `x` if `x` is meta variable
2. check it is equal to `x` -/
def synthesizeInstance (thmId : Origin) (x type : Expr) : MetaM Bool := do
  match (← trySynthInstance type) with
  | LOption.some val =>
    if (← withReducibleAndInstances <| isDefEq x val) then
      return true
    else
      trace[Meta.Tactic.fun_prop]
        "{← ppOrigin thmId}, failed to assign instance{indentExpr type} sythesized value{indentExpr val}\nis not definitionally equal to{indentExpr x}"
      return false
  | _ =>
    trace[Meta.Tactic.fun_prop]
      "{← ppOrigin thmId}, failed to synthesize instance{indentExpr type}"
    return false

/-- Synthesize arguments `xs` either with typeclass synthesis, with funProp or with discharger.

Non-`Prop` metavariable arguments that cannot be resolved immediately are postponed and
rechecked after all other arguments have been processed (they may get assigned by
unification in the meantime). -/
def synthesizeArgs (thmId : Origin) (xs : Array Expr) (bis : Array BinderInfo)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM Bool := do
  let mut postponed : Array Expr := #[]
  for x in xs, bi in bis do
    let type ← inferType x
    if bi.isInstImplicit then
      unless (← synthesizeInstance thmId x type) do
        logError s!"Failed to synthesize instance {← ppExpr type} \
          when applying theorem {← ppOrigin' thmId}."
        return false
    else if (← instantiateMVars x).isMVar then
      -- try type class
      if (← isClass? type).isSome then
        if (← synthesizeInstance thmId x type) then
          continue
      -- try function property
      if (← isFunProp type.getForallBody) then
        if let .some ⟨proof⟩ ← funProp type then
          if (← isDefEq x proof) then
            continue
          else do
            trace[Meta.Tactic.fun_prop]
              "{← ppOrigin thmId}, failed to assign proof{indentExpr type}"
            return false
      else
        -- try user provided discharger
        let cfg : Config ← read
        if (← isProp type) then
          if let .some proof ← cfg.disch type then
            if (← isDefEq x proof) then
              continue
            else do
              trace[Meta.Tactic.fun_prop]
                "{← ppOrigin thmId}, failed to assign proof{indentExpr type}"
              return false
        else
          logError s!"Failed to prove necessary assumption {← ppExpr type} \
            when applying theorem {← ppOrigin' thmId}."
      -- Unresolved non-`Prop` arguments are retried below; unresolved `Prop`s are fatal.
      if ¬(← isProp type) then
        postponed := postponed.push x
        continue
      else
        trace[Meta.Tactic.fun_prop]
          "{← ppOrigin thmId}, failed to discharge hypotheses{indentExpr type}"
        return false
  -- Second pass: postponed arguments must have been assigned by now.
  for x in postponed do
    if (← instantiateMVars x).isMVar then
      logError s!"Failed to infer `({← ppExpr x} : {← ppExpr (← inferType x)})` \
        when applying theorem {← ppOrigin' thmId}."
      trace[Meta.Tactic.fun_prop]
        "{← ppOrigin thmId}, failed to infer `({← ppExpr x} : {← ppExpr (← inferType x)})`"
      return false
  return true

/-- Try to apply theorem - core function.

Unifies the theorem conclusion `type` against the goal `e`, synthesizes/discharges the
telescope arguments `xs`, and on success returns the instantiated proof term. -/
def tryTheoremCore (xs : Array Expr) (bis : Array BinderInfo) (val : Expr) (type : Expr)
    (e : Expr) (thmId : Origin) (funProp : Expr → FunPropM (Option Result)) :
    FunPropM (Option Result) := do
  withTraceNode `Meta.Tactic.fun_prop
    (fun r => return s!"[{ExceptToEmoji.toEmoji r}] applying: {← ppOrigin' thmId}") do
  -- add theorem to the stack
  withTheorem thmId do
    if (← isDefEq type e) then
      if ¬(← synthesizeArgs thmId xs bis funProp) then
        return none
      let proof ← instantiateMVars (mkAppN val xs)
      trace[Meta.Tactic.fun_prop.apply] "{← ppOrigin thmId}, \n{e}"
      return .some { proof := proof }
    else
      trace[Meta.Tactic.fun_prop] "failed to unify {← ppOrigin thmId}\n{type}\nwith\n{e}"
      return none

/-- Try to apply a theorem provided some of the theorem arguments.

`hint` maps telescope positions to the expressions they should be assigned to.
NOTE(review): the inner `for (id,v) in hint` re-assigns *every* hint on each iteration
of the outer loop; the outer loop apparently only supplies `(i, x)` for the trace
message when an assignment fails — confirm this nesting is intended. -/
def tryTheoremWithHint? (e : Expr) (thmOrigin : Origin) (hint : Array (Nat×Expr))
    (funProp : Expr → FunPropM (Option Result)) (newMCtxDepth : Bool := false) :
    FunPropM (Option Result) := do
  let go : FunPropM (Option Result) := do
    let thmProof ← thmOrigin.getValue
    let type ← inferType thmProof
    let (xs, bis, type) ← forallMetaTelescope type
    for (i,x) in hint do
      try
        for (id,v) in hint do
          xs[id]!.mvarId!.assignIfDefeq v
      catch _ =>
        trace[Meta.Tactic.fun_trans]
          "failed to use hint {i} `{← ppExpr x} when applying theorem {← ppOrigin thmOrigin}"
    tryTheoremCore xs bis thmProof type e thmOrigin funProp
  if newMCtxDepth then
    withNewMCtxDepth go
  else
    go

/-- Try to apply a theorem -/
def tryTheorem? (e : Expr) (thmOrigin : Origin) (funProp : Expr → FunPropM (Option Result))
    (newMCtxDepth : Bool := false) : FunPropM (Option Result) :=
  tryTheoremWithHint? e thmOrigin #[] funProp newMCtxDepth

/-- Apply lambda calculus rule `P fun x => x` -/
def applyIdRule (funPropDecl : FunPropDecl) (e X : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  let ext := lambdaTheoremsExt.getState (← getEnv)
  let .some thm := ext.theorems.find? (funPropDecl.funPropName, .id)
    | trace[Meta.Tactic.fun_prop] "missing identity rule to prove `{← ppExpr e}`"
      return none
  let .id id_X := thm.thmArgs | return none
  tryTheoremWithHint? e (.decl thm.thmName) #[(id_X,X)] funProp

/-- Apply lambda calculus rule `P fun x => y` -/
def applyConstRule (funPropDecl : FunPropDecl) (e X y : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  let ext := lambdaTheoremsExt.getState (← getEnv)
  let .some thm := ext.theorems.find? (funPropDecl.funPropName, .const)
    | trace[Meta.Tactic.fun_prop] "missing constant rule to prove `{← ppExpr e}`"
      return none
  let .const id_X id_y := thm.thmArgs | return none
  tryTheoremWithHint? e (.decl thm.thmName) #[(id_X,X),(id_y,y)] funProp

/-- Apply lambda calculus rule `P fun f => f i` -/
def applyProjRule (funPropDecl : FunPropDecl) (e x XY : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  let ext := lambdaTheoremsExt.getState (← getEnv)
  let .forallE n X Y _ := XY | return none

  -- non dependent case
  if ¬(Y.hasLooseBVars) then
    if let .some thm := ext.theorems.find? (funPropDecl.funPropName, .proj) then
      let .proj id_x id_Y := thm.thmArgs | return none
      return ← tryTheoremWithHint? e (.decl thm.thmName) #[(id_x,x),(id_Y,Y)] funProp

  -- dependent case
  -- can also handle non-dependent cases if non-dependent theorem is not available
  let Y := Expr.lam n X Y default

  let .some thm := ext.theorems.find? (funPropDecl.funPropName, .projDep)
    | trace[Meta.Tactic.fun_prop] "missing projection rule to prove `{← ppExpr e}`"
      return none
  let .projDep id_x id_Y := thm.thmArgs | return none
  tryTheoremWithHint? e (.decl thm.thmName) #[(id_x,x),(id_Y,Y)] funProp

/-- Apply lambda calculus rule `P f → P g → P fun x => f (g x)` -/
def applyCompRule (funPropDecl : FunPropDecl) (e f g : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  let ext := lambdaTheoremsExt.getState (← getEnv)
  let .some thm := ext.theorems.find? (funPropDecl.funPropName, .comp)
    | trace[Meta.Tactic.fun_prop] "missing composition rule to prove `{← ppExpr e}`"
      return none
  let .comp id_f id_g := thm.thmArgs | return none
  tryTheoremWithHint? e (.decl thm.thmName) #[(id_f,f),(id_g,g)] funProp

/-- Apply lambda calculus rule `∀ y, P (f · y) → P fun x y => f x y` -/
def applyPiRule (funPropDecl : FunPropDecl) (e f : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  let ext := lambdaTheoremsExt.getState (← getEnv)
  let .some thm := ext.theorems.find? (funPropDecl.funPropName, .pi)
    | trace[Meta.Tactic.fun_prop] "missing pi rule to prove `{← ppExpr e}`"
      return none
  let .pi id_f := thm.thmArgs | return none
  tryTheoremWithHint? e (.decl thm.thmName) #[(id_f,f)] funProp

/-- Prove function property of `fun x => let y := g x; f x y`. -/
def letCase (funPropDecl : FunPropDecl) (e : Expr) (f : Expr)
    (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do
  match f with
  | .lam xName xType (.letE yName yType yValue yBody _) xBi => do
    let yType := yType.consumeMData
    let yValue := yValue.consumeMData
    let yBody := yBody.consumeMData
    -- We perform reduction because the type is quite often of the form `(fun x => Y) #0`
    -- which is just `Y`
    -- Usually this is caused by the usage of `FunLike`
    let yType := yType.headBeta
    if (yType.hasLooseBVar 0) then
      throwError "dependent type encountered {← ppExpr (Expr.forallE xName xType yType default)}"

    -- let binding can be pulled out of the lambda function
    if ¬(yValue.hasLooseBVar 0) then
      let body := yBody.swapBVars 0 1
      let e' := .letE yName yType yValue (nonDep := false)
        (e.setArg (funPropDecl.funArgId) (.lam xName xType body xBi))
      return ← funProp e'

    -- Case split on which of the two bound variables (`x` at index 1, `y` at index 0)
    -- actually occur in the body.
    match (yBody.hasLooseBVar 0), (yBody.hasLooseBVar 1) with
    | true, true =>
      -- body uses both `x` and `y`: uncurry and compose with `x ↦ (x, g x)`
      let f ← mkUncurryFun 2 (Expr.lam xName xType (.lam yName yType yBody default) xBi)
      let g := Expr.lam xName xType (binderInfo := default)
        (mkAppN (← mkConstWithFreshMVarLevels ``Prod.mk) #[xType,yType,.bvar 0, yValue])
      applyCompRule funPropDecl e f g funProp
    | true, false =>
      -- body uses only `y`: plain composition `f ∘ g`
      let f := Expr.lam yName yType yBody default
      let g := Expr.lam xName xType yValue default
      applyCompRule funPropDecl e f g funProp
    | false, _ =>
      -- body ignores `y`: drop the let binding entirely
      let f := Expr.lam xName xType (yBody.lowerLooseBVars 1 1) xBi
      funProp (e.setArg (funPropDecl.funArgId) f)

  | _ => throwError "expected expression of the form `fun x => lam y := ..; ..`"

/-- Prove function property of using "morphism theorems" e.g. bundled linear map is linear map.
-/ def applyMorRules (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do trace[Meta.Tactic.fun_prop.step] "applying morphism theoresm to {← ppExpr e}" match ← fData.isMorApplication with | .none => throwError "fun_prop bug: ivalid use of mor rules on {← ppExpr e}" | .underApplied => applyPiRule funPropDecl e (← fData.toExpr) funProp | .overApplied => let .some (f,g) ← fData.peeloffArgDecomposition | return none applyCompRule funPropDecl e f g funProp | .exact => let ext := morTheoremsExt.getState (← getEnv) let candidates ← ext.theorems.getMatchWithScore e false { iota := false, zeta := false } let candidates := candidates.map (·.1) |>.flatten trace[Meta.Tactic.fun_prop] "candidate morphism theorems: {← candidates.mapM fun c => ppOrigin (.decl c.thmName)}" for c in candidates do if let .some r ← tryTheorem? e (.decl c.thmName) funProp then return r trace[Meta.Tactic.fun_prop.step] "no theorem matched" return none /-- Prove function property of using "transition theorems" e.g. continuity from linearity. -/ def applyTransitionRules (e : Expr) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do let ext := transitionTheoremsExt.getState (← getEnv) let candidates ← ext.theorems.getMatchWithScore e false { iota := false, zeta := false } let candidates := candidates.map (·.1) |>.flatten trace[Meta.Tactic.fun_prop] "candidate transition theorems: {← candidates.mapM fun c => ppOrigin (.decl c.thmName)}" for c in candidates do if ← previouslyUsedThm (.decl c.thmName) then trace[Meta.Tactic.fun_prop] "skipping {c.thmName} to prevent potential infinite loop" else if let .some r ← tryTheorem? e (.decl c.thmName) funProp then return r trace[Meta.Tactic.fun_prop.step] "no theorem matched" return none /-- Try to remove applied argument. 
-/ def removeArgRule (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do match fData.args.size with | 0 => throwError "fun_prop bug: invalid use of remove arg case {←ppExpr e}" | _ => let n := fData.args.size let arg := fData.args[n-1]! if arg.coe.isSome then -- if have to apply morphisms rules if we deal with morphims return ← applyMorRules funPropDecl e fData funProp else let .some (f,g) ← fData.peeloffArgDecomposition | return none applyCompRule funPropDecl e f g funProp /-- Prove function property of `fun f => f x₁ ... xₙ`. -/ def bvarAppCase (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do if (← fData.isMorApplication) != .none then applyMorRules funPropDecl e fData funProp else if let .some (f, g) ← fData.nontrivialDecomposition then applyCompRule funPropDecl e f g funProp else applyProjRule funPropDecl e fData.args[0]!.expr (← fData.domainType) funProp /-- Get candidate theorems from the environment for function property `funPropDecl` and function `funName`. -/ def getDeclTheorems (funPropDecl : FunPropDecl) (funName : Name) (mainArgs : Array Nat) (appliedArgs : Nat) : MetaM (Array FunctionTheorem) := do let thms ← getTheoremsForFunction funName funPropDecl.funPropName let thms := thms |>.filter (fun thm => (isOrderedSubsetOf mainArgs thm.mainArgs)) |>.qsort (fun t s => let dt := (Int.ofNat t.appliedArgs - Int.ofNat appliedArgs).natAbs let ds := (Int.ofNat s.appliedArgs - Int.ofNat appliedArgs).natAbs match compare dt ds with | .lt => true | .gt => false | .eq => t.mainArgs.size < s.mainArgs.size) -- todo: sorting and filtering return thms /-- Get candidate theorems from the local context for function property `funPropDecl` and function `funName`. 
-/ def getLocalTheorems (funPropDecl : FunPropDecl) (funOrigin : Origin) (mainArgs : Array Nat) (appliedArgs : Nat) : FunPropM (Array FunctionTheorem) := do let mut thms : Array FunctionTheorem := #[] let lctx ← getLCtx for var in lctx do if (var.kind = Lean.LocalDeclKind.auxDecl) then continue let type ← instantiateMVars var.type let thm? : Option FunctionTheorem ← forallTelescope type fun _ b => do let b ← whnfR b let .some (decl,f) ← getFunProp? b | return none unless decl.funPropName = funPropDecl.funPropName do return none let .data fData ← getFunctionData? f (← unfoldNamePred) {zeta := false} | return none unless (fData.getFnOrigin == funOrigin) do return none unless isOrderedSubsetOf mainArgs fData.mainArgs do return none let dec? ← fData.nontrivialDecomposition let thm : FunctionTheorem := { funPropName := funPropDecl.funPropName thmOrigin := .fvar var.fvarId funOrigin := funOrigin mainArgs := fData.mainArgs appliedArgs := fData.args.size priority := eval_prio default form := if dec?.isSome then .comp else .uncurried } return .some thm if let .some thm := thm? then thms := thms.push thm thms := thms |>.qsort (fun t s => let dt := (Int.ofNat t.appliedArgs - Int.ofNat appliedArgs).natAbs let ds := (Int.ofNat s.appliedArgs - Int.ofNat appliedArgs).natAbs match compare dt ds with | .lt => true | .gt => false | .eq => t.mainArgs.size < s.mainArgs.size) return thms /-- Try to apply theorems `thms` to `e` -/ def tryTheorems (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (thms : Array FunctionTheorem) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do -- none - decomposition not tried -- some none - decomposition failed -- some some (f, g) - successful decomposition let mut dec? 
: Option (Option (Expr × Expr)) := none for thm in thms do trace[Meta.Tactic.fun_prop.step] s!"trying theorem {← ppOrigin' thm.thmOrigin}" match compare thm.appliedArgs fData.args.size with | .lt => trace[Meta.Tactic.fun_prop] s!"removing argument to later use {← ppOrigin' thm.thmOrigin}" if let .some r ← removeArgRule funPropDecl e fData funProp then return r continue | .gt => trace[Meta.Tactic.fun_prop] s!"adding argument to later use {← ppOrigin' thm.thmOrigin}" if let .some r ← applyPiRule funPropDecl e (← fData.toExpr) funProp then return r continue | .eq => if thm.form == .comp then if let .some r ← tryTheorem? e thm.thmOrigin funProp then return r else if thm.mainArgs.size == fData.mainArgs.size then if dec?.isNone then dec? := .some (← fData.nontrivialDecomposition) match dec? with | .some .none => if let .some r ← tryTheorem? e thm.thmOrigin funProp then return r | .some (.some (f,g)) => trace[Meta.Tactic.fun_prop.step] s!"decomposing to later use {←ppOrigin' thm.thmOrigin}" trace[Meta.Tactic.fun_prop.step] s!"decomposition: {← ppExpr f} ∘ {← ppExpr g}" if let .some r ← applyCompRule funPropDecl e f g funProp then return r | _ => continue else trace[Meta.Tactic.fun_prop.step] s!"decomposing in args {thm.mainArgs} to later use {←ppOrigin' thm.thmOrigin}" let .some (f,g) ← fData.decompositionOverArgs thm.mainArgs | continue trace[Meta.Tactic.fun_prop.step] s!"decomposition: {← ppExpr f} ∘ {← ppExpr g}" if let .some r ← applyCompRule funPropDecl e f g funProp then return r -- todo: decompose if uncurried and arguments do not match exactly return none /-- Prove function property of `fun x => f x₁ ... xₙ` where `f` is free variable. 
-/ def fvarAppCase (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do -- fvar theorems are almost exclusively in uncurried form so we decompose if we can if let .some (f,g) ← fData.nontrivialDecomposition then applyCompRule funPropDecl e f g funProp else let .fvar id := fData.fn | throwError "fun_prop bug: invalid use of fvar app case" let thms ← getLocalTheorems funPropDecl (.fvar id) fData.mainArgs fData.args.size trace[Meta.Tactic.fun_prop] s!"candidate local theorems for {←ppExpr (.fvar id)} \ {← thms.mapM fun thm => ppOrigin' thm.thmOrigin}" if let .some r ← tryTheorems funPropDecl e fData thms funProp then return r if let .some f ← fData.unfoldHeadFVar? then let e' := e.setArg funPropDecl.funArgId f if let .some r ← funProp e' then return r if (← fData.isMorApplication) != .none then if let .some r ← applyMorRules funPropDecl e fData funProp then return r if (← fData.nontrivialDecomposition).isNone then if let .some r ← applyTransitionRules e funProp then return r if thms.size = 0 then logError s!"No theorems found for `{← ppExpr (.fvar id)}` in order to prove {← ppExpr e}" return none /-- Prove function property of `fun x => f x₁ ... xₙ` where `f` is declared function. -/ def constAppCase (funPropDecl : FunPropDecl) (e : Expr) (fData : FunctionData) (funProp : Expr → FunPropM (Option Result)) : FunPropM (Option Result) := do let .some (funName,_) := fData.fn.const? 
| throwError "fun_prop bug: invelid use of const app case" let globalThms ← getDeclTheorems funPropDecl funName fData.mainArgs fData.args.size trace[Meta.Tactic.fun_prop] s!"candidate theorems for {funName} {← globalThms.mapM fun thm => ppOrigin' thm.thmOrigin}" if let .some r ← tryTheorems funPropDecl e fData globalThms funProp then return r -- Try local theorems - this is useful for recursive functions let localThms ← getLocalTheorems funPropDecl (.decl funName) fData.mainArgs fData.args.size if localThms.size ≠ 0 then trace[Meta.Tactic.fun_prop] s!"candidate local theorems for {funName} \ {← localThms.mapM fun thm => ppOrigin' thm.thmOrigin}" if let .some r ← tryTheorems funPropDecl e fData localThms funProp then return r if (← fData.isMorApplication) != .none then if let .some r ← applyMorRules funPropDecl e fData funProp then return r if let .some (f,g) ← fData.nontrivialDecomposition then if let .some r ← applyCompRule funPropDecl e f g funProp then return r else if let .some r ← applyTransitionRules e funProp then return r if globalThms.size = 0 && localThms.size = 0 then logError s!"No theorems found for `{funName}` in order to prove {← ppExpr e}" return none /-- Cache result if it does not have any subgoals. -/ def cacheResult (e : Expr) (r : Result) : FunPropM Result := do -- return proof? modify (fun s => { s with cache := s.cache.insert e { expr := q(True), proof? := r.proof} }) return r mutual /-- Main `funProp` function. Returns proof of `e`. -/ partial def funProp (e : Expr) : FunPropM (Option Result) := do -- check cache if let .some { expr := _, proof? := .some proof } := (← get).cache.find? e then trace[Meta.Tactic.fun_prop.cache] "cached result for {e}" return .some { proof := proof } else -- take care of forall and let binders and run main match e with | .letE .. => letTelescope e fun xs b => do let .some r ← funProp b | return none cacheResult e {proof := ← mkLambdaFVars xs r.proof } | .forallE .. 
=> forallTelescope e fun xs b => do let .some r ← funProp b | return none cacheResult e {proof := ← mkLambdaFVars xs r.proof } | .mdata _ e' => funProp e' | .mvar _ => instantiateMVars e >>= funProp | _ => let .some r ← main e | return none cacheResult e r /-- Main `funProp` function. Returns proof of `e`. -/ private partial def main (e : Expr) : FunPropM (Option Result) := do let .some (funPropDecl, f) ← getFunProp? e | return none increaseSteps withTraceNode `Meta.Tactic.fun_prop (fun r => do pure s!"[{ExceptToEmoji.toEmoji r}] {← ppExpr e}") do -- if function starts with let bindings move them the top of `e` and try -- again if f.isLet then return ← letTelescope f fun xs b => do let e' := e.setArg funPropDecl.funArgId b funProp (← mkLambdaFVars xs e') match ← getFunctionData? f (← unfoldNamePred) {zeta := false} with | .letE f => trace[Meta.Tactic.fun_prop.step] "let case on {← ppExpr f}" let e := e.setArg funPropDecl.funArgId f -- update e with reduced f letCase funPropDecl e f funProp | .lam f => trace[Meta.Tactic.fun_prop.step] "pi case on {← ppExpr f}" let e := e.setArg funPropDecl.funArgId f -- update e with reduced f applyPiRule funPropDecl e f funProp | .data fData => let e := e.setArg funPropDecl.funArgId (← fData.toExpr) -- update e with reduced f if fData.isIdentityFun then applyIdRule funPropDecl e (← fData.domainType) funProp else if fData.isConstantFun then applyConstRule funPropDecl e (← fData.domainType) (Mor.mkAppN fData.fn fData.args) funProp else match fData.fn with | .fvar id => if id == fData.mainVar.fvarId! then bvarAppCase funPropDecl e fData funProp else fvarAppCase funPropDecl e fData funProp | .mvar .. => funProp (← instantiateMVars e) | .const .. | .proj .. => do constAppCase funPropDecl e fData funProp | _ => trace[Meta.Tactic.fun_prop.step] "unknown case, ctor: {f.ctorName}\n{e}" return none end
Tactic\FunProp\Decl.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean

/-!
## `funProp` environment extension that stores all registered function properties
-/

namespace Mathlib
open Lean Meta

namespace Meta.FunProp

/-- Basic information about function property

To use `funProp` to prove a function property `P : (α→β)→Prop` one has to provide `FunPropDecl`.
-/
structure FunPropDecl where
  /-- function transformation name -/
  funPropName : Name
  /-- path for discrimination tree -/
  path : Array DiscrTree.Key
  /-- argument index of a function this function property talks about.
  For example, this would be 4 for `@Continuous α β _ _ f` -/
  funArgId : Nat
  deriving Inhabited, BEq

/-- Discrimination tree for function properties. -/
structure FunPropDecls where
  /-- Discrimination tree for function properties. -/
  decls : DiscrTree FunPropDecl := {}
  deriving Inhabited

/-- Scoped environment extension holding all registered function property declarations. -/
abbrev FunPropDeclsExt := SimpleScopedEnvExtension FunPropDecl FunPropDecls

/-- Extension storing function properties tracked and used by the `fun_prop` attribute and tactic,
including, for example, `Continuous`, `Measurable`, `Differentiable`, etc. -/
initialize funPropDeclsExt : FunPropDeclsExt ←
  registerSimpleScopedEnvExtension {
    name := by exact decl_name%
    initial := {}
    addEntry := fun d e => {d with decls := d.decls.insertCore e.path e}
  }

/-- Register new function property.

Checks that `declName` is a `Prop`-valued function, records its discrimination-tree
pattern, and locates the explicit argument of function type that the property is about. -/
def addFunPropDecl (declName : Name) : MetaM Unit := do

  let info ← getConstInfo declName

  let (xs,bi,b) ← forallMetaTelescope info.type

  if ¬b.isProp then
    throwError "invalid fun_prop declaration, has to be `Prop` valued function"

  let lvls := info.levelParams.map (fun l => Level.param l)
  let e := mkAppN (.const declName lvls) xs
  let path ← DiscrTree.mkPath e {}

  -- find the argument position of the function `f` in `P f`
  -- NOTE(review): the `mut` here looks unnecessary — `funArgId` is never reassigned; confirm.
  let mut .some funArgId ← (xs.zip bi).findIdxM? fun (x,bi) => do
    if (← inferType x).isForall && bi.isExplicit then
      return true
    else
      return false
    | throwError "invalid fun_prop declaration, can't find argument of type `α → β`"

  let decl : FunPropDecl := {
    funPropName := declName
    path := path
    funArgId := funArgId
  }

  modifyEnv fun env => funPropDeclsExt.addEntry env decl

  -- NOTE(review): trace class is `funProp.attr` while the rest of the code uses
  -- `fun_prop` — verify this class is registered.
  trace[Meta.Tactic.funProp.attr] "added new function property `{declName}`\nlook up pattern is `{path}`"

/-- Is `e` a function property statement? If yes return function property declaration
and the function it talks about. -/
def getFunProp? (e : Expr) : MetaM (Option (FunPropDecl × Expr)) := do
  let ext := funPropDeclsExt.getState (← getEnv)

  let decls ← ext.decls.getMatch e {}

  if decls.size = 0 then
    return none

  if decls.size > 1 then
    throwError "\
fun_prop bug: expression {← ppExpr e} matches multiple function properties
{decls.map (fun d => d.funPropName)}"

  let decl := decls[0]!
  -- extract the function argument `f` from `P ... f ...`
  let f := e.getArg! decl.funArgId

  return (decl,f)

/-- Is `e` a function property statement? -/
def isFunProp (e : Expr) : MetaM Bool := do return (← getFunProp? e).isSome

/-- Returns function property declaration from `e = P f`. -/
def getFunPropDecl? (e : Expr) : MetaM (Option FunPropDecl) := do
  match ← getFunProp? e with
  | .some (decl,_) => return decl
  | .none => return none

/-- Returns function `f` from `e = P f` and `P` is function property. -/
def getFunPropFun? (e : Expr) : MetaM (Option Expr) := do
  match ← getFunProp? e with
  | .some (_,f) => return f
  | .none => return none

open Elab Term in
/-- Turn tactic syntax into a discharger function.

The returned function runs `tacticCode` on a fresh goal for `e` inside a sandboxed
elaboration state; it returns the proof only if the tactic fully closes the goal
(no remaining expression metavariables). -/
def tacticToDischarge (tacticCode : TSyntax `tactic) : Expr → MetaM (Option Expr) := fun e =>
  withTraceNode `Meta.Tactic.fun_prop
    (fun r => do pure s!"[{ExceptToEmoji.toEmoji r}] discharging: {← ppExpr e}") do
  let mvar ← mkFreshExprSyntheticOpaqueMVar e `funProp.discharger
  let runTac? : TermElabM (Option Expr) :=
    try
      -- do not let the discharger tactic leak state or messages into the caller
      withoutModifyingStateWithInfoAndMessages do
        instantiateMVarDeclMVars mvar.mvarId!

        let _ ←
          withSynthesize (postpone := .no) do
            Tactic.run mvar.mvarId! (Tactic.evalTactic tacticCode *> Tactic.pruneSolvedGoals)

        let result ← instantiateMVars mvar
        if result.hasExprMVar then
          return none
        else
          return some result
    catch _ =>
      return none
  let (result?, _) ← runTac?.run {} {}

  return result?
Tactic\FunProp\Differentiable.lean
/-
Copyright (c) 2024 Tomáš Skřivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomáš Skřivan
-/
import Mathlib.Analysis.Calculus.FDeriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Comp
import Mathlib.Analysis.Calculus.FDeriv.Prod
import Mathlib.Analysis.Calculus.FDeriv.Pi
import Mathlib.Analysis.Calculus.FDeriv.Add
import Mathlib.Analysis.Calculus.FDeriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Inv
import Mathlib.Analysis.SpecialFunctions.ExpDeriv
import Mathlib.Analysis.SpecialFunctions.Log.Deriv
import Mathlib.Tactic.FunProp

/-!
## `funProp` minimal setup for Differentiable(At/On)
-/

-- Lambda-calculus-shaped lemmas (identity/composition) that `fun_prop` needs but
-- are missing from the library in exactly this form.
section Missing

section lambda_rules

variable {K : Type*} [NontriviallyNormedField K]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace K E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace K F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace K G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace K G']
variable {f f₀ f₁ g : E → F} {x} {s t}

/-- The identity function is differentiable on any set (stated with an explicit lambda,
the form `fun_prop` matches against). -/
theorem differentiableOn_id' : DifferentiableOn K (fun x : E => x) s :=
  differentiable_id.differentiableOn

/-- Composition rule stated as `fun x => g (f x)` rather than `g ∘ f`. -/
theorem Differentiable.comp' {g : F → G} (hg : Differentiable K g) (hf : Differentiable K f) :
    Differentiable K (fun x => g (f x)) := fun x => DifferentiableAt.comp x (hg (f x)) (hf x)

/-- Pointwise composition rule stated as `fun x => g (f x)`. -/
theorem DifferentiableAt.comp' {f : E → F} {g : F → G} (hg : DifferentiableAt K g (f x))
    (hf : DifferentiableAt K f x) : DifferentiableAt K (fun x => g (f x)) x :=
  (hg.hasFDerivAt.comp x hf.hasFDerivAt).differentiableAt

/-- Composition rule on sets stated as `fun x => g (f x)`, requiring `f` to map `s` into `t`. -/
theorem DifferentiableOn.comp' {g : F → G} {t : Set F} (hg : DifferentiableOn K g t)
    (hf : DifferentiableOn K f s) (st : Set.MapsTo f s t) :
    DifferentiableOn K (fun x => g (f x)) s :=
  fun x hx => DifferentiableWithinAt.comp x (hg (f x) (st hx)) (hf x hx) st

end lambda_rules

end Missing

-- mark definition
attribute [fun_prop]
  Differentiable
  DifferentiableAt
  DifferentiableOn

-- lambda rules
attribute [fun_prop]
  differentiable_id'
  differentiable_const
  Differentiable.comp'

  differentiableAt_id'
  differentiableAt_const
  DifferentiableAt.comp'

  differentiableOn_id'
  differentiableOn_const
  DifferentiableOn.comp'

-- product
attribute [fun_prop]
  Differentiable.prod
  Differentiable.fst
  Differentiable.snd

  DifferentiableAt.prod
  DifferentiableAt.fst
  DifferentiableAt.snd

  DifferentiableOn.prod
  DifferentiableOn.fst
  DifferentiableOn.snd

-- transitions
attribute [fun_prop]
  Differentiable.differentiableAt
  Differentiable.differentiableOn
  DifferentiableAt.continuousAt
  DifferentiableOn.continuousOn

-- algebra
attribute [fun_prop]
  Differentiable.add
  Differentiable.sub
  Differentiable.neg
  Differentiable.mul
  Differentiable.smul
  Differentiable.div
  Differentiable.inv'
  Differentiable.inv

  DifferentiableAt.add
  DifferentiableAt.sub
  DifferentiableAt.neg
  DifferentiableAt.mul
  DifferentiableAt.smul
  DifferentiableAt.div
  DifferentiableAt.inv'
  DifferentiableAt.inv

  DifferentiableOn.add
  DifferentiableOn.sub
  DifferentiableOn.neg
  DifferentiableOn.mul
  DifferentiableOn.smul
  DifferentiableOn.div
  DifferentiableOn.inv'
  DifferentiableOn.inv

-- special functions
attribute [fun_prop]
  Differentiable.exp
  Differentiable.log
  Differentiable.pow

  DifferentiableAt.exp
  DifferentiableAt.log
  DifferentiableAt.pow

  DifferentiableOn.exp
  DifferentiableOn.log
  DifferentiableOn.pow
Tactic\FunProp\Elab.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean
import Mathlib.Tactic.FunProp.Core

/-!
## `funProp` tactic syntax
-/

namespace Mathlib
open Lean Meta Elab Tactic

namespace Meta.FunProp

open Lean.Parser.Tactic

/-- Tactic to prove function properties -/
syntax (name := funPropTacStx)
  "fun_prop" (discharger)? (" [" withoutPosition(ident,*,?) "]")? : tactic

-- Default discharger: never discharges anything, only logs a trace node.
private def emptyDischarge : Expr → MetaM (Option Expr) :=
  fun e =>
    withTraceNode `Meta.Tactic.fun_prop
      (fun r => do pure s!"[{ExceptToEmoji.toEmoji r}] discharging: {← ppExpr e}") do
      pure none

/-- Tactic to prove function properties -/
@[tactic funPropTacStx]
def funPropTac : Tactic
  | `(tactic| fun_prop $[$d]? $[[$names,*]]?) => do

    -- elaborate the optional `(discharger := tac)` into a discharger function
    let disch ← show MetaM (Expr → MetaM (Option Expr)) from do
      match d with
      | none => pure emptyDischarge
      | some d =>
        match d with
        | `(discharger| (discharger := $tac)) => pure <| tacticToDischarge (← `(tactic| ($tac)))
        | _ => pure emptyDischarge

    -- optional `[n₁, n₂, ...]` list of names `fun_prop` is allowed to unfold
    let namesToUnfold : Array Name :=
      match names with
      | none => #[]
      | .some ns => ns.getElems.map (fun n => n.getId)

    let namesToUnfold := namesToUnfold.append defaultNamesToUnfold

    let goal ← getMainGoal
    goal.withContext do
      let goalType ← goal.getType
      let cfg : Config := {disch := disch, constToUnfold := .ofArray namesToUnfold _}
      let (r?, s) ← funProp goalType cfg |>.run {}

      if let .some r := r? then
        goal.assign r.proof
      else
        -- on failure, build a detailed error message from the collected message log
        let mut msg := s!"`fun_prop` was unable to prove `{← Meta.ppExpr goalType}`\n\n"
        if d.isSome then
          msg := msg ++ "Try running with a different discharger tactic like \
            `aesop`, `assumption`, `linarith`, `omega` etc.\n"
        else
          msg := msg ++ "Try running with discharger `fun_prop (disch:=aesop)` or with a different \
            discharger tactic like `assumption`, `linarith`, `omega`.\n"
        msg := msg ++ "Sometimes it is useful to run `fun_prop (disch:=trace_state; sorry)` \
          which will print all the necessary subgoals for `fun_prop` to succeed.\n"
        msg := msg ++ "\nPotential issues to fix:"
        msg := s.msgLog.foldl (init := msg) (fun msg m => msg ++ "\n " ++ m)
        msg := msg ++ "\n\nFor more detailed information use \
          `set_option trace.Meta.Tactic.fun_prop true`"

        throwError msg
  | _ => throwUnsupportedSyntax
Tactic\FunProp\FunctionData.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean
import Qq

import Mathlib.Tactic.FunProp.Mor

/-!
## `funProp` data structure holding information about a function

`FunctionData` holds data about function in the form `fun x => f x₁ ... xₙ`.
-/

namespace Mathlib
open Lean Meta

namespace Meta.FunProp

/-- Structure storing parts of a function in funProp-normal form. -/
structure FunctionData where
  /-- local context where `mainVar` exists -/
  lctx : LocalContext
  /-- local instances -/
  insts : LocalInstances
  /-- main function -/
  fn : Expr
  /-- applied function arguments -/
  args : Array Mor.Arg
  /-- main variable -/
  mainVar : Expr
  /-- indices of `args` that contain `mainVars` -/
  mainArgs : Array Nat

/-- Turn function data back to expression. -/
def FunctionData.toExpr (f : FunctionData) : MetaM Expr := do
  withLCtx f.lctx f.insts do
    let body := Mor.mkAppN f.fn f.args
    mkLambdaFVars #[f.mainVar] body

/-- Is `f` an identity function? -/
def FunctionData.isIdentityFun (f : FunctionData) : Bool :=
  (f.args.size = 0 && f.fn == f.mainVar)

/-- Is `f` a constant function? -/
def FunctionData.isConstantFun (f : FunctionData) : Bool :=
  ((f.mainArgs.size = 0) && !(f.fn.containsFVar f.mainVar.fvarId!))

/-- Domain type of `f`. -/
def FunctionData.domainType (f : FunctionData) : MetaM Expr :=
  withLCtx f.lctx f.insts do
    inferType f.mainVar

/-- Is head function of `f` a constant?

If the head of `f` is a projection return the name of corresponding projection function. -/
def FunctionData.getFnConstName? (f : FunctionData) : MetaM (Option Name) := do
  match f.fn with
  | .const n _ => return n
  | .proj typeName idx _ =>
    let .some info := getStructureInfo? (← getEnv) typeName | return none
    let .some projName := info.getProjFn? idx | return none
    return projName
  | _ => return none

/-- Get `FunctionData` for `f`. Throws if `f` can't be put into funProp-normal form. -/
def getFunctionData (f : Expr) : MetaM FunctionData := do
  lambdaTelescope f fun xs b => do
    let xId := xs[0]!.fvarId!

    Mor.withApp b fun fn args => do

      -- record which applied arguments mention the main variable `x`
      let mainArgs := args
        |>.mapIdx (fun i ⟨arg,_⟩ => if arg.containsFVar xId then some i.1 else none)
        |>.filterMap id

      return {
        lctx := ← getLCtx
        insts := ← getLocalInstances
        fn := fn
        args := args
        mainVar := xs[0]!
        mainArgs := mainArgs
      }

/-- Result of `getFunctionData?`. It returns function data if the function is in the form
`fun x => f y₁ ... yₙ`. Two other cases are `fun x => let y := ...` or `fun x y => ...` -/
inductive MaybeFunctionData where
  /-- Can't generate function data as function body has let binder. -/
  | letE (f : Expr)
  /-- Can't generate function data as function body has lambda binder. -/
  | lam (f : Expr)
  /-- Function data has been successfully generated. -/
  | data (fData : FunctionData)

/-- Turn `MaybeFunctionData` to the function. -/
def MaybeFunctionData.get (fData : MaybeFunctionData) : MetaM Expr :=
  match fData with
  | .letE f | .lam f => pure f
  | .data d => d.toExpr

/-- Get `FunctionData` for `f`.

`unfoldPred` controls which head constants `Mor.whnfPred` may unfold while normalizing
the body. -/
def getFunctionData? (f : Expr)
    (unfoldPred : Name → Bool := fun _ => false) (cfg : WhnfCoreConfig := {}) :
    MetaM MaybeFunctionData := do

  let unfold := fun e : Expr =>
    if let .some n := e.getAppFn'.constName? then
      pure (unfoldPred n)
    else
      pure false

  let .forallE xName xType _ _ ← inferType f
    | throwError "fun_prop bug: function expected"
  withLocalDeclD xName xType fun x => do
    -- normalize the body applied to a fresh fvar before classifying it
    let fx' ← Mor.whnfPred (f.beta #[x]).eta unfold cfg
    let f' ← mkLambdaFVars #[x] fx'
    match fx' with
    | .letE .. => return .letE f'
    | .lam  .. => return .lam f'
    | _ => return .data (← getFunctionData f')

/-- If head function is a let-fvar unfold it and return resulting function.
Return `none` otherwise. -/
def FunctionData.unfoldHeadFVar? (fData : FunctionData) : MetaM (Option Expr) := do
  let .fvar id := fData.fn | return none
  let .some val ← id.getValue? | return none
  let f ← withLCtx fData.lctx fData.insts do
    mkLambdaFVars #[fData.mainVar] (Mor.mkAppN val fData.args)
  return f

/-- Type of morphism application. -/
inductive MorApplication where
  /-- Of the form `⇑f` i.e. missing argument. -/
  | underApplied
  /-- Of the form `⇑f x` i.e. morphism and one argument is provided. -/
  | exact
  /-- Of the form `⇑f x y ...` i.e. additional applied arguments `y ...`. -/
  | overApplied
  /-- Not a morphism application. -/
  | none
  deriving Inhabited, BEq

/-- Is function body of `f` a morphism application? What kind? -/
def FunctionData.isMorApplication (f : FunctionData) : MetaM MorApplication := do
  if let .some name := f.fn.constName? then
    if ← Mor.isCoeFunName name then
      -- head is itself a coercion constant: compare its arity with the applied arguments
      let info ← getConstInfo name
      let arity := info.type.forallArity
      match compare arity f.args.size with
      | .eq => return .exact
      | .lt => return .overApplied
      | .gt => return .underApplied
  match f.args.size with
  | 0 => return .none
  | _ =>
    let n := f.args.size
    if f.args[n-1]!.coe.isSome then
      return .exact
    else if f.args.any (fun a => a.coe.isSome) then
      return .overApplied
    else
      return .none

/-- Decomposes `fun x => f y₁ ... yₙ` into `(fun g => g yₙ) ∘ (fun x y => f y₁ ... yₙ₋₁ y)`

Returns none if:
 - `n=0`
 - `yₙ` contains `x`
 - `n=1` and `(fun x y => f y)` is identity function i.e. `x=f` -/
def FunctionData.peeloffArgDecomposition (fData : FunctionData) : MetaM (Option (Expr × Expr)) := do
  unless fData.args.size > 0 do return none
  withLCtx fData.lctx fData.insts do
    let n := fData.args.size
    let x := fData.mainVar
    let yₙ := fData.args[n-1]!

    if yₙ.expr.containsFVar x.fvarId! then
      return none

    if fData.args.size = 1 && fData.mainVar == fData.fn then
      return none

    let gBody' := Mor.mkAppN fData.fn fData.args[:n-1]
    -- if the peeled-off argument was applied through a coercion, keep the coercion on `g`
    let gBody' := if let .some coe := yₙ.coe then coe.app gBody' else gBody'
    let g' ← mkLambdaFVars #[x] gBody'
    let f' := Expr.lam `f (← inferType gBody') (.app (.bvar 0) (yₙ.expr)) default
    return (f',g')

/-- Decompose function `f = (← fData.toExpr)` into composition of two functions.

Returns none if the decomposition would produce composition with identity function. -/
def FunctionData.nontrivialDecomposition (fData : FunctionData) : MetaM (Option (Expr × Expr)) := do
  let mut lctx := fData.lctx
  let insts := fData.insts

  let x := fData.mainVar
  let xId := x.fvarId!
  let xName ← withLCtx lctx insts xId.getUserName

  let fn := fData.fn
  let mut args := fData.args

  if fn.containsFVar xId then
    return ← fData.peeloffArgDecomposition

  -- abstract each argument containing `x` into a fresh variable `yᵢ`
  let mut yVals : Array Expr := #[]
  let mut yVars : Array Expr := #[]

  for argId in fData.mainArgs do

    let yVal := args[argId]!

    let yVal' := yVal.expr
    let yId ← withLCtx lctx insts mkFreshFVarId
    let yType ← withLCtx lctx insts (inferType yVal')
    -- dependent argument types cannot be abstracted this way
    if yType.containsFVar fData.mainVar.fvarId! then
      return none
    lctx := lctx.mkLocalDecl yId (xName.appendAfter (toString argId)) yType
    let yVar := Expr.fvar yId
    yVars := yVars.push yVar
    yVals := yVals.push yVal'
    args := args.set! argId ⟨yVar, yVal.coe⟩

  let g ← withLCtx lctx insts do
    mkLambdaFVars #[x] (← mkProdElem yVals)
  let f ← withLCtx lctx insts do
    (mkLambdaFVars yVars (Mor.mkAppN fn args)) >>= mkUncurryFun yVars.size

  -- check nontriviality: reject decompositions where `f` or `g` is defeq to the original
  let f' ← fData.toExpr
  if (← isDefEq f' f) || (← isDefEq f' g) then
    return none

  return (f, g)

/-- Decompose function `fun x => f y₁ ... yₙ` over specified argument indices `#[i, j, ...]`.

The result is:
```
(fun (yᵢ',yⱼ',...) => f y₁ .. yᵢ' .. yⱼ' .. yₙ) ∘ (fun x => (yᵢ, yⱼ, ...))
```

This is not possible if `yₗ` for `l ∉ #[i,j,...]` still contains `x`. In such case `none` is
returned. -/
def FunctionData.decompositionOverArgs (fData : FunctionData) (args : Array Nat) :
    MetaM (Option (Expr × Expr)) := do

  unless isOrderedSubsetOf fData.mainArgs args do return none
  unless ¬(fData.fn.containsFVar fData.mainVar.fvarId!) do return none

  withLCtx fData.lctx fData.insts do
    let gxs := args.map (fun i => fData.args[i]!.expr)

    try
      let gx ← mkProdElem gxs -- this can crash if we have dependent types
      let g ← withLCtx fData.lctx fData.insts <| mkLambdaFVars #[fData.mainVar] gx

      withLocalDeclD `y (← inferType gx) fun y => do
        -- split the product variable `y` back into components and substitute them
        let ys ← mkProdSplitElem y gxs.size
        let args' := (args.zip ys).foldl (init := fData.args)
          (fun args' (i,y) => args'.set! i { expr := y, coe := args'[i]!.coe })
        let f ← mkLambdaFVars #[y] (Mor.mkAppN fData.fn args')
        return (f,g)
    catch _ =>
      return none
Tactic\FunProp\Mor.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean
import Mathlib.Data.FunLike.Basic
import Mathlib.Tactic.FunProp.ToBatteries

/-!
## `funProp`

Meta programming functions like in Lean.Expr.* but for working with bundled morphisms.

Function application in a normal Lean expression looks like `.app f x`, but when we work with a
bundled morphism `f` it looks like `.app (.app coe f) x`, where `f` is the bundled morphism.
In Mathlib `coe` is usually `DFunLike.coe` but it can be any coercion that is registered with
the `coe` attribute.

The main difference when working with expressions involving morphisms is that the notion of the
head of an expression changes. For example in:
```
coe (f a) b
```
the head of the expression is considered to be `f` and not `coe`.
-/

namespace Mathlib
open Lean Meta

namespace Meta.FunProp

namespace Mor

/-- Is `name` a coercion from some function space to functions? -/
def isCoeFunName (name : Name) : CoreM Bool := do
  let .some info ← getCoeFnInfo? name | return false
  return info.type == .coeFun

/-- Is `e` a coercion from some function space to functions? -/
def isCoeFun (e : Expr) : MetaM Bool := do
  let .some (name,_) := e.getAppFn.const? | return false
  let .some info ← getCoeFnInfo? name | return false
  -- `e` must be one argument short of a fully applied coercion
  return e.getAppNumArgs' + 1 == info.numArgs

/-- Morphism application -/
structure App where
  /-- morphism coercion -/
  coe : Expr
  /-- bundled morphism -/
  fn : Expr
  /-- morphism argument -/
  arg : Expr

/-- Is `e` a morphism application? -/
def isMorApp? (e : Expr) : MetaM (Option App) := do
  let .app (.app coe f) x := e | return none
  if ← isCoeFun coe then
    return .some { coe := coe, fn := f, arg := x }
  else
    return none

/-- Weak head normal form of an expression involving morphism applications.

Additionally, `pred` can specify when to unfold definitions.

For example calling this on `coe (f a) b` will put `f` in weak head normal form
instead of `coe`.
-/
partial def whnfPred (e : Expr) (pred : Expr → MetaM Bool) (cfg : WhnfCoreConfig := {}) :
    MetaM Expr := do
  whnfEasyCases e fun e => do
    let e ← whnfCore e cfg

    if let .some ⟨coe,f,x⟩ ← isMorApp? e then
      let f ← whnfPred f pred cfg
      if cfg.zeta then
        return (coe.app f).app x
      else
        -- ζ-reduction disabled: keep the `let`-bindings of `f` on the outside
        return ← letTelescope f fun xs f' => mkLambdaFVars xs ((coe.app f').app x)

    if (← pred e) then
      match (← unfoldDefinition? e) with
      | some e => whnfPred e pred cfg
      | none => return e
    else
      return e

/-- Weak head normal form of an expression involving morphism applications.

For example calling this on `coe (f a) b` will put `f` in weak head normal form
instead of `coe`.
-/
def whnf (e : Expr) (cfg : WhnfCoreConfig := {}) : MetaM Expr :=
  whnfPred e (fun _ => return false) cfg


/-- Argument of morphism application that stores corresponding coercion if necessary -/
structure Arg where
  /-- argument of type `α` -/
  expr : Expr
  /-- coercion `F → α → β` -/
  coe : Option Expr := none
  deriving Inhabited

/-- Morphism application -/
def app (f : Expr) (arg : Arg) : Expr :=
  match arg.coe with
  | .none => f.app arg.expr
  | .some coe => (coe.app f).app arg.expr

/-- Given `e = f a₁ a₂ ... aₙ`, returns `k f #[a₁, ..., aₙ]` where `f` can be bundled morphism. -/
partial def withApp {α} (e : Expr) (k : Expr → Array Arg → MetaM α) : MetaM α :=
  go e #[]
where
  /-- Peel off (possibly coerced) applications, accumulating arguments in reverse. -/
  go : Expr → Array Arg → MetaM α
    | .mdata _ b, as => go b as
    | .app (.app c f) x, as => do
      if ← isCoeFun c then
        go f (as.push { coe := c, expr := x})
      else
        go (.app c f) (as.push { expr := x})
    | .app f a, as => go f (as.push { expr := a })
    | f , as => k f as.reverse

/-- If the given expression is a sequence of morphism applications `f a₁ .. aₙ`, return `f`.
Otherwise return the input expression. -/
def getAppFn (e : Expr) : MetaM Expr :=
  match e with
  | .mdata _ b => getAppFn b
  | .app (.app c f) _ => do
    if ← isCoeFun c then
      getAppFn f
    else
      getAppFn (.app c f)
  | .app f _ => getAppFn f
  | e => return e

/-- Given `f a₁ a₂ ... aₙ`, returns `#[a₁, ..., aₙ]` where `f` can be bundled morphism. -/
def getAppArgs (e : Expr) : MetaM (Array Arg) := withApp e fun _ xs => return xs

/-- `mkAppN f #[a₀, ..., aₙ]` ==> `f a₀ a₁ .. aₙ` where `f` can be bundled morphism. -/
def mkAppN (f : Expr) (xs : Array Arg) : Expr :=
  xs.foldl (init := f) (fun f x =>
    match x with
    | ⟨x, .none⟩ => (f.app x)
    | ⟨x, .some coe⟩ => (coe.app f).app x)
Tactic\FunProp\RefinedDiscrTree.lean
/- Copyright (c) 2023 J. W. Gerbscheid. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: J. W. Gerbscheid -/ import Mathlib.Tactic.FunProp.StateList import Mathlib.Algebra.Group.Pi.Basic /-! We define discrimination trees for the purpose of unifying local expressions with library results. This structure is based on `Lean.Meta.DiscrTree`. I document here what features are not in the original: - The keys `Key.lam`, `Key.forall` and `Key.bvar` have been introduced in order to allow for matching under lambda and forall binders. `Key.lam` has arity 1 and indexes the body. `Key.forall` has arity 2 and indexes the domain and the body. The reason for not indexing the domain of a lambda expression is that it is usually already determined, for example in `∃ a : α, p`, which is `@Exists α fun a : α => p`, we don't want to index the domain `α` twice. In a forall expression it is necessary to index the domain, because in an implication `p → q` we need to index both `p` and `q`. `Key.bvar` works the same as `Key.fvar`, but stores the De Bruijn index to identify it. For example, this allows for more specific matching with the left hand side of `∑ i ∈ range n, i = n * (n - 1) / 2`, which is indexed by `[⟨Finset.sum, 5⟩, ⟨Nat, 0⟩, ⟨Nat, 0⟩, *0, ⟨Finset.Range, 1⟩, *1, λ, ⟨#0, 0⟩]`. - The key `Key.star` takes a `Nat` identifier as an argument. For example, the library pattern `?a + ?a` is encoded as `[⟨HAdd.hAdd, 6⟩, *0, *0, *0, *1, *2, *2]`. `*0` corresponds to the type of `a`, `*1` to the `HAdd` instance, and `*2` to `a`. This means that it will only match an expression `x + y` if `x` is definitionally equal to `y`. The matching algorithm requires that the same stars from the discrimination tree match with the same patterns in the lookup expression, and similarly requires that the same metavariables form the lookup expression match with the same pattern in the discrimination tree. 
- The key `Key.opaque` has been introduced in order to index existential variables
  in lemmas like `Nat.exists_prime_and_dvd {n : ℕ} (hn : n ≠ 1) : ∃ p, Prime p ∧ p ∣ n`,
  where the part `Prime p` gets the pattern `[⟨Nat.Prime, 1⟩, ◾]`. (◾ represents `Key.opaque`)
  When matching, `Key.opaque` can only be matched by `Key.star`.

  Using the `WhnfCoreConfig` argument, it is possible to disable β-reduction and ζ-reduction.
  As a result, we may get a lambda expression applied to an argument or a let-expression.
  Since there is no support for indexing these, they will be indexed by `Key.opaque`.

- We keep track of the matching score of a unification.
  This score represents the number of keys that had to be the same for the unification to succeed.
  For example, matching `(1 + 2) + 3` with `add_comm` gives a score of 2,
  since the pattern of commutativity is [⟨HAdd.hAdd, 6⟩, *0, *0, *0, *1, *2, *3],
  so matching `⟨HAdd.hAdd, 6⟩` gives 1 point,
  and matching `*0` after its first appearance gives another point, but the third argument is an
  outParam, so this gets ignored. Similarly, matching it with `add_assoc` gives a score of 5.

- Patterns that have the potential to be η-reduced are put into the `RefinedDiscrTree` under all
  possible reduced key sequences. This is for terms of the form `fun x => f (?m x₁ .. xₙ)`, where
  `?m` is a metavariable, and one of `x₁, .., xₙ` is `x`.
  For example, the pattern `Continuous fun y => Real.exp (f y)` is indexed by
  both `[⟨Continuous, 5⟩, *0, ⟨Real, 0⟩, *1, *2, λ, ⟨Real.exp⟩, *3]`
  and `[⟨Continuous, 5⟩, *0, ⟨Real, 0⟩, *1, *2, ⟨Real.exp⟩]`
  so that it also comes up if you search with `Continuous Real.exp`.
  Similarly, `Continuous fun x => f x + g x` is indexed by
  both `[⟨Continuous, 1⟩, λ, ⟨HAdd.hAdd, 6⟩, *0, *0, *0, *1, *2, *3]`
  and `[⟨Continuous, 1⟩, ⟨HAdd.hAdd, 5⟩, *0, *0, *0, *1, *2]`.
- For sub-expressions not at the root of the original expression we have some additional reductions: - Any combination of `ofNat`, `Nat.zero`, `Nat.succ` and number literals is stored as just a number literal. - The expression `fun a : α => a` is stored as `@id α`. - This makes lemmata such as `continuous_id'` redundant, which is the same as `continuous_id`, with `id` replaced by `fun x => x`. - Any expressions involving `+`, `*`, `-`, `/` or `⁻¹` is normalized to not have a lambda in front and to always have the default amount of arguments. e.g. `(f + g) a` is stored as `f a + g a` and `fun x => f x + g x` is stored as `f + g`. - This makes lemmata such as `MeasureTheory.integral_integral_add'` redundant, which is the same as `MeasureTheory.integral_integral_add`, with `f a + g a` replaced by `(f + g) a` - it also means that a lemma like `Continuous.mul` can be stated as talking about `f * g` instead of `fun x => f x + g x`. I have also made some changes in the implementation: - Instead of directly converting from `Expr` to `Array Key` during insertion, and directly looking up from an `Expr` during lookup, I defined the intermediate structure `DTExpr`, which is a form of `Expr` that only contains information relevant for the discrimination tree. Each `Expr` is transformed into a `DTExpr` before insertion or lookup. For insertion there could be multiple `DTExpr` representations due to potential η-reductions as mentioned above. TODO: - More thought could be put into the matching algorithm for non-trivial unifications. For example, when looking up the expression `?a + ?a` (for rewriting), there will only be results like `n + n = 2 * n` or `a + b = b + a`, but not like `n + 1 = n.succ`, even though this would still unify. - The reason why implicit arguments are not ignored by the discrimination tree is that they provide important type information. Because of this it seems more natural to index the types of expressions instead of indexing the implicit type arguments. 
Then each key would additionally index the type of that expression. So instead of indexing `?a + ?b` as `[⟨HAdd.hAdd, 6⟩, *0, *0, *0, *1, *2, *3]`, it would be indexed by something like `[(*0, ⟨HAdd.hAdd, 6⟩), _, _, _, _, (*0, *1), (*0, *2)]`. The advantage of this would be that there will be less duplicate indexing of types, because many functions index the types of their arguments and their return type with implicit arguments, meaning that types unnecessarily get indexed multiple times. This modification can be explored, but it could very well not be an improvement. -/ open Lean Meta namespace Mathlib.Meta.FunProp.RefinedDiscrTree /-! ## Definitions -/ /-- Discrimination tree key. -/ inductive Key where /-- A metavariable. This key matches with anything. It stores an index. -/ | star : Nat → Key /-- An opaque variable. This key only matches with itself or `Key.star`. -/ | opaque : Key /-- A constant. It stores the name and the arity. -/ | const : Name → Nat → Key /-- A free variable. It stores the `FVarId` and the arity. -/ | fvar : FVarId → Nat → Key /-- A bound variable, from a lambda or forall binder. It stores the De Bruijn index and the arity. -/ | bvar : Nat → Nat → Key /-- A literal. -/ | lit : Literal → Key /-- A sort. Universe levels are ignored. -/ | sort : Key /-- A lambda function. -/ | lam : Key /-- A dependent arrow. -/ | forall : Key /-- A projection. It stores the structure name, the projection index and the arity. 
-/ | proj : Name → Nat → Nat → Key deriving Inhabited, BEq, Repr private nonrec def Key.hash : Key → UInt64 | .star i => mixHash 7883 $ hash i | .opaque => 342 | .const n a => mixHash 5237 $ mixHash (hash n) (hash a) | .fvar n a => mixHash 8765 $ mixHash (hash n) (hash a) | .bvar i a => mixHash 4323 $ mixHash (hash i) (hash a) | .lit v => mixHash 1879 $ hash v | .sort => 2411 | .lam => 4742 | .«forall» => 9752 | .proj s i a => mixHash (hash a) $ mixHash (hash s) (hash i) instance : Hashable Key := ⟨Key.hash⟩ /-- Constructor index used for ordering `Key`. Note that the index of the star pattern is 0, so that when looking up in a `Trie`, we can look at the start of the sorted array for all `.star` patterns. -/ def Key.ctorIdx : Key → Nat | .star .. => 0 | .opaque .. => 1 | .const .. => 2 | .fvar .. => 3 | .bvar .. => 4 | .lit .. => 5 | .sort => 6 | .lam => 7 | .forall => 8 | .proj .. => 9 /-- The order on `Key` used in the `RefinedDiscrTree`. -/ private def Key.lt : Key → Key → Bool | .star i₁, .star i₂ => i₁ < i₂ | .const n₁ a₁, .const n₂ a₂ => Name.quickLt n₁ n₂ || (n₁ == n₂ && a₁ < a₂) | .fvar f₁ a₁, .fvar f₂ a₂ => Name.quickLt f₁.name f₂.name || (f₁ == f₂ && a₁ < a₂) | .bvar i₁ a₁, .bvar i₂ a₂ => i₁ < i₂ || (i₁ == i₂ && a₁ < a₂) | .lit v₁, .lit v₂ => v₁ < v₂ | .proj s₁ i₁ a₁, .proj s₂ i₂ a₂ => Name.quickLt s₁ s₂ || (s₁ == s₂ && (i₁ < i₂ || (i₁ == i₂ && a₁ < a₂))) | k₁, k₂ => k₁.ctorIdx < k₂.ctorIdx instance : LT Key := ⟨fun a b => Key.lt a b⟩ instance (a b : Key) : Decidable (a < b) := inferInstanceAs (Decidable (Key.lt a b)) private def Key.format : Key → Format | .star i => "*" ++ Std.format i | .opaque => "◾" | .const k a => "⟨" ++ Std.format k ++ ", " ++ Std.format a ++ "⟩" | .fvar k a => "⟨" ++ Std.format k.name ++ ", " ++ Std.format a ++ "⟩" | .lit (Literal.natVal v) => Std.format v | .lit (Literal.strVal v) => repr v | .sort => "sort" | .bvar i a => "⟨" ++ "#" ++ Std.format i ++ ", " ++ Std.format a ++ "⟩" | .lam => "λ" | .forall => "∀" | .proj s i a => 
"⟨" ++ Std.format s ++"."++ Std.format i ++", "++ Std.format a ++ "⟩" instance : ToFormat Key := ⟨Key.format⟩ /-- Return the number of arguments that the `Key` takes. -/ def Key.arity : Key → Nat | .const _ a => a | .fvar _ a => a | .bvar _ a => a | .lam => 1 | .forall => 2 | .proj _ _ a => 1 + a | _ => 0 variable {α : Type} /-- Discrimination tree trie. See `RefinedDiscrTree`. -/ inductive Trie (α : Type) where /-- Map from `Key` to `Trie`. Children is an `Array` of size at least 2, sorted in increasing order using `Key.lt`. -/ | node (children : Array (Key × Trie α)) /-- Sequence of nodes with only one child. `keys` is an `Array` of size at least 1. -/ | path (keys : Array Key) (child : Trie α) /-- Leaf of the Trie. `values` is an `Array` of size at least 1. -/ | values (vs : Array α) instance : Inhabited (Trie α) := ⟨.node #[]⟩ /-- `Trie.path` constructor that only inserts the path if it is non-empty. -/ def Trie.mkPath (keys : Array Key) (child : Trie α) := if keys.isEmpty then child else Trie.path keys child /-- `Trie` constructor for a single value, taking the keys starting at index `i`. -/ def Trie.singleton (keys : Array Key) (value : α) (i : Nat) : Trie α := mkPath keys[i:] (values #[value]) /-- `Trie.node` constructor for combining two `Key`, `Trie α` pairs. -/ def Trie.mkNode2 (k1 : Key) (t1 : Trie α) (k2 : Key) (t2 : Trie α) : Trie α := if k1 < k2 then .node #[(k1, t1), (k2, t2)] else .node #[(k2, t2), (k1, t1)] /-- Return the values from a `Trie α`, assuming that it is a leaf -/ def Trie.values! : Trie α → Array α | .values vs => vs | _ => panic! "expected .values constructor" /-- Return the children of a `Trie α`, assuming that it is not a leaf. The result is sorted by the `Key`'s -/ def Trie.children! : Trie α → Array (Key × Trie α) | .node cs => cs | .path ks c => #[(ks[0]!, mkPath ks[1:] c)] | .values _ => panic! 
    "did not expect .values constructor"

private partial def Trie.format [ToFormat α] : Trie α → Format
  | .node cs => Format.group $ Format.paren $ "node " ++ Format.join
    (cs.toList.map fun (k, c) => Format.line ++ Format.paren (format (prepend k c)))
  | .values vs => if vs.isEmpty then Format.nil else Std.format vs
  | .path ks c => Format.sbracket (Format.joinSep ks.toList (", "))
      ++ " => " ++ Format.line ++ format c
where
  /-- prepend key `k` to the trie `t`, merging with an existing path -/
  prepend (k : Key) (t : Trie α) : Trie α := match t with
    | .path ks c => .path (#[k] ++ ks) c
    | t => .path #[k] t

instance [ToFormat α] : ToFormat (Trie α) := ⟨Trie.format⟩


/-- Discrimination tree. It is an index from expressions to values of type `α`. -/
structure _root_.Mathlib.Meta.FunProp.RefinedDiscrTree (α : Type) where
  /-- The underlying `PersistentHashMap` of a `RefinedDiscrTree`. -/
  root : PersistentHashMap Key (Trie α) := {}

instance : Inhabited (RefinedDiscrTree α) := ⟨{}⟩

private partial def format [ToFormat α] (d : RefinedDiscrTree α) : Format :=
  -- the Bool in the accumulator tracks whether we are at the first entry,
  -- to avoid emitting a leading line break
  let (_, r) := d.root.foldl
    (fun (p : Bool × Format) k c =>
      (false, p.2 ++ (if p.1 then Format.nil else Format.line) ++
        Format.paren (Std.format k ++ " => " ++ Std.format c)))
    (true, Format.nil)
  Format.group r

instance [ToFormat α] : ToFormat (RefinedDiscrTree α) := ⟨format⟩

/-- `DTExpr` is a simplified form of `Expr`.
It is the intermediate step for converting from `Expr` to `Array Key`. -/
inductive DTExpr where
  /-- A metavariable. It optionally stores an `MVarId`. -/
  | star : Option MVarId → DTExpr
  /-- An opaque variable or a let-expression in the case `WhnfCoreConfig.zeta := false`. -/
  | opaque : DTExpr
  /-- A constant. It stores the name and the arguments. -/
  | const : Name → Array DTExpr → DTExpr
  /-- A free variable. It stores the `FVarId` and the arguments. -/
  | fvar : FVarId → Array DTExpr → DTExpr
  /-- A bound variable. It stores the De Bruijn index and the arguments. -/
  | bvar : Nat → Array DTExpr → DTExpr
  /-- A literal. -/
  | lit : Literal → DTExpr
  /-- A sort.
-/ | sort : DTExpr /-- A lambda function. It stores the body. -/ | lam : DTExpr → DTExpr /-- A dependent arrow. It stores the domain and body. -/ | forall : DTExpr → DTExpr → DTExpr /-- A projection. It stores the structure name, projection index, struct body and arguments. -/ | proj : Name → Nat → DTExpr → Array DTExpr → DTExpr deriving Inhabited, BEq, Repr private partial def DTExpr.format : DTExpr → Format | .star _ => "*" | .opaque => "◾" | .const n as => Std.format n ++ formatArgs as | .fvar n as => Std.format n.name ++ formatArgs as | .bvar i as => "#" ++ Std.format i ++ formatArgs as | .lit (Literal.natVal v) => Std.format v | .lit (Literal.strVal v) => repr v | .sort => "Sort" | .lam b => "λ " ++ DTExpr.format b | .forall d b => DTExpr.format d ++ " → " ++ DTExpr.format b | .proj _ i a as => DTExpr.format a ++ "." ++ Std.format i ++ formatArgs as where formatArgs (as : Array DTExpr) := if as.isEmpty then .nil else " " ++ Format.paren (@Format.joinSep _ ⟨DTExpr.format⟩ as.toList ", ") instance : ToFormat DTExpr := ⟨DTExpr.format⟩ /-- Return the size of the `DTExpr`. This is used for calculating the matching score when two expressions are equal. The score is not incremented at a lambda, which is so that the expressions `∀ x, p[x]` and `∃ x, p[x]` get the same size. 
-/ partial def DTExpr.size : DTExpr → Nat | .const _ args | .fvar _ args | .bvar _ args => args.foldl (init := 1) (· + ·.size) | .lam b => b.size | .forall d b => 1 + d.size + b.size | _ => 1 private def DTExpr.eqv (a b : DTExpr) : Bool := (go a b).run' {} where go (a b : DTExpr) : StateM (HashMap MVarId MVarId) Bool := match a, b with | .opaque , .opaque => pure true | .const n₁ as₁ , .const n₂ as₂ => pure (n₁ == n₂) <&&> goArray as₁ as₂ | .fvar n₁ as₁ , .fvar n₂ as₂ => pure (n₁ == n₂) <&&> goArray as₁ as₂ | .bvar i₁ as₁ , .bvar i₂ as₂ => pure (i₁ == i₂) <&&> goArray as₁ as₂ | .lit li₁ , .lit li₂ => pure (li₁ == li₂) | .sort , .sort => pure true | .lam b₁ , .lam b₂ => go b₁ b₂ | .forall d₁ b₁ , .forall d₂ b₂ => go d₁ d₂ <&&> go b₁ b₂ | .proj n₁ i₁ a₁ as₁, .proj n₂ i₂ a₂ as₂ => pure (n₁ == n₂ && i₁ == i₂) <&&> go a₁ a₂ <&&> goArray as₁ as₂ | .star none , .star none => pure true | .star (some id₁) , .star (some id₂) => modifyGet fun map => match map.find? id₁ with | some id => (id == id₂, map) | none => (true, map.insert id₁ id₂) | _ , _ => return false goArray (as bs : Array DTExpr) : StateM (HashMap MVarId MVarId) Bool := do if h : as.size = bs.size then for g : i in [:as.size] do unless ← go as[i] (bs[i]'(h ▸ g.2)) do return false return true else return false /-! ## Encoding an Expr -/ /-- This state is used to turn the indexing by `MVarId` and `FVarId` in `DTExpr` into indexing by `Nat` in `Key`. -/ private structure Flatten.State where stars : Array MVarId := #[] private def getStar (mvarId? : Option MVarId) : StateM Flatten.State Nat := modifyGet fun s => match mvarId? with | some mvarId => match s.stars.findIdx? 
(· == mvarId) with | some idx => (idx, s) | none => (s.stars.size, { s with stars := s.stars.push mvarId }) | none => (s.stars.size, { s with stars := s.stars.push ⟨.anonymous⟩ }) private partial def DTExpr.flattenAux (todo : Array Key) : DTExpr → StateM Flatten.State (Array Key) | .star i => return todo.push (.star (← getStar i)) | .opaque => return todo.push .opaque | .const n as => as.foldlM flattenAux (todo.push (.const n as.size)) | .fvar f as => as.foldlM flattenAux (todo.push (.fvar f as.size)) | .bvar i as => as.foldlM flattenAux (todo.push (.bvar i as.size)) | .lit l => return todo.push (.lit l) | .sort => return todo.push .sort | .lam b => flattenAux (todo.push .lam) b | .«forall» d b => do flattenAux (← flattenAux (todo.push .forall) d) b | .proj n i e as => do as.foldlM flattenAux (← flattenAux (todo.push (.proj n i as.size)) e) /-- Given a `DTExpr`, return the linearized encoding in terms of `Key`, which is used for `RefinedDiscrTree` indexing. -/ def DTExpr.flatten (e : DTExpr) (initCapacity := 16) : Array Key := (DTExpr.flattenAux (.mkEmpty initCapacity) e).run' {} /-- Return true if `e` is one of the following - A nat literal (numeral) - `Nat.zero` - `Nat.succ x` where `isNumeral x` - `OfNat.ofNat _ x _` where `isNumeral x` -/ private partial def isNumeral (e : Expr) : Bool := if e.isRawNatLit then true else let f := e.getAppFn if !f.isConst then false else let fName := f.constName! if fName == ``Nat.succ && e.getAppNumArgs == 1 then isNumeral e.appArg! else if fName == ``OfNat.ofNat && e.getAppNumArgs == 3 then isNumeral (e.getArg! 1) else if fName == ``Nat.zero && e.getAppNumArgs == 0 then true else false /-- Return `some n` if `e` is definitionally equal to the natural number `n`. -/ private partial def toNatLit? 
(e : Expr) : Option Literal := if isNumeral e then if let some n := loop e then some (.natVal n) else none else none where loop (e : Expr) : Option Nat := do let f := e.getAppFn match f with | .lit (.natVal n) => return n | .const fName .. => if fName == ``Nat.succ && e.getAppNumArgs == 1 then let r ← loop e.appArg! return r+1 else if fName == ``OfNat.ofNat && e.getAppNumArgs == 3 then loop (e.getArg! 1) else if fName == ``Nat.zero && e.getAppNumArgs == 0 then return 0 else failure | _ => failure /-- Reduction procedure for the `RefinedDiscrTree` indexing. -/ partial def reduce (e : Expr) (config : WhnfCoreConfig) : MetaM Expr := do let e ← whnfCore e config match (← unfoldDefinition? e) with | some e => reduce e config | none => match e.etaExpandedStrict? with | some e => reduce e config | none => return e /-- Repeatedly apply reduce while stripping lambda binders and introducing their variables -/ @[specialize] partial def lambdaTelescopeReduce {m} [Monad m] [MonadLiftT MetaM m] [MonadControlT MetaM m] [Inhabited α] (e : Expr) (fvars : List FVarId) (config : WhnfCoreConfig) (k : Expr → List FVarId → m α) : m α := do match ← reduce e config with | .lam n d b bi => withLocalDecl n bi d fun fvar => lambdaTelescopeReduce (b.instantiate1 fvar) (fvar.fvarId! :: fvars) config k | e => k e fvars /-- Check whether the expression is represented by `Key.star`. -/ def isStar : Expr → Bool | .mvar .. => true | .app f _ => isStar f | _ => false /-- Check whether the expression is represented by `Key.star` and has `arg` as an argument. 
-/ def isStarWithArg (arg : Expr) : Expr → Bool | .app f a => if a == arg then isStar f else isStarWithArg arg f | _ => false private partial def DTExpr.hasLooseBVarsAux (i : Nat) : DTExpr → Bool | .const _ as => as.any (hasLooseBVarsAux i) | .fvar _ as => as.any (hasLooseBVarsAux i) | .bvar j as => j ≥ i || as.any (hasLooseBVarsAux i) | .proj _ _ a as => a.hasLooseBVarsAux i || as.any (hasLooseBVarsAux i) | .forall d b => d.hasLooseBVarsAux i || b.hasLooseBVarsAux (i+1) | .lam b => b.hasLooseBVarsAux (i+1) | _ => false /-- Return `true` if `e` contains a loose bound variable. -/ def DTExpr.hasLooseBVars (e : DTExpr) : Bool := e.hasLooseBVarsAux 0 namespace MkDTExpr private structure Context where /-- Variables that come from a lambda or forall binder. The list index gives the De Bruijn index. -/ bvars : List FVarId := [] /-- Variables that come from a lambda that has been removed via η-reduction. -/ forbiddenVars : List FVarId := [] config : WhnfCoreConfig fvarInContext : FVarId → Bool /-- Return for each argument whether it should be ignored. -/ def getIgnores (fn : Expr) (args : Array Expr) : MetaM (Array Bool) := do let mut fnType ← inferType fn let mut result := Array.mkEmpty args.size let mut j := 0 for i in [:args.size] do unless fnType matches .forallE .. do fnType ← whnfD (fnType.instantiateRevRange j i args) j := i let .forallE _ d b bi := fnType | throwError m! "expected function type {indentExpr fnType}" fnType := b result := result.push (← isIgnoredArg args[i]! d bi) return result where /-- Return whether the argument should be ignored. -/ isIgnoredArg (arg domain : Expr) (binderInfo : BinderInfo) : MetaM Bool := do if domain.isOutParam then return true match binderInfo with | .instImplicit => return true | .implicit | .strictImplicit => return !(← isType arg) | .default => isProof arg /-- Introduce new lambdas by η-expansion. 
-/
@[specialize]
def etaExpand (args : Array Expr) (type : Expr) (lambdas : List FVarId)
    (goalArity : Nat) (k : Array Expr → List FVarId → MetaM α) : MetaM α := do
  if args.size < goalArity then
    -- add a fresh local `_η : type` as both an argument and a pending lambda binder
    withLocalDeclD `_η type fun fvar =>
      etaExpand (args.push fvar) type (fvar.fvarId! :: lambdas) goalArity k
  else
    k args lambdas
termination_by goalArity - args.size

/-- Normalize an application of a heterogeneous binary operator like `HAdd.hAdd`, using:
- `f = fun x => f x` to increase the arity to 6
- `(f + g) a = f a + g a` to decrease the arity to 6
- `(fun x => f x + g x) = f + g` to get rid of any lambdas in front -/
def reduceHBinOpAux (args : Array Expr) (lambdas : List FVarId) (instH instPi : Name) :
    OptionT MetaM (Expr × Expr × Expr × List FVarId) := do
  -- argument 3 must be the `instH` instance, from which we read off the type
  let some (mkApp2 (.const instH' _) type inst) := args[3]? | failure
  guard (instH == instH')
  if args.size ≤ 6 then
    -- under-applied: η-expand up to the canonical arity of 6
    etaExpand args type lambdas 6 fun args lambdas =>
      distributeLambdas lambdas type args[4]! args[5]!
  else
    /- use that `(f + g) a = f a + g a` -/
    let mut type := type
    let mut inst := inst
    let mut lhs := args[4]!
    let mut rhs := args[5]!
for arg in args[6:] do let mkApp3 (.const i _) _ f inst' := inst | return (type, lhs, rhs, lambdas) unless i == instPi do return (type, lhs, rhs, lambdas) type := .app f arg inst := inst' lhs := .app lhs arg rhs := .app rhs arg distributeLambdas lambdas type lhs rhs where /-- use that `(fun x => f x + g x) = f + g` -/ distributeLambdas (lambdas : List FVarId) (type lhs rhs : Expr) : MetaM (Expr × Expr × Expr × List FVarId) := match lambdas with | fvarId :: lambdas => do let decl ← fvarId.getDecl let type := .forallE decl.userName decl.type (type.abstract #[.fvar fvarId]) decl.binderInfo let lhs := .lam decl.userName decl.type (lhs.abstract #[.fvar fvarId]) decl.binderInfo let rhs := .lam decl.userName decl.type (rhs.abstract #[.fvar fvarId]) decl.binderInfo distributeLambdas lambdas type lhs rhs | [] => return (type, lhs, rhs, []) /-- Normalize an application if the head is `+`, `*`, `-` or `/`. Optionally return the `(type, lhs, rhs, lambdas)`. -/ @[inline] def reduceHBinOp (n : Name) (args : Array Expr) (lambdas : List FVarId) : MetaM (Option (Expr × Expr × Expr × List FVarId)) := match n with | ``HAdd.hAdd => reduceHBinOpAux args lambdas ``instHAdd ``Pi.instAdd | ``HMul.hMul => reduceHBinOpAux args lambdas ``instHMul ``Pi.instMul | ``HSub.hSub => reduceHBinOpAux args lambdas ``instHSub ``Pi.instSub | ``HDiv.hDiv => reduceHBinOpAux args lambdas ``instHDiv ``Pi.instDiv | _ => return none /-- Normalize an application of a unary operator like `Inv.inv`, using: - `f⁻¹ a = (f a)⁻¹` to decrease the arity to 3 - `(fun x => (f a)⁻¹) = f⁻¹` to get rid of any lambdas in front -/ def reduceUnOpAux (args : Array Expr) (lambdas : List FVarId) (instPi : Name) : OptionT MetaM (Expr × Expr × List FVarId) := do guard (args.size ≥ 3) let mut type := args[0]! let mut inst := args[1]! let mut arg := args[2]! 
if args.size == 3 then distributeLambdas lambdas type arg else /- use that `f⁻¹ a = (f a)⁻¹` -/ for arg' in args[3:] do let mkApp3 (.const i _) _ f inst' := inst | return (type, arg, lambdas) unless i == instPi do return (type, arg, lambdas) type := .app f arg' inst := inst' arg := .app arg arg' distributeLambdas lambdas type arg where /-- use that `(fun x => (f x)⁻¹) = f⁻¹` -/ distributeLambdas (lambdas : List FVarId) (type arg : Expr) : MetaM (Expr × Expr × List FVarId) := match lambdas with | fvarId :: lambdas => do let decl ← fvarId.getDecl let type := .forallE decl.userName decl.type (type.abstract #[.fvar fvarId]) decl.binderInfo let arg := .lam decl.userName decl.type (arg.abstract #[.fvar fvarId]) decl.binderInfo distributeLambdas lambdas type arg | [] => return (type, arg, []) /-- Normalize an application if the head is `⁻¹` or `-`. Optionally return the `(type, arg, lambdas)`. -/ @[inline] def reduceUnOp (n : Name) (args : Array Expr) (lambdas : List FVarId) : MetaM (Option (Expr × Expr × List FVarId)) := match n with | ``Neg.neg => reduceUnOpAux args lambdas ``Pi.instNeg | ``Inv.inv => reduceUnOpAux args lambdas ``Pi.instInv | _ => return none @[specialize] private def withLams {m} [Monad m] [MonadWithReader Context m] (lambdas : List FVarId) (k : m DTExpr) : m DTExpr := if lambdas.isEmpty then k else do let e ← withReader (fun c => { c with bvars := lambdas ++ c.bvars }) k return lambdas.foldl (fun _ => ·.lam) e /-- Return the encoding of `e` as a `DTExpr`. If `root = false`, then `e` is a strict sub expression of the original expression. 
-/ partial def mkDTExprAux (e : Expr) (root : Bool) : ReaderT Context MetaM DTExpr := do lambdaTelescopeReduce e [] (← read).config fun e lambdas => e.withApp fun fn args => do let argDTExpr (arg : Expr) (ignore : Bool) : ReaderT Context MetaM DTExpr := if ignore then pure (.star none) else mkDTExprAux arg false let argDTExprs : ReaderT Context MetaM (Array DTExpr) := do let ignores ← getIgnores fn args args.mapIdxM fun i arg => argDTExpr arg ignores[i]! match fn with | .const n _ => unless root do if let some (type, lhs, rhs, lambdas') ← reduceHBinOp n args lambdas then return ← withLams lambdas' do let type ← mkDTExprAux type false let lhs ← mkDTExprAux lhs false let rhs ← mkDTExprAux rhs false return .const n #[type, type, .star none, .star none, lhs, rhs] if let some (type, arg, lambdas') ← reduceUnOp n e.getAppArgs lambdas then return ← withLams lambdas' do let type ← mkDTExprAux type false let arg ← mkDTExprAux arg false return .const n #[type, .star none, arg] /- since `(fun _ => 0) = 0` and `(fun _ => 1) = 1`, we don't index lambdas before literals -/ if let some v := toNatLit? e then return .lit v withLams lambdas do return .const n (← argDTExprs) | .proj s i a => withLams lambdas do let a ← argDTExpr a (isClass (← getEnv) s) return .proj s i a (← argDTExprs) | .fvar fvarId => /- we index `fun x => x` as `id` when not at the root -/ if let fvarId' :: lambdas' := lambdas then if fvarId' == fvarId && args.isEmpty && !root then return ← withLams lambdas' do let type ← mkDTExprAux (← fvarId.getType) false return .const ``id #[type] withLams lambdas do if let some idx := (← read).bvars.findIdx? (· == fvarId) then return .bvar idx (← argDTExprs) if (← read).fvarInContext fvarId then return .fvar fvarId (← argDTExprs) else return .opaque | .mvar mvarId => /- When the mvarId has arguments, index it with `[*]` instead of `[λ,*]`, because the star could depend on the bound variables. 
As a result, something indexed `[λ,*]` has that the `*` cannot depend on the λ-bound variables -/ if args.isEmpty then withLams lambdas do return .star (some mvarId) else return .star none | .forallE n d b bi => withLams lambdas do let d' ← mkDTExprAux d false let b' ← withLocalDecl n bi d fun fvar => withReader (fun c => { c with bvars := fvar.fvarId! :: c.bvars }) do mkDTExprAux (b.instantiate1 fvar) false return .forall d' b' | .lit v => withLams lambdas do return .lit v | .sort _ => withLams lambdas do return .sort | .letE .. => withLams lambdas do return .opaque | .lam .. => withLams lambdas do return .opaque | _ => unreachable! private abbrev M := StateListT (AssocList Expr DTExpr) $ ReaderT Context MetaM /- Caching values is a bit dangerous, because when two expressions are be equal and they live under a different number of binders, then the resulting De Bruijn indices are offset. In practice, getting a `.bvar` in a `DTExpr` is very rare, so we exclude such values from the cache. -/ instance : MonadCache Expr DTExpr M where findCached? e := do let s ← get return s.find? e cache e e' := if e'.hasLooseBVars then return else modify (·.insert e e') /-- Return all pairs of body, bound variables that could possibly appear due to η-reduction -/ @[specialize] def etaPossibilities (e : Expr) (lambdas : List FVarId) (k : Expr → List FVarId → M α) : M α := k e lambdas <|> do match e, lambdas with | .app f a, fvarId :: lambdas => if isStarWithArg (.fvar fvarId) a then withReader (fun c => { c with forbiddenVars := fvarId :: c.forbiddenVars }) do etaPossibilities f lambdas k else failure | _, _ => failure /-- run `etaPossibilities`, and cache the result if there are multiple possibilities. 
-/ @[specialize] def cacheEtaPossibilities (e original : Expr) (lambdas : List FVarId) (k : Expr → List FVarId → M DTExpr) : M DTExpr := match e, lambdas with | .app _ a, fvarId :: _ => if isStarWithArg (.fvar fvarId) a then checkCache original fun _ => etaPossibilities e lambdas k else k e lambdas | _, _ => k e lambdas /-- Return all encodings of `e` as a `DTExpr`, taking possible η-reductions into account. If `root = false`, then `e` is a strict sub expression of the original expression. -/ partial def mkDTExprsAux (original : Expr) (root : Bool) : M DTExpr := do lambdaTelescopeReduce original [] (← read).config fun e lambdas => do if !root then if let .const n _ := e.getAppFn then if let some (type, lhs, rhs, lambdas') ← reduceHBinOp n e.getAppArgs lambdas then return ← withLams lambdas' do let type ← mkDTExprsAux type false let lhs ← mkDTExprsAux lhs false let rhs ← mkDTExprsAux rhs false return .const n #[type, type, .star none, .star none, lhs, rhs] if let some (type, arg, lambdas') ← reduceUnOp n e.getAppArgs lambdas then return ← withLams lambdas' do let type ← mkDTExprsAux type false let arg ← mkDTExprsAux arg false return .const n #[type, .star none, arg] cacheEtaPossibilities e original lambdas fun e lambdas => e.withApp fun fn args => do let argDTExpr (arg : Expr) (ignore : Bool) : M DTExpr := if ignore then pure (.star none) else mkDTExprsAux arg false let argDTExprs : M (Array DTExpr) := do let ignores ← getIgnores fn args args.mapIdxM fun i arg => argDTExpr arg ignores[i]! match fn with | .const n _ => unless root do /- since `(fun _ => 0) = 0` and `(fun _ => 1) = 1`, we don't index lambdas before nat literals -/ if let some v := toNatLit? 
e then return .lit v withLams lambdas do return .const n (← argDTExprs) | .proj s i a => withLams lambdas do let a ← argDTExpr a (isClass (← getEnv) s) return .proj s i a (← argDTExprs) | .fvar fvarId => /- we index `fun x => x` as `id` when not at the root -/ if let fvarId' :: lambdas' := lambdas then if fvarId' == fvarId && args.isEmpty && !root then return ← withLams lambdas' do let type ← mkDTExprAux (← fvarId.getType) false return .const ``id #[type] withLams lambdas do let c ← read if let some idx := c.bvars.findIdx? (· == fvarId) then return .bvar idx (← argDTExprs) guard !(c.forbiddenVars.contains fvarId) if c.fvarInContext fvarId then return .fvar fvarId (← argDTExprs) else return .opaque | .mvar mvarId => /- When the mvarId has arguments, index it with `[*]` instead of `[λ,*]`, because the star could depend on the bound variables. As a result, something indexed `[λ,*]` has that the `*` cannot depend on the λ-bound variables -/ if args.isEmpty then withLams lambdas do return .star (some mvarId) else return .star none | .forallE n d b bi => withLams lambdas do let d' ← mkDTExprsAux d false let b' ← withLocalDecl n bi d fun fvar => withReader (fun c => { c with bvars := fvar.fvarId! :: c.bvars }) do mkDTExprsAux (b.instantiate1 fvar) false return .forall d' b' | .lit v => withLams lambdas do return .lit v | .sort _ => withLams lambdas do return .sort | .letE .. => withLams lambdas do return .opaque | .lam .. => withLams lambdas do return .opaque | _ => unreachable! end MkDTExpr /-- -/ def DTExpr.isSpecific : DTExpr → Bool | .star _ | .const ``Eq #[.star _, .star _, .star _] => false | _ => true /-- Return the encoding of `e` as a `DTExpr`. Warning: to account for potential η-reductions of `e`, use `mkDTExprs` instead. The argument `fvarInContext` allows you to specify which free variables in `e` will still be in the context when the `RefinedDiscrTree` is being used for lookup. It should return true only if the `RefinedDiscrTree` is built and used locally. 
-/
def mkDTExpr (e : Expr) (config : WhnfCoreConfig)
    (fvarInContext : FVarId → Bool := fun _ => false) : MetaM DTExpr :=
  withReducible do (MkDTExpr.mkDTExprAux e true |>.run {config, fvarInContext})

/-- Similar to `mkDTExpr`.
Return all encodings of `e` as a `DTExpr`, taking potential further η-reductions into account. -/
def mkDTExprs (e : Expr) (config : WhnfCoreConfig) (onlySpecific : Bool)
    (fvarInContext : FVarId → Bool := fun _ => false) : MetaM (List DTExpr) :=
  withReducible do
    let es ← (MkDTExpr.mkDTExprsAux e true).run' {} |>.run {config, fvarInContext}
    return if onlySpecific then es.filter (·.isSpecific) else es

/-! ## Inserting into a RefinedDiscrTree -/

variable {α : Type}

/-- If `vs` contains an element `v'` such that `v == v'`, then replace `v'` with `v`.
Otherwise, push `v`. See issue #2155
Recall that `BEq α` may not be Lawful. -/
private def insertInArray [BEq α] (vs : Array α) (v : α) : Array α :=
  loop 0
where
  /-- Linear scan for an element equal (w.r.t. `BEq`) to `v`: replace it in place if found,
  otherwise push `v` at the end. -/
  loop (i : Nat) : Array α :=
    if h : i < vs.size then
      if v == vs[i] then
        vs.set ⟨i,h⟩ v
      else
        loop (i+1)
    else
      vs.push v
  termination_by vs.size - i

/-- Insert the value `v` at index `keys : Array Key` in a `Trie`. -/
partial def insertInTrie [BEq α] (keys : Array Key) (v : α) (i : Nat) : Trie α → Trie α
  | .node cs =>
    -- Children are kept sorted by key, so insertion uses a binary insert.
    let k := keys[i]!
    let c := Id.run $ cs.binInsertM
        (fun a b => a.1 < b.1)
        (fun (k', s) => (k', insertInTrie keys v (i+1) s))
        (fun _ => (k, Trie.singleton keys v (i+1)))
        (k, default)
    .node c
  | .values vs =>
    .values (insertInArray vs v)
  | .path ks c => Id.run do
    -- Walk along the compressed path; on the first mismatch, split the path into
    -- the shared prefix and a 2-way node for the diverging keys.
    for n in [:ks.size] do
      let k1 := keys[i+n]!
      let k2 := ks[n]!
      if k1 != k2 then
        let shared := ks[:n]
        let rest := ks[n+1:]
        return .mkPath shared (.mkNode2 k1 (.singleton keys v (i+n+1)) k2 (.mkPath rest c))
    return .path ks (insertInTrie keys v (i + ks.size) c)

/-- Insert the value `v` at index `keys : Array Key` in a `RefinedDiscrTree`.
Warning: to account for η-reduction, an entry may need to be added at multiple indexes,
so it is recommended to use `RefinedDiscrTree.insert` for insertion. -/
def insertInRefinedDiscrTree [BEq α] (d : RefinedDiscrTree α) (keys : Array Key) (v : α) :
    RefinedDiscrTree α :=
  let k := keys[0]!
  match d.root.find? k with
  | none =>
    let c := .singleton keys v 1
    { root := d.root.insert k c }
  | some c =>
    let c := insertInTrie keys v 1 c
    { root := d.root.insert k c }

/-- Insert the value `v` at index `e : DTExpr` in a `RefinedDiscrTree`.

Warning: to account for η-reduction, an entry may need to be added at multiple indexes,
so it is recommended to use `RefinedDiscrTree.insert` for insertion. -/
def insertDTExpr [BEq α] (d : RefinedDiscrTree α) (e : DTExpr) (v : α) : RefinedDiscrTree α :=
  insertInRefinedDiscrTree d e.flatten v

/-- Insert the value `v` at index `e : Expr` in a `RefinedDiscrTree`.

The argument `fvarInContext` allows you to specify which free variables in `e` will still be
in the context when the `RefinedDiscrTree` is being used for lookup.
It should return true only if the `RefinedDiscrTree` is built and used locally.

If `onlySpecific := true`, then we filter out the patterns `*` and `Eq * * *`. -/
def insert [BEq α] (d : RefinedDiscrTree α) (e : Expr) (v : α)
    (onlySpecific : Bool := true) (config : WhnfCoreConfig := {})
    (fvarInContext : FVarId → Bool := fun _ => false) : MetaM (RefinedDiscrTree α) := do
  let keys ← mkDTExprs e config onlySpecific fvarInContext
  return keys.foldl (insertDTExpr · · v) d

/-- Insert the value `vLhs` at index `lhs`, and if `rhs` is indexed differently,
then also insert the value `vRhs` at index `rhs`.
-/
def insertEqn [BEq α] (d : RefinedDiscrTree α) (lhs rhs : Expr) (vLhs vRhs : α)
    (onlySpecific : Bool := true) (config : WhnfCoreConfig := {})
    (fvarInContext : FVarId → Bool := fun _ => false) : MetaM (RefinedDiscrTree α) := do
  let keysLhs ← mkDTExprs lhs config onlySpecific fvarInContext
  let keysRhs ← mkDTExprs rhs config onlySpecific fvarInContext
  let d := keysLhs.foldl (insertDTExpr · · vLhs) d
  -- Only insert the `rhs` entries when both sides are indexed differently;
  -- `List.beq` is instantiated with `DTExpr.eqv` to compare the key lists.
  if @List.beq _ ⟨DTExpr.eqv⟩ keysLhs keysRhs then
    return d
  else
    return keysRhs.foldl (insertDTExpr · · vRhs) d



/-!
## Matching with a RefinedDiscrTree

We use a very simple unification algorithm. For all star/metavariable patterns in the
`RefinedDiscrTree` and in the target, we store the assignment, and when it is assigned again,
we check that it is the same assignment.
-/

namespace GetUnify

/-- If `k` is a key in `children`, return the corresponding `Trie α`. Otherwise return `none`. -/
def findKey (children : Array (Key × Trie α)) (k : Key) : Option (Trie α) :=
  (·.2) <$> children.binSearch (k, default) (fun a b => a.1 < b.1)

private structure Context where
  /-- Whether metavariables in the target expression may be assigned during matching. -/
  unify : Bool
  /-- Configuration passed on to `whnfCore` when normalizing expressions. -/
  config : WhnfCoreConfig

private structure State where
  /-- Score representing how good the match is. -/
  score : Nat := 0
  /-- Metavariable assignments for the `Key.star` patterns in the `RefinedDiscrTree`. -/
  starAssignments : HashMap Nat DTExpr := {}
  /-- Metavariable assignments for the `Expr.mvar` in the expression. -/
  mvarAssignments : HashMap MVarId (Array Key) := {}

private abbrev M := ReaderT Context $ StateListM State

/-- Return all values from `x` in an array, together with their scores. -/
private def M.run (unify : Bool) (config : WhnfCoreConfig) (x : M (Trie α)) :
    Array (Array α × Nat) :=
  ((x.run { unify, config }).run {}).toArray.map (fun (t, s) => (t.values!, s.score))

/-- Increment the score by `n`. -/
private def incrementScore (n : Nat) : M Unit :=
  modify fun s => { s with score := s.score + n }

/-- Log a metavariable assignment in the `State`.
-/ private def insertStarAssignment (n : Nat) (e : DTExpr) : M Unit := modify fun s => { s with starAssignments := s.starAssignments.insert n e } /-- Log a metavariable assignment in the `State`. -/ private def assignMVar (mvarId : MVarId) (e : Array Key) : M Unit := do let { mvarAssignments, .. } ← get match mvarAssignments.find? mvarId with | some e' => guard (e == e') | none => modify fun s => { s with mvarAssignments := s.mvarAssignments.insert mvarId e } /-- Return the possible `Trie α` that match with `n` metavariable. -/ partial def skipEntries (t : Trie α) (skipped : Array Key) : Nat → M (Array Key × Trie α) | 0 => pure (skipped, t) | skip+1 => t.children!.foldr (init := failure) fun (k, c) x => (skipEntries c (skipped.push k) (skip + k.arity)) <|> x /-- Return the possible `Trie α` that match with anything. We add 1 to the matching score when the key is `.opaque`, since this pattern is "harder" to match with. -/ def matchTargetStar (mvarId? : Option MVarId) (t : Trie α) : M (Trie α) := do let (keys, t) ← t.children!.foldr (init := failure) fun (k, c) x => (do if k == .opaque then incrementScore 1 skipEntries c #[k] k.arity ) <|> x if let some mvarId := mvarId? then assignMVar mvarId keys return t /-- Return the possible `Trie α` that come from a `Key.star`, while keeping track of the `Key.star` assignments. -/ def matchTreeStars (e : DTExpr) (t : Trie α) : M (Trie α) := do let {starAssignments, ..} ← get let mut result := failure /- The `Key.star` are at the start of the `t.children!`, so this loops through all of them. -/ for (k, c) in t.children! do let .star i := k | break if let some assignment := starAssignments.find? i then if e == assignment then result := (incrementScore e.size *> pure c) <|> result else result := (insertStarAssignment i e *> pure c) <|> result result mutual /-- Return the possible `Trie α` that match with `e`. -/ partial def matchExpr (e : DTExpr) (t : Trie α) : M (Trie α) := do if let .star mvarId? 
:= e then if (← read).unify then matchTargetStar mvarId? t else matchTreeStars e t else matchTreeStars e t <|> exactMatch e (findKey t.children!) /-- If `e` is not a metavariable, return the possible `Trie α` that exactly match with `e`. -/ @[specialize] partial def exactMatch (e : DTExpr) (find? : Key → Option (Trie α)) : M (Trie α) := do let findKey (k : Key) (x : Trie α → M (Trie α) := pure) (score := 1) : M (Trie α) := match find? k with | none => failure | some trie => do incrementScore score x trie let matchArgs (args : Array DTExpr) : Trie α → M (Trie α) := args.foldlM (fun t e => matchExpr e t) match e with | .opaque => failure | .const c args => findKey (.const c args.size) (matchArgs args) | .fvar fvarId args => findKey (.fvar fvarId args.size) (matchArgs args) | .bvar i args => findKey (.bvar i args.size) (matchArgs args) | .lit v => findKey (.lit v) | .sort => findKey .sort | .lam b => findKey .lam (matchExpr b) 0 | .forall d b => findKey .forall (matchExpr d >=> matchExpr b) | .proj n i a args => findKey (.proj n i args.size) (matchExpr a >=> matchArgs args) | _ => unreachable! end private partial def getMatchWithScoreAux (d : RefinedDiscrTree α) (e : DTExpr) (unify : Bool) (config : WhnfCoreConfig) (allowRootStar : Bool := false) : Array (Array α × Nat) := (do if e matches .star _ then guard allowRootStar d.root.foldl (init := failure) fun x k c => (do if k == Key.opaque then GetUnify.incrementScore 1 let (_, t) ← GetUnify.skipEntries c #[k] k.arity return t) <|> x else GetUnify.exactMatch e d.root.find? <|> do guard allowRootStar let some c := d.root.find? (.star 0) | failure return c ).run unify config end GetUnify /-- Return the results from the `RefinedDiscrTree` that match the given expression, together with their matching scores, in decreasing order of score. Each entry of type `Array α × Nat` corresponds to one pattern. If `unify := false`, then metavariables in `e` are treated as opaque variables. 
This is for when you don't want to instantiate metavariables in `e`.

If `allowRootStar := false`, then we don't allow `e` or the matched key in `d`
to be a star pattern. -/
def getMatchWithScore (d : RefinedDiscrTree α) (e : Expr) (unify : Bool)
    (config : WhnfCoreConfig) (allowRootStar : Bool := false) :
    MetaM (Array (Array α × Nat)) := do
  let e ← mkDTExpr e config
  let result := GetUnify.getMatchWithScoreAux d e unify config allowRootStar
  -- Sort the per-pattern results so that higher scores come first.
  return result.qsort (·.2 > ·.2)

/-- Similar to `getMatchWithScore`, but also returns matches with prefixes of `e`.
We store the score, followed by the number of ignored arguments. -/
partial def getMatchWithScoreWithExtra (d : RefinedDiscrTree α) (e : Expr) (unify : Bool)
    (config : WhnfCoreConfig) (allowRootStar : Bool := false) :
    MetaM (Array (Array α × Nat × Nat)) := do
  let result ← go e 0
  return result.qsort (·.2.1 > ·.2.1)
where
  /-- Match `e` itself, and recursively each application prefix of `e`
  (dropping the last argument each time), tagging every result with `numIgnored`,
  the number of arguments dropped so far. -/
  go (e : Expr) (numIgnored : Nat) : MetaM (Array (Array α × Nat × Nat)) := do
    let result ← getMatchWithScore d e unify config allowRootStar
    let result := result.map fun (a, b) => (a, b, numIgnored)
    match e with
    | .app e _ => return (← go e (numIgnored + 1)) ++ result
    | _ => return result


variable {β : Type} {m : Type → Type} [Monad m]

/-- Apply a monadic function to the array of values at each node in a `RefinedDiscrTree`. -/
partial def Trie.mapArraysM (t : RefinedDiscrTree.Trie α) (f : Array α → m (Array β)) :
    m (Trie β) := do
  match t with
  | .node children =>
    return .node (← children.mapM fun (k, t') => do pure (k, ← t'.mapArraysM f))
  | .values vs =>
    return .values (← f vs)
  | .path ks c =>
    return .path ks (← c.mapArraysM f)

/-- Apply a monadic function to the array of values at each node in a `RefinedDiscrTree`. -/
def mapArraysM (d : RefinedDiscrTree α) (f : Array α → m (Array β)) : m (RefinedDiscrTree β) :=
  return { root := ← d.root.mapM (·.mapArraysM f) }

/-- Apply a function to the array of values at each node in a `RefinedDiscrTree`. -/
def mapArrays (d : RefinedDiscrTree α) (f : Array α → Array β) : RefinedDiscrTree β :=
  d.mapArraysM (m := Id) f
Tactic\FunProp\StateList.lean
/- Copyright (c) 2023 J. W. Gerbscheid. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: J. W. Gerbscheid The combined state and list monad transformer. `StateListT σ α` is equivalent to `StateT σ (ListT α)` but more efficient. WARNING: `StateListT σ α m` is only a monad if `m` is a commutative monad. For example, ``` def problem : StateListT Unit (StateM (Array Nat)) Unit := do Alternative.orElse (pure ()) (fun _ => pure ()) StateListT.lift $ modify (·.push 0) StateListT.lift $ modify (·.push 1) #eval ((problem.run' ()).run #[]).2 ``` will yield either `#[0,1,0,1]`, or `#[0,0,1,1]`, depending on the order in which the actions in the do block are combined. -/ /-! StateList -/ namespace Mathlib.Meta.FunProp universe u v /-- `StateList` is a List with a state associated to each element. This is used instead of `List (α × σ)` as it is more efficient. -/ inductive StateList (σ α : Type u) where /-- .nil is the empty list. -/ | nil : StateList σ α /-- If `a : α`, `s : σ` and `l : List α`, then `.cons a s l`, is the list with first element `a` with state `s` and `l` as the rest of the list. -/ | cons : α → σ → StateList σ α → StateList σ α variable {α β σ : Type u} namespace StateList private def toList : StateList σ α → List (α × σ) | .cons a s l => (a, s) :: l.toList | .nil => [] private def toList' : StateList σ α → List α | .cons a _ l => a :: l.toList' | .nil => [] private def map (f : α → β) : StateList σ α → StateList σ β | .cons a s l => .cons (f a) s (l.map f) | .nil => .nil private def append : (xs ys : StateList σ α) → StateList σ α | .nil, bs => bs | .cons a s l, bs => .cons a s (l.append bs) instance : Append (StateList σ α) := ⟨StateList.append⟩ @[specialize] private def foldrM {m} [Monad m] : (f : α → σ → β → m β) → (init : β) → StateList σ α → m β | _, b, .nil => pure b | f, b, .cons a s l => do f a s (← l.foldrM f b) end StateList /-- The combined state and list monad transformer. 
-/ def StateListT (σ : Type u) (m : Type u → Type v) (α : Type u) : Type (max u v) := σ → m (StateList σ α) variable {m : Type u → Type v} [Monad m] /-- Run `x` on a given state `s`, returning the list of values with corresponding states. -/ @[always_inline, inline] def StateListT.run [Functor m] (x : StateListT σ m α) (s : σ) : m (List (α × σ)) := StateList.toList <$> x s /-- Run `x` on a given state `s`, returning the list of values. -/ @[always_inline, inline] def StateListT.run' [Functor m] (x : StateListT σ m α) (s : σ) : m (List α) := StateList.toList' <$> x s /-- The combined state and list monad. -/ abbrev StateListM (σ α : Type u) : Type u := StateListT σ Id α namespace StateListT section @[always_inline, inline] private def pure (a : α) : StateListT σ m α := fun s => return StateList.nil.cons a s /-- Separately handling lists of length 1 is important to avoid a stack overflow. -/ @[always_inline, inline] private def bind (x : StateListT σ m α) (f : α → StateListT σ m β) : StateListT σ m β := fun s => do match ← x s with | .nil => return .nil | .cons a s .nil => f a s | x => x.foldrM (fun a s bs => return (← f a s) ++ bs) .nil @[always_inline, inline] private def map (f : α → β) (x : StateListT σ m α) : StateListT σ m β := fun s => StateList.map f <$> x s @[always_inline] instance : Monad (StateListT σ m) where pure := StateListT.pure bind := StateListT.bind map := StateListT.map @[always_inline, inline] private def orElse (x : StateListT σ m α) (y : Unit → StateListT σ m α) : StateListT σ m α := fun s => (· ++ ·) <$> x s <*> y () s @[always_inline, inline] private def failure : StateListT σ m α := fun _ => return .nil instance : Alternative (StateListT σ m) where failure := StateListT.failure orElse := StateListT.orElse /-- Return the state from `StateListT σ m`. -/ @[always_inline, inline] protected def get : StateListT σ m σ := fun s => return StateList.nil.cons s s /-- Set the state in `StateListT σ m`. 
-/ @[always_inline, inline] protected def set : σ → StateListT σ m PUnit := fun s' _ => return StateList.nil.cons ⟨⟩ s' /-- Modify and get the state in `StateListT σ m`. -/ @[always_inline, inline] protected def modifyGet (f : σ → α × σ) : StateListT σ m α := fun s => let a := f s; return StateList.nil.cons a.1 a.2 /-- Lift an action from `m α` to `StateListT σ m α`. -/ @[always_inline, inline] protected def lift (t : m α) : StateListT σ m α := fun s => do let a ← t; return StateList.nil.cons a s instance : MonadLift m (StateListT σ m) := ⟨StateListT.lift⟩ @[always_inline] instance : MonadFunctor m (StateListT σ m) := ⟨fun f x s => f (x s)⟩ @[always_inline] instance{ε} [MonadExceptOf ε m] : MonadExceptOf ε (StateListT σ m) := { throw := StateListT.lift ∘ throwThe ε tryCatch := fun x c s => tryCatchThe ε (x s) (fun e => c e s) } end end StateListT instance : MonadStateOf σ (StateListT σ m) where get := StateListT.get set := StateListT.set modifyGet := StateListT.modifyGet @[always_inline] instance StateListT.monadControl : MonadControl m (StateListT σ m) where stM := StateList σ liftWith := fun f => do let s ← get; liftM (f (fun x => x s)) restoreM := fun x _ => x
Tactic\FunProp\Theorems.lean
/- Copyright (c) 2024 Tomas Skrivan. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Tomas Skrivan -/ import Mathlib.Tactic.FunProp.Decl import Mathlib.Tactic.FunProp.Types import Mathlib.Tactic.FunProp.FunctionData import Mathlib.Tactic.FunProp.RefinedDiscrTree import Batteries.Data.RBMap.Alter /-! ## `fun_prop` environment extensions storing theorems for `fun_prop` -/ namespace Mathlib open Lean Meta namespace Meta.FunProp /-- Stores important argument indices of lambda theorems For example ``` theorem Continuous_const {α β} [TopologicalSpace α] [TopologicalSpace β] (y : β) : Continuous fun _ : α => y ``` is represented by ``` .const 0 4 ``` -/ inductive LambdaTheoremArgs | id (X : Nat) | const (X y : Nat) | proj (x Y : Nat) | projDep (x Y : Nat) | comp (f g : Nat) | letE (f g : Nat) | pi (f : Nat) deriving Inhabited, BEq, Repr, Hashable /-- There are 5(+1) basic lambda theorems - id `Continuous fun x => x` - const `Continuous fun x => y` - proj `Continuous fun (f : X → Y) => f x` - projDep `Continuous fun (f : (x : X) → Y x => f x)` - comp `Continuous f → Continuous g → Continuous fun x => f (g x)` - letE `Continuous f → Continuous g → Continuous fun x => let y := g x; f x y` - pi `∀ y, Continuous (f · y) → Continuous fun x y => f x y` -/ inductive LambdaTheoremType | id | const | proj| projDep | comp | letE | pi deriving Inhabited, BEq, Repr, Hashable /-- -/ def LambdaTheoremArgs.type (t : LambdaTheoremArgs) : LambdaTheoremType := match t with | .id .. => .id | .const .. => .const | .proj .. => .proj | .projDep .. => .projDep | .comp .. => .comp | .letE .. => .letE | .pi .. 
=> .pi set_option linter.unusedVariables false in /-- -/ def detectLambdaTheoremArgs (f : Expr) (ctxVars : Array Expr) : MetaM (Option LambdaTheoremArgs) := do -- eta expand but beta reduce body let f ← forallTelescope (← inferType f) fun xs b => mkLambdaFVars xs (mkAppN f xs).headBeta match f with | .lam xName xType xBody xBi => match xBody with | .bvar 0 => -- fun x => x let .some argId_X := ctxVars.findIdx? (fun x => x == xType) | return none return .some (.id argId_X) | .fvar yId => -- fun x => y let .some argId_X := ctxVars.findIdx? (fun x => x == xType) | return none let .some argId_y := ctxVars.findIdx? (fun x => x == (.fvar yId)) | return none return .some (.const argId_X argId_y) | .app (.bvar 0) (.fvar xId) => -- fun f => f x let fType := xType let .some argId_x := ctxVars.findIdx? (fun x => x == (.fvar xId)) | return none match fType with | .forallE xName' xType' (.fvar yId) xBi' => let .some argId_Y := ctxVars.findIdx? (fun x => x == (.fvar yId)) | return none return .some <| .proj argId_x argId_Y | .forallE xName' xType' (.app (.fvar yId) (.bvar 0)) xBi' => let .some argId_Y := ctxVars.findIdx? (fun x => x == (.fvar yId)) | return none return .some <| .projDep argId_x argId_Y | _ => return none | .app (.fvar fId) (.app (.fvar gId) (.bvar 0)) => -- fun x => f (g x) let .some argId_f := ctxVars.findIdx? (fun x => x == (.fvar fId)) | return none let .some argId_g := ctxVars.findIdx? (fun x => x == (.fvar gId)) | return none return .some <| .comp argId_f argId_g | .letE yName yType (.app (.fvar gId) (.bvar 0)) (.app (.app (.fvar fId) (.bvar 1)) (.bvar 0)) dep => let .some argId_f := ctxVars.findIdx? (fun x => x == (.fvar fId)) | return none let .some argId_g := ctxVars.findIdx? (fun x => x == (.fvar gId)) | return none return .some <| .letE argId_f argId_g | .lam Name yType (.app (.app (.fvar fId) (.bvar 1)) (.bvar 0)) yBi => -- fun x y => f x y let .some argId_f := ctxVars.findIdx? 
(fun x => x == (.fvar fId)) | return none return .some <| .pi argId_f | _ => return none | _ => return none /-- -/ structure LambdaTheorem where /-- Name of function property -/ funPropName : Name /-- Name of lambda theorem -/ thmName : Name /-- Type and important argument of the theorem. -/ thmArgs : LambdaTheoremArgs deriving Inhabited, BEq /-- -/ structure LambdaTheorems where /-- map: function property name × theorem type → lambda theorem -/ theorems : HashMap (Name × LambdaTheoremType) LambdaTheorem := {} deriving Inhabited /-- return proof of lambda theorem -/ def LambdaTheorem.getProof (thm : LambdaTheorem) : MetaM Expr := do mkConstWithFreshMVarLevels thm.thmName /-- -/ abbrev LambdaTheoremsExt := SimpleScopedEnvExtension LambdaTheorem LambdaTheorems /-- Extension storing all lambda theorems. -/ initialize lambdaTheoremsExt : LambdaTheoremsExt ← registerSimpleScopedEnvExtension { name := by exact decl_name% initial := {} addEntry := fun d e => {d with theorems := d.theorems.insert (e.funPropName, e.thmArgs.type) e} } /-- -/ def getLambdaTheorem (funPropName : Name) (type : LambdaTheoremType) : CoreM (Option LambdaTheorem) := do return (lambdaTheoremsExt.getState (← getEnv)).theorems.find? (funPropName,type) -------------------------------------------------------------------------------- /-- Function theorems are stated in uncurried or compositional form. 
uncurried ``` theorem Continuous_add : Continuous (fun x => x.1 + x.2) ``` compositional ``` theorem Continuous_add (hf : Continuous f) (hg : Continuous g) : Continuous (fun x => (f x) + (g x)) ``` -/ inductive TheoremForm where | uncurried | comp deriving Inhabited, BEq, Repr /-- theorem about specific function (either declared constant or free variable) -/ structure FunctionTheorem where /-- function property name -/ funPropName : Name /-- theorem name -/ thmOrigin : Origin /-- function name -/ funOrigin : Origin /-- array of argument indices about which this theorem is about -/ mainArgs : Array Nat /-- total number of arguments applied to the function -/ appliedArgs : Nat /-- priority -/ priority : Nat := eval_prio default /-- form of the theorem, see documentation of TheoremForm -/ form : TheoremForm deriving Inhabited, BEq private local instance : Ord Name := ⟨Name.quickCmp⟩ /-- -/ structure FunctionTheorems where /-- map: function name → function property → function theorem -/ theorems : Batteries.RBMap Name (Batteries.RBMap Name (Array FunctionTheorem) compare) compare := {} deriving Inhabited /-- return proof of function theorem -/ def FunctionTheorem.getProof (thm : FunctionTheorem) : MetaM Expr := do match thm.thmOrigin with | .decl name => mkConstWithFreshMVarLevels name | .fvar id => return .fvar id /-- -/ abbrev FunctionTheoremsExt := SimpleScopedEnvExtension FunctionTheorem FunctionTheorems /-- Extension storing all function theorems. 
-/ initialize functionTheoremsExt : FunctionTheoremsExt ← registerSimpleScopedEnvExtension { name := by exact decl_name% initial := {} addEntry := fun d e => {d with theorems := d.theorems.alter e.funOrigin.name fun funProperties => let funProperties := funProperties.getD {} funProperties.alter e.funPropName fun thms => let thms := thms.getD #[] thms.push e} } /-- -/ def getTheoremsForFunction (funName : Name) (funPropName : Name) : CoreM (Array FunctionTheorem) := do return (functionTheoremsExt.getState (← getEnv)).theorems.findD funName {} |>.findD funPropName #[] -------------------------------------------------------------------------------- /-- General theorem about function property used for transition and morphism theorems -/ structure GeneralTheorem where /-- function property name -/ funPropName : Name /-- theorem name -/ thmName : Name /-- discrimination tree keys used to index this theorem -/ keys : List RefinedDiscrTree.DTExpr /-- priority -/ priority : Nat := eval_prio default deriving Inhabited, BEq /-- Get proof of a theorem. 
-/
def GeneralTheorem.getProof (thm : GeneralTheorem) : MetaM Expr := do
  mkConstWithFreshMVarLevels thm.thmName

/-- Collection of general theorems, indexed by a `RefinedDiscrTree` on their keys. -/
structure GeneralTheorems where
  /-- Discrimination tree mapping conclusion patterns to the stored theorems. -/
  theorems : RefinedDiscrTree GeneralTheorem := {}
  deriving Inhabited

/-- Environment extension for general (transition/morphism) theorems. -/
abbrev GeneralTheoremsExt := SimpleScopedEnvExtension GeneralTheorem GeneralTheorems

/-- Extension storing transition theorems (inferring one function property from another). -/
initialize transitionTheoremsExt : GeneralTheoremsExt ←
  registerSimpleScopedEnvExtension {
    name := by exact decl_name%
    initial := {}
    addEntry := fun d e =>
      {d with theorems := e.keys.foldl (RefinedDiscrTree.insertDTExpr · · e) d.theorems}
  }

/-- Extension storing morphism theorems (about bundled morphism application). -/
initialize morTheoremsExt : GeneralTheoremsExt ←
  registerSimpleScopedEnvExtension {
    name := by exact decl_name%
    initial := {}
    addEntry := fun d e =>
      {d with theorems := e.keys.foldl (RefinedDiscrTree.insertDTExpr · · e) d.theorems}
  }


--------------------------------------------------------------------------------


/-- There are four types of theorems:
- lam - theorem about basic lambda calculus terms
- function - theorem about a specific function (declared or free variable) in specific arguments
- mor - special theorems talking about bundled morphisms/DFunLike.coe
- transition - theorems inferring one function property from another

Examples:
- lam
```
  theorem Continuous_id : Continuous fun x => x
  theorem Continuous_comp (hf : Continuous f) (hg : Continuous g) :
    Continuous fun x => f (g x)
```
- function
```
  theorem Continuous_add : Continuous (fun x => x.1 + x.2)
  theorem Continuous_add (hf : Continuous f) (hg : Continuous g) :
    Continuous (fun x => (f x) + (g x))
```
- mor - the head of function body has to be `DFunLike.coe`
```
  theorem ContDiff.clm_apply {f : E → F →L[𝕜] G} {g : E → F}
      (hf : ContDiff 𝕜 n f) (hg : ContDiff 𝕜 n g) :
    ContDiff 𝕜 n fun x => (f x) (g x)
  theorem clm_linear {f : E →L[𝕜] F} : IsLinearMap 𝕜 f
```
- transition - the conclusion has to be in the form `P f` where `f` is a free variable
```
  theorem linear_is_continuous [FiniteDimensional ℝ E] {f : E → F} (hf : IsLinearMap 𝕜 f) :
Continuous f ``` -/ inductive Theorem where | lam (thm : LambdaTheorem) | function (thm : FunctionTheorem) | mor (thm : GeneralTheorem) | transition (thm : GeneralTheorem) /-- -/ def getTheoremFromConst (declName : Name) (prio : Nat := eval_prio default) : MetaM Theorem := do let info ← getConstInfo declName forallTelescope info.type fun xs b => do let .some (decl,f) ← getFunProp? b | throwError "unrecognized function property `{← ppExpr b}`" let funPropName := decl.funPropName let fData? ← getFunctionData? f defaultUnfoldPred {zeta := false} if let .some thmArgs ← detectLambdaTheoremArgs (← fData?.get) xs then return .lam { funPropName := funPropName thmName := declName thmArgs := thmArgs } let .data fData := fData? | throwError s!"function in invalid form {← ppExpr f}" match fData.fn with | .const funName _ => -- todo: more robust detection of compositional and uncurried form!!! -- I think this detects `Continuous fun x => x + c` as compositional ... let dec ← fData.nontrivialDecomposition let form : TheoremForm := if dec.isSome || funName == ``Prod.mk then .comp else .uncurried return .function { -- funPropName funName fData.mainArgs fData.args.size thmForm funPropName := funPropName thmOrigin := .decl declName funOrigin := .decl funName mainArgs := fData.mainArgs appliedArgs := fData.args.size priority := prio form := form } | .fvar .. 
=> let (_,_,b') ← forallMetaTelescope info.type let keys := ← RefinedDiscrTree.mkDTExprs b' {} false let thm : GeneralTheorem := { funPropName := funPropName thmName := declName keys := keys priority := prio } -- todo: maybe do a little bit more careful detection of morphism and transition theorems match (← fData.isMorApplication) with | .exact => return .mor thm | .underApplied | .overApplied => throwError "fun_prop theorem about morphism coercion has to be in fully applied form" | .none => if fData.fn.isFVar && (fData.args.size == 1) && (fData.args[0]!.expr == fData.mainVar) then return .transition thm throwError "Not a valid `fun_prop` theorem!" | _ => throwError "unrecognized theoremType `{← ppExpr b}`" /-- -/ def addTheorem (declName : Name) (attrKind : AttributeKind := .global) (prio : Nat := eval_prio default) : MetaM Unit := do match (← getTheoremFromConst declName prio) with | .lam thm => trace[Meta.Tactic.fun_prop.attr] "\ lambda theorem: {thm.thmName} function property: {thm.funPropName} type: {repr thm.thmArgs.type}" lambdaTheoremsExt.add thm attrKind | .function thm => trace[Meta.Tactic.fun_prop.attr] "\ function theorem: {thm.thmOrigin.name} function property: {thm.funPropName} function name: {thm.funOrigin.name} main arguments: {thm.mainArgs} applied arguments: {thm.appliedArgs} form: {repr thm.form}" functionTheoremsExt.add thm attrKind | .mor thm => trace[Meta.Tactic.fun_prop.attr] "\ morphism theorem: {thm.thmName} function property: {thm.funPropName}" morTheoremsExt.add thm attrKind | .transition thm => trace[Meta.Tactic.fun_prop.attr] "\ transition theorem: {thm.thmName} function property: {thm.funPropName}" transitionTheoremsExt.add thm attrKind
Tactic\FunProp\ToBatteries.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Lean

/-!
## `funProp` missing function from standard library
-/

namespace Mathlib

open Lean Meta

namespace Meta.FunProp

/-- Check if `a` can be obtained by removing elements from `b`,
i.e. `a` is an ordered subsequence of `b`. -/
def isOrderedSubsetOf {α} [Inhabited α] [DecidableEq α] (a b : Array α) : Bool :=
  Id.run do
  if a.size > b.size then
    return false
  -- `i` counts how many elements of `a` have been matched so far, scanning `b` left to right
  let mut i := 0
  for j in [0:b.size] do
    if i = a.size then
      break
    if a[i]! = b[j]! then
      i := i+1
  if i = a.size then
    return true
  else
    return false

private def letTelescopeImpl {α} (e : Expr) (k : Array Expr → Expr → MetaM α) : MetaM α :=
  lambdaLetTelescope e λ xs b => do
    -- stop at the first bound variable that is not a let binding
    if let .some i ← xs.findIdxM? (fun x ↦ do pure ¬(← x.fvarId!.isLetVar)) then
      k xs[0:i] (← mkLambdaFVars xs[i:] b)
    else
      k xs b

/-- Telescope consuming only let bindings -/
def letTelescope {α n} [MonadControlT MetaM n] [Monad n] (e : Expr)
    (k : Array Expr → Expr → n α) : n α :=
  map2MetaM (fun k => letTelescopeImpl e k) k

/-- Swaps bvars indices `i` and `j`

NOTE: the indices `i` and `j` do not correspond to the `n` in `bvar n`. Rather
they behave like indices in `Expr.lowerLooseBVars`, `Expr.liftLooseBVars`, etc.

TODO: This has to have a better implementation, but I'm still beyond confused with how bvar
indices work -/
def _root_.Lean.Expr.swapBVars (e : Expr) (i j : Nat) : Expr :=
  -- build the substitution array `#[bvar 0, ..., bvar j, ..., bvar i, ...]` with `i`/`j` swapped
  let swapBVarArray : Array Expr := Id.run do
    let mut a : Array Expr := .mkEmpty e.looseBVarRange
    for k in [0:e.looseBVarRange] do
      a := a.push (.bvar (if k = i then j else if k = j then i else k))
    a
  e.instantiate swapBVarArray

/-- For `#[x₁, .., xₙ]` create `(x₁, .., xₙ)`. -/
def mkProdElem (xs : Array Expr) : MetaM Expr := do
  match xs.size with
  | 0 => return default
  | 1 => return xs[0]!
  | _ =>
    -- right-nested pairs: `(x₁, (x₂, (..., xₙ)))`
    let n := xs.size
    xs[0:n-1].foldrM (init := xs[n-1]!) fun x p => mkAppM ``Prod.mk #[x,p]

/-- For `(x₀, .., xₙ₋₁)` return `xᵢ` but as a product projection.

We need to know the total size of the product to be considered.

For example for `xyz : X × Y × Z`
  - `mkProdProj xyz 1 3` returns `xyz.snd.fst`.
  - `mkProdProj xyz 1 2` returns `xyz.snd`.
-/
def mkProdProj (x : Expr) (i : Nat) (n : Nat) : MetaM Expr := do
  -- let X ← inferType x
  -- if X.isAppOfArity ``Prod 2 then
  match i, n with
  | _, 0 => pure x
  | _, 1 => pure x
  | 0, _ => mkAppM ``Prod.fst #[x]
  | i'+1, n'+1 => mkProdProj (← withTransparency .all <| mkAppM ``Prod.snd #[x]) i' n'

/-- For an element of a product type (of size `n`) `xs` create an array of all possible projections
i.e. `#[xs.1, xs.2.1, xs.2.2.1, ..., xs.2..2]` -/
def mkProdSplitElem (xs : Expr) (n : Nat) : MetaM (Array Expr) :=
  (Array.range n)
    |>.mapM (λ i => mkProdProj xs i n)

/-- Uncurry function `f` in `n` arguments. -/
def mkUncurryFun (n : Nat) (f : Expr) : MetaM Expr := do
  if n ≤ 1 then
    return f
  forallBoundedTelescope (← inferType f) n λ xs _ => do
    -- name the tuple variable by concatenating the names of the curried arguments
    let xProdName : String ← xs.foldlM (init:="") λ n x => do
      return (n ++ toString (← x.fvarId!.getUserName).eraseMacroScopes)
    let xProdType ← inferType (← mkProdElem xs)
    withLocalDecl (.mkSimple xProdName) default xProdType λ xProd => do
      let xs' ← mkProdSplitElem xProd n
      mkLambdaFVars #[xProd] (← mkAppM' f xs').headBeta

/-- Eta expand `f` in only one variable and reduce in others.

Examples:
```
  f                ==> fun x => f x
  fun x y => f x y ==> fun x => f x
  HAdd.hAdd y      ==> fun x => HAdd.hAdd y x
  HAdd.hAdd        ==> fun x => HAdd.hAdd x
```
-/
def etaExpand1 (f : Expr) : MetaM Expr := do
  let f := f.eta
  if f.isLambda then
    return f
  else
    withDefault do forallBoundedTelescope (← inferType f) (.some 1) fun xs _ => do
      mkLambdaFVars xs (mkAppN f xs)
Tactic\FunProp\Types.lean
/-
Copyright (c) 2024 Tomas Skrivan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tomas Skrivan
-/
import Mathlib.Tactic.FunProp.FunctionData
import Batteries.Data.RBMap.Basic

/-!
## `funProp`

This file defines the environment extension for `funProp`.
-/

namespace Mathlib

open Lean Meta

namespace Meta.FunProp

initialize registerTraceClass `Meta.Tactic.fun_prop.attr
initialize registerTraceClass `Meta.Tactic.fun_prop
initialize registerTraceClass `Meta.Tactic.fun_prop.step
initialize registerTraceClass `Meta.Tactic.fun_prop.unify
initialize registerTraceClass `Meta.Tactic.fun_prop.discharge
initialize registerTraceClass `Meta.Tactic.fun_prop.apply
initialize registerTraceClass `Meta.Tactic.fun_prop.unfold
initialize registerTraceClass `Meta.Tactic.fun_prop.cache

/-- Indicates origin of a function or a statement. -/
inductive Origin where
  /-- It is a constant defined in the environment. -/
  | decl (name : Name)
  /-- It is a free variable in the local context. -/
  | fvar (fvarId : FVarId)
  deriving Inhabited, BEq

/-- Name of the origin. -/
def Origin.name (origin : Origin) : Name :=
  match origin with
  | .decl name => name
  | .fvar id => id.name

/-- Get the expression specified by `origin`. -/
def Origin.getValue (origin : Origin) : MetaM Expr := do
  match origin with
  | .decl name => mkConstWithFreshMVarLevels name
  | .fvar id => pure (.fvar id)

/-- Pretty print `FunProp.Origin`. -/
def ppOrigin {m} [Monad m] [MonadEnv m] [MonadError m] : Origin → m MessageData
  | .decl n => return m!"{← mkConstWithLevelParams n}"
  | .fvar n => return mkFVar n

/-- Pretty print `FunProp.Origin`. Returns string unlike `ppOrigin`. -/
def ppOrigin' (origin : Origin) : MetaM String := do
  match origin with
  | .fvar id => return s!"{← ppExpr (.fvar id)} : {← ppExpr (← inferType (.fvar id))}"
  | _ => pure (toString origin.name)

/-- Get origin of the head function. -/
def FunctionData.getFnOrigin (fData : FunctionData) : Origin :=
  match fData.fn with
  | .fvar id => .fvar id
  | .const name _ => .decl name
  | _ => .decl Name.anonymous

/-- Default names to be considered reducible by `fun_prop` -/
def defaultNamesToUnfold : Array Name :=
  #[`id, `Function.comp, `Function.HasUncurry.uncurry, `Function.uncurry]

/-- `fun_prop` configuration -/
structure Config where
  /-- Names to unfold -/
  constToUnfold : Batteries.RBSet Name Name.quickCmp := .ofArray defaultNamesToUnfold _
  /-- Custom discharger to satisfy theorem hypotheses. -/
  disch : Expr → MetaM (Option Expr) := fun _ => pure .none
  /-- Maximal number of transitions between function properties
  e.g. inferring differentiability from linearity -/
  maxDepth := 200
  /-- current depth -/
  depth := 0
  /-- Stack of used theorems, used to prevent trivial loops. -/
  thmStack : List Origin := []
  /-- Maximum number of steps `fun_prop` can take. -/
  maxSteps := 100000
  deriving Inhabited

/-- `fun_prop` state -/
structure State where
  /-- Simp's cache is used as the `funProp` tactic is designed to be used inside of simp and
  utilize its cache -/
  cache : Simp.Cache := {}
  /-- Count the number of steps and stop when maxSteps is reached. -/
  numSteps := 0
  /-- Log progress and failures messages that should be displayed to the user at the end. -/
  msgLog : List String := []

/-- Push the theorem `thmId` onto the stack of used theorems. -/
def Config.addThm (cfg : Config) (thmId : Origin) : Config :=
  {cfg with thmStack := thmId :: cfg.thmStack}

/-- Increase depth -/
def Config.increaseDepth (cfg : Config) : Config :=
  {cfg with depth := cfg.depth + 1}

/-- Monad used by `fun_prop`: reads a `Config`, updates a `State`, on top of `MetaM`. -/
abbrev FunPropM := ReaderT FunProp.Config $ StateT FunProp.State MetaM

/-- Result of `funProp`, it is a proof of function property `P f` -/
structure Result where
  /-- The proof term of the function property. -/
  proof : Expr

/-- Check if previously used theorem was `thmOrigin`.

Only the most recently used theorem (the head of the stack) is checked. -/
def previouslyUsedThm (thmOrigin : Origin) : FunPropM Bool := do
  match (← read).thmStack.head? with
  | .some thmOrigin' => return thmOrigin == thmOrigin'
  | _ => return false

/-- Puts the theorem to the stack of used theorems. -/
def withTheorem {α} (thmOrigin : Origin) (go : FunPropM α) : FunPropM α := do
  let cfg ← read
  if cfg.depth > cfg.maxDepth then
    throwError s!"fun_prop error, maximum depth({cfg.maxDepth}) reached!"
  withReader (fun cfg => cfg.addThm thmOrigin |>.increaseDepth) do go

/-- Default names to unfold -/
def defaultUnfoldPred : Name → Bool :=
  defaultNamesToUnfold.contains

/-- Get predicate on names indicating if they should be unfolded. -/
def unfoldNamePred : FunPropM (Name → Bool) := do
  let toUnfold := (← read).constToUnfold
  return fun n => toUnfold.contains n

/-- Increase the step counter, throws error when `maxSteps` was reached -/
def increaseSteps : FunPropM Unit := do
  let numSteps := (← get).numSteps
  let maxSteps := (← read).maxSteps
  if numSteps > maxSteps then
    throwError s!"fun_prop failed, maximum number({maxSteps}) of steps exceeded"
  modify (fun s => {s with numSteps := s.numSteps + 1})

/-- Log error message that will be displayed to the user at the end. -/
def logError (msg : String) : FunPropM Unit := do
  modify fun s => {s with msgLog := msg::s.msgLog}
Tactic\GCongr\Core.lean
/-
Copyright (c) 2023 Mario Carneiro, Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Heather Macbeth
-/
import Mathlib.Order.Defs
import Mathlib.Tactic.Core
import Mathlib.Tactic.GCongr.ForwardAttr
import Batteries.Lean.Except
import Batteries.Tactic.Exact

/-!
# The `gcongr` ("generalized congruence") tactic

The `gcongr` tactic applies "generalized congruence" rules, reducing a relational goal between a
LHS and RHS matching the same pattern to relational subgoals between the differing inputs to the
pattern. For example,
```
example {a b x c d : ℝ} (h1 : a + 1 ≤ b + 1) (h2 : c + 2 ≤ d + 2) :
    x ^ 2 * a + c ≤ x ^ 2 * b + d := by
  gcongr
  · linarith
  · linarith
```
This example has the goal of proving the relation `≤` between a LHS and RHS both of the pattern
```
x ^ 2 * ?_ + ?_
```
(with inputs `a`, `c` on the left and `b`, `d` on the right); after the use of `gcongr`, we have
the simpler goals `a ≤ b` and `c ≤ d`.

A pattern can be provided explicitly; this is useful if a non-maximal match is desired:
```
example {a b c d x : ℝ} (h : a + c + 1 ≤ b + d + 1) :
    x ^ 2 * (a + c) + 5 ≤ x ^ 2 * (b + d) + 5 := by
  gcongr x ^ 2 * ?_ + 5
  linarith
```

## Sourcing the generalized congruence lemmas

Relevant "generalized congruence" lemmas are declared using the attribute `@[gcongr]`. For example,
the first example constructs the proof term
```
add_le_add (mul_le_mul_of_nonneg_left _ (pow_bit0_nonneg x 1)) _
```
using the generalized congruence lemmas `add_le_add` and `mul_le_mul_of_nonneg_left`. The term
`pow_bit0_nonneg x 1` is automatically generated by a discharger (see below).

When a lemma is tagged `@[gcongr]`, it is verified that that lemma is of "generalized congruence"
form, `f x₁ y z₁ ∼ f x₂ y z₂`, that is, a relation between the application of a function to two
argument lists, in which the "varying argument" pairs (here `x₁`/`x₂` and `z₁`/`z₂`) are all free
variables.

The "varying"/non-"varying" classification of the arguments is recorded (as an array of booleans),
and the `gcongr` tactic will try a lemma only if it matches the goal in relation `∼`, head
function `f` and "varying"/non-"varying" classification for each of the inputs to `f`. Thus, for
example, all three of the following lemmas are tagged `@[gcongr]` and are used in different
situations according to whether the goal compares constant-left-multiplications,
constant-right-multiplications, or fully varying multiplications:
```
theorem mul_le_mul_of_nonneg_left [Mul α] [Zero α] [Preorder α] [PosMulMono α]
    {a b c : α} (h : b ≤ c) (a0 : 0 ≤ a) :
    a * b ≤ a * c

theorem mul_le_mul_of_nonneg_right [Mul α] [Zero α] [Preorder α] [MulPosMono α]
    {a b c : α} (h : b ≤ c) (a0 : 0 ≤ a) :
    b * a ≤ c * a

theorem mul_le_mul [MulZeroClass α] [Preorder α] [PosMulMono α] [MulPosMono α]
    {a b c d : α} (h₁ : a ≤ b) (h₂ : c ≤ d) (c0 : 0 ≤ c) (b0 : 0 ≤ b) :
    a * c ≤ b * d
```
The advantage of this approach is that the lemmas with fewer "varying" input pairs typically
require fewer side conditions, so the tactic becomes more useful by special-casing them.

There can also be more than one generalized congruence lemma dealing with the same relation, head
function and "varying"/non-"varying" configuration, for example with purely notational head
functions which have different theories when different typeclass assumptions apply. For example,
the following lemma is stored with the same `@[gcongr]` data as `mul_le_mul` above, and the two
lemmas are simply tried in succession to determine which has the typeclasses relevant to the goal:
```
theorem mul_le_mul' [Mul α] [Preorder α] [CovariantClass α α (· * ·) (· ≤ ·)]
    [CovariantClass α α (Function.swap (· * ·)) (· ≤ ·)] {a b c d : α}
    (h₁ : a ≤ b) (h₂ : c ≤ d) :
    a * c ≤ b * d
```

## Resolving goals

The tactic attempts to discharge side goals to the "generalized congruence" lemmas (such as the
side goal `0 ≤ x ^ 2` in the above application of `mul_le_mul_of_nonneg_left`) using the tactic
`gcongr_discharger`, which wraps `positivity` but can also be extended. Side goals not discharged
in this way are left for the user.

The tactic also attempts to discharge "main" goals using the available hypotheses, as well as a
limited amount of forward reasoning.  Such attempts are made *before* descending further into
matching by congruence. The built-in forward-reasoning includes reasoning by symmetry and
reflexivity, and this can be extended by writing tactic extensions tagged with the
`@[gcongr_forward]` attribute.

## Introducing variables and hypotheses

Some natural generalized congruence lemmas have "main" hypotheses which are universally quantified
or have the structure of an implication, for example
```
theorem GCongr.Finset.sum_le_sum [OrderedAddCommMonoid N] {f g : ι → N} {s : Finset ι}
    (h : ∀ (i : ι), i ∈ s → f i ≤ g i) :
    s.sum f ≤ s.sum g
```
The tactic automatically introduces the variable `i✝ : ι` and hypothesis `hi✝ : i✝ ∈ s` in the
subgoal `∀ (i : ι), i ∈ s → f i ≤ g i` generated by applying this lemma. By default this is done
anonymously, so they are inaccessible in the goal state which results. The user can name them if
needed using the syntax `gcongr with i hi`.

## Variants

The tactic `rel` is a variant of `gcongr`, intended for teaching. Local hypotheses are not
used automatically to resolve main goals, but must be invoked by name:
```
example {a b x c d : ℝ} (h1 : a ≤ b) (h2 : c ≤ d) :
    x ^ 2 * a + c ≤ x ^ 2 * b + d := by
  rel [h1, h2]
```
The `rel` tactic is finishing-only: it fails if any main or side goals are not resolved.
-/

namespace Mathlib.Tactic.GCongr

open Lean Meta

/-- Structure recording the data for a "generalized congruence" (`gcongr`) lemma. -/
structure GCongrLemma where
  /-- Name of the lemma declaration. -/
  declName : Name
  /-- For each "main" antecedent: its index among the lemma's antecedents, paired with the index
  of the "varying argument" pair it relates. -/
  mainSubgoals : Array (Nat × Nat)
  /-- For each argument of the head function: `true` if the LHS/RHS instances vary. -/
  varyingArgs : Array Bool
  deriving Inhabited, Repr

/-- Environment extension for "generalized congruence" (`gcongr`) lemmas. -/
initialize gcongrExt : SimpleScopedEnvExtension ((Name × Name × Array Bool) × GCongrLemma)
    (HashMap (Name × Name × Array Bool) (Array GCongrLemma)) ←
  registerSimpleScopedEnvExtension {
    addEntry := fun m (n, lem) => m.insert n ((m.findD n #[]).push lem)
    initial := {}
  }

/-- Attribute marking "generalized congruence" (`gcongr`) lemmas.  Such lemmas must have a
conclusion of a form such as `f x₁ y z₁ ∼ f x₂ y z₂`; that is, a relation between the application
of a function to two argument lists, in which the "varying argument" pairs (here `x₁`/`x₂` and
`z₁`/`z₂`) are all free variables.

The antecedents of such a lemma are classified as generating "main goals" if they are of the form
`x₁ ≈ x₂` for some "varying argument" pair `x₁`/`x₂` (and a possibly different relation `≈` to
`∼`), or more generally of the form `∀ i h h' j h'', f₁ i j ≈ f₂ i j` (say) for some "varying
argument" pair `f₁`/`f₂`. (Other antecedents are considered to generate "side goals".) The index
of the "varying argument" pair corresponding to each "main" antecedent is recorded.

Lemmas involving `<` or `≤` can also be marked `@[bound]` for use in the related `bound` tactic.
-/
initialize registerBuiltinAttribute {
  name := `gcongr
  descr := "generalized congruence"
  add := fun decl _ kind ↦ MetaM.run' do
    let declTy := (← getConstInfo decl).type
    withReducible <| forallTelescopeReducing declTy fun xs targetTy => do
    let fail (m : MessageData) := throwError "\
      @[gcongr] attribute only applies to lemmas proving f x₁ ... xₙ ∼ f x₁' ... xₙ'.\n \
      {m} in the conclusion of {declTy}"
    -- verify that conclusion of the lemma is of the form `f x₁ ... xₙ ∼ f x₁' ... xₙ'`
    let .app (.app rel lhs) rhs ← whnf targetTy | fail "No relation with at least two arguments found"
    let some relName := rel.getAppFn.constName? | fail "No relation found"
    let (some head, lhsArgs) := lhs.withApp fun e a => (e.constName?, a)
      | fail "LHS is not a function"
    let (some head', rhsArgs) := rhs.withApp fun e a => (e.constName?, a)
      | fail "RHS is not a function"
    unless head == head' && lhsArgs.size == rhsArgs.size do
      fail "LHS and RHS do not have the same head function and arity"
    let mut varyingArgs := #[]
    let mut pairs := #[]
    -- iterate through each pair of corresponding (LHS/RHS) inputs to the head function `head` in
    -- the conclusion of the lemma
    for e1 in lhsArgs, e2 in rhsArgs do
      -- we call such a pair a "varying argument" pair if the LHS/RHS inputs are not defeq
      -- (and not proofs)
      let isEq := (← isDefEq e1 e2) || ((← isProof e1) && (← isProof e2))
      if !isEq then
        let e1 := e1.eta
        let e2 := e2.eta
        -- verify that the "varying argument" pairs are free variables (after eta-reduction)
        unless e1.isFVar && e2.isFVar do
          fail "Not all arguments are free variables"
        -- add such a pair to the `pairs` array
        pairs := pairs.push (varyingArgs.size, e1, e2)
      -- record in the `varyingArgs` array a boolean (true for varying, false if LHS/RHS are defeq)
      varyingArgs := varyingArgs.push !isEq
    let mut mainSubgoals := #[]
    let mut i := 0
    -- iterate over antecedents `hyp` to the lemma
    for hyp in xs do
      mainSubgoals ← forallTelescopeReducing (← inferType hyp) fun _args hypTy => do
        let mut mainSubgoals := mainSubgoals
        -- pull out the conclusion `hypTy` of the antecedent, and check whether it is of the form
        -- `lhs₁ _ ... _ ≈ rhs₁ _ ... _` (for a possibly different relation `≈` than the relation
        -- `rel` above)
        if let .app (.app _ lhs₁) rhs₁ ← whnf hypTy then
          let lhs₁ := lhs₁.getAppFn
          let rhs₁ := rhs₁.getAppFn
          -- check whether `(lhs₁, rhs₁)` is in some order one of the "varying argument" pairs from
          -- the conclusion to the lemma
          if let some j ← pairs.findM? fun (_, e1, e2) =>
              isDefEq lhs₁ e1 <&&> isDefEq rhs₁ e2 <||> isDefEq lhs₁ e2 <&&> isDefEq rhs₁ e1 then
            -- if yes, record the index of this antecedent as a "main subgoal", together with the
            -- index of the "varying argument" pair it corresponds to
            mainSubgoals := mainSubgoals.push (i, j.1)
        pure mainSubgoals
      i := i + 1
    -- store all the information from this parse of the lemma's structure in a `GCongrLemma`
    gcongrExt.add ((relName, head, varyingArgs), { declName := decl, mainSubgoals, varyingArgs })
      kind
}

initialize registerTraceClass `Meta.gcongr

syntax "gcongr_discharger" : tactic

/-- This is used as the default side-goal discharger,
it calls the `gcongr_discharger` extensible tactic. -/
def gcongrDischarger (goal : MVarId) : MetaM Unit := Elab.Term.TermElabM.run' do
  trace[Meta.gcongr] "Attempting to discharge side goal {goal}"
  let [] ← Elab.Tactic.run goal <|
      Elab.Tactic.evalTactic (Unhygienic.run `(tactic| gcongr_discharger))
    | failure

open Elab Tactic

/-- See if the term is `a = b` and the goal is `a ∼ b` or `b ∼ a`, with `∼` reflexive. -/
@[gcongr_forward] def exactRefl : ForwardExt where
  eval h goal := do
    let m ← mkFreshExprMVar none
    goal.assignIfDefeq (← mkAppOptM ``Eq.subst #[h, m])
    goal.applyRfl

/-- See if the term is `a < b` and the goal is `a ≤ b`. -/
@[gcongr_forward] def exactLeOfLt : ForwardExt where
  eval h goal := do goal.assignIfDefeq (← mkAppM ``le_of_lt #[h])

/-- See if the term is `a ∼ b` with `∼` symmetric and the goal is `b ∼ a`.
-/ partial def _root_.Lean.MVarId.gcongr (g : MVarId) (template : Option Expr) (names : List (TSyntax ``binderIdent)) (mainGoalDischarger : MVarId → MetaM Unit := gcongrForwardDischarger) (sideGoalDischarger : MVarId → MetaM Unit := gcongrDischarger) : MetaM (Bool × List (TSyntax ``binderIdent) × Array MVarId) := g.withContext do withTraceNode `Meta.gcongr (fun _ => return m!"gcongr: ⊢ {← g.getType}") do match template with | none => -- A. If there is no template, try to resolve the goal by the provided tactic -- `mainGoalDischarger`, and continue on if this fails. try mainGoalDischarger g; return (true, names, #[]) catch _ => pure () | some tpl => -- B. If there is a template: -- (i) if the template is `?_` (or `?_ x1 x2`, created by entering binders) -- then try to resolve the goal by the provided tactic `mainGoalDischarger`; -- if this fails, stop and report the existing goal. if let .mvar mvarId := tpl.getAppFn then if let .syntheticOpaque ← mvarId.getKind then try mainGoalDischarger g; return (true, names, #[]) catch _ => return (false, names, #[g]) -- (ii) if the template is *not* `?_` then continue on. -- Check that the goal is of the form `rel (lhsHead _ ... _) (rhsHead _ ... _)` let .app (.app rel lhs) rhs ← withReducible g.getType' | throwError "gcongr failed, not a relation" let some relName := rel.getAppFn.constName? | throwError "gcongr failed, relation head {rel} is not a constant" let (some lhsHead, lhsArgs) := lhs.withApp fun e a => (e.constName?, a) | if template.isNone then return (false, names, #[g]) throwError "gcongr failed, {lhs} is not a constant" let (some rhsHead, rhsArgs) := rhs.withApp fun e a => (e.constName?, a) | if template.isNone then return (false, names, #[g]) throwError "gcongr failed, {rhs} is not a constant" -- B. If there is a template, check that it is of the form `tplHead _ ... 
_` and that -- `tplHead = lhsHead = rhsHead` let tplArgs ← if let some tpl := template then let (some tplHead, tplArgs) := tpl.withApp fun e a => (e.constName?, a) | throwError "gcongr failed, {tpl} is not a constant" unless tplHead == lhsHead && tplArgs.size == rhsArgs.size do throwError "expected {tplHead}, got {lhsHead}\n{lhs}" unless tplHead == rhsHead && tplArgs.size == rhsArgs.size do throwError "expected {tplHead}, got {rhsHead}\n{rhs}" -- and also build an array of `Expr` corresponding to the arguments `_ ... _` to `tplHead` in -- the template (these will be used in recursive calls later), and an array of booleans -- according to which of these contain `?_` tplArgs.mapM fun tpl => do let mctx ← getMCtx let hasMVar := tpl.findMVar? fun mvarId => if let some mdecl := mctx.findDecl? mvarId then mdecl.kind matches .syntheticOpaque else false pure (some tpl, hasMVar.isSome) -- A. If there is no template, check that `lhs = rhs` else unless lhsHead == rhsHead && lhsArgs.size == rhsArgs.size do -- (if not, stop and report the existing goal) return (false, names, #[g]) -- and also build an array of booleans according to which arguments `_ ... _` to the head -- function differ between the LHS and RHS. We treat always treat proofs as being the same -- (even if they have differing types). (lhsArgs.zip rhsArgs).mapM fun (lhsArg, rhsArg) => do let isSame ← withReducibleAndInstances <| return (← isDefEq lhsArg rhsArg) || ((← isProof lhsArg) && (← isProof rhsArg)) return (none, !isSame) -- Name the array of booleans `varyingArgs`: this records which arguments to the head function are -- supposed to vary, according to the template (if there is one), and in the absence of a template -- to record which arguments to the head function differ between the two sides of the goal. let varyingArgs := tplArgs.map (·.2) if varyingArgs.all not then throwError "try rfl" let s ← saveState let mut ex? 
:= none -- Look up the `@[gcongr]` lemmas whose conclusion has the same relation and head function as -- the goal and whether the boolean-array of varying/nonvarying arguments of such -- a lemma matches `varyingArgs`. for lem in (gcongrExt.getState (← getEnv)).findD (relName, lhsHead, varyingArgs) #[] do let gs ← try -- Try `apply`-ing such a lemma to the goal. Except.ok <$> g.apply (← mkConstWithFreshMVarLevels lem.declName) catch e => pure (Except.error e) match gs with | .error e => -- If the `apply` fails, go on to try to apply the next matching lemma. -- If all the matching lemmas fail to `apply`, we will report (somewhat arbitrarily) the -- error message on the first failure, so stash that. ex? := ex? <|> (some (← saveState, e)) s.restore | .ok gs => let some e ← getExprMVarAssignment? g | panic! "unassigned?" let args := e.getAppArgs let mut subgoals := #[] let mut names := names -- If the `apply` succeeds, iterate over `(i, j)` belonging to the lemma's `mainSubgoal` -- list: here `i` is an index in the lemma's array of antecedents, and `j` is an index in -- the array of arguments to the head function in the conclusion of the lemma (this should -- be the same as the head function of the LHS and RHS of our goal), such that the `i`-th -- antecedent to the lemma is a relation between the LHS and RHS `j`-th inputs to the head -- function in the goal. for (i, j) in lem.mainSubgoals do -- We anticipate that such a "main" subgoal should not have been solved by the `apply` by -- unification ... let some (.mvar mvarId) := args[i]? | panic! "what kind of lemma is this?" -- Introduce all variables and hypotheses in this subgoal. let (names2, _vs, mvarId) ← mvarId.introsWithBinderIdents names -- B. 
If there is a template, look up the part of the template corresponding to the `j`-th -- input to the head function let tpl ← tplArgs[j]!.1.mapM fun e => do let (_vs, _, e) ← lambdaMetaTelescope e pure e -- Recurse: call ourself (`Lean.MVarId.gcongr`) on the subgoal with (if available) the -- appropriate template let (_, names2, subgoals2) ← mvarId.gcongr tpl names2 mainGoalDischarger sideGoalDischarger (names, subgoals) := (names2, subgoals ++ subgoals2) let mut out := #[] -- Also try the discharger on any "side" (i.e., non-"main") goals which were not resolved -- by the `apply`. for g in gs do if !(← g.isAssigned) && !subgoals.contains g then try sideGoalDischarger g catch _ => out := out.push g -- Return all unresolved subgoals, "main" or "side" return (true, names, out ++ subgoals) -- A. If there is no template, and there was no `@[gcongr]` lemma which matched the goal, -- report this goal back. if template.isNone then return (false, names, #[g]) let some (sErr, e) := ex? -- B. If there is a template, and there was no `@[gcongr]` lemma which matched the template, -- fail. | throwError "gcongr failed, no @[gcongr] lemma applies for the template portion \ {template} and the relation {rel}" -- B. If there is a template, and there was a `@[gcongr]` lemma which matched the template, but -- it was not possible to `apply` that lemma, then report the error message from `apply`-ing that -- lemma. sErr.restore throw e /-- The `gcongr` tactic applies "generalized congruence" rules, reducing a relational goal between a LHS and RHS matching the same pattern to relational subgoals between the differing inputs to the pattern. 
For example, ``` example {a b x c d : ℝ} (h1 : a + 1 ≤ b + 1) (h2 : c + 2 ≤ d + 2) : x ^ 2 * a + c ≤ x ^ 2 * b + d := by gcongr · linarith · linarith ``` This example has the goal of proving the relation `≤` between a LHS and RHS both of the pattern ``` x ^ 2 * ?_ + ?_ ``` (with inputs `a`, `c` on the left and `b`, `d` on the right); after the use of `gcongr`, we have the simpler goals `a ≤ b` and `c ≤ d`. A pattern can be provided explicitly; this is useful if a non-maximal match is desired: ``` example {a b c d x : ℝ} (h : a + c + 1 ≤ b + d + 1) : x ^ 2 * (a + c) + 5 ≤ x ^ 2 * (b + d) + 5 := by gcongr x ^ 2 * ?_ + 5 linarith ``` The "generalized congruence" rules used are the library lemmas which have been tagged with the attribute `@[gcongr]`. For example, the first example constructs the proof term ``` add_le_add (mul_le_mul_of_nonneg_left _ (pow_bit0_nonneg x 1)) _ ``` using the generalized congruence lemmas `add_le_add` and `mul_le_mul_of_nonneg_left`. The tactic attempts to discharge side goals to these "generalized congruence" lemmas (such as the side goal `0 ≤ x ^ 2` in the above application of `mul_le_mul_of_nonneg_left`) using the tactic `gcongr_discharger`, which wraps `positivity` but can also be extended. Side goals not discharged in this way are left for the user. -/ elab "gcongr" template:(colGt term)? withArg:((" with " (colGt binderIdent)+)?) : tactic => do let g ← getMainGoal g.withContext do let .app (.app _rel lhs) _rhs ← withReducible g.getType' | throwError "gcongr failed, not a relation" -- Elaborate the template (e.g. `x * ?_ + _`), if the user gave one let template ← template.mapM fun e => do Term.elabTerm e (← inferType lhs) -- Get the names from the `with x y z` list let names := (withArg.raw[1].getArgs.map TSyntax.mk).toList -- Time to actually run the core tactic `Lean.MVarId.gcongr`! 
let (progress, _, unsolvedGoalStates) ← g.gcongr template names if progress then replaceMainGoal unsolvedGoalStates.toList else throwError "gcongr did not make progress" /-- The `rel` tactic applies "generalized congruence" rules to solve a relational goal by "substitution". For example, ``` example {a b x c d : ℝ} (h1 : a ≤ b) (h2 : c ≤ d) : x ^ 2 * a + c ≤ x ^ 2 * b + d := by rel [h1, h2] ``` In this example we "substitute" the hypotheses `a ≤ b` and `c ≤ d` into the LHS `x ^ 2 * a + c` of the goal and obtain the RHS `x ^ 2 * b + d`, thus proving the goal. The "generalized congruence" rules used are the library lemmas which have been tagged with the attribute `@[gcongr]`. For example, the first example constructs the proof term ``` add_le_add (mul_le_mul_of_nonneg_left h1 (pow_bit0_nonneg x 1)) h2 ``` using the generalized congruence lemmas `add_le_add` and `mul_le_mul_of_nonneg_left`. If there are no applicable generalized congruence lemmas, the tactic fails. The tactic attempts to discharge side goals to these "generalized congruence" lemmas (such as the side goal `0 ≤ x ^ 2` in the above application of `mul_le_mul_of_nonneg_left`) using the tactic `gcongr_discharger`, which wraps `positivity` but can also be extended. If the side goals cannot be discharged in this way, the tactic fails. -/ syntax "rel" " [" term,* "]" : tactic elab_rules : tactic | `(tactic| rel [$hyps,*]) => do let g ← getMainGoal g.withContext do let hyps ← hyps.getElems.mapM (elabTerm · none) let .app (.app _rel lhs) rhs ← withReducible g.getType' | throwError "rel failed, goal not a relation" unless ← isDefEq (← inferType lhs) (← inferType rhs) do throwError "rel failed, goal not a relation" -- The core tactic `Lean.MVarId.gcongr` will be run with main-goal discharger being the tactic -- consisting of running `Lean.MVarId.gcongrForward` (trying a term together with limited -- forward-reasoning on that term) on each of the listed terms. 
let assum g := g.gcongrForward hyps -- Time to actually run the core tactic `Lean.MVarId.gcongr`! let (_, _, unsolvedGoalStates) ← g.gcongr none [] (mainGoalDischarger := assum) match unsolvedGoalStates.toList with -- if all goals are solved, succeed! | [] => pure () -- if not, fail and report the unsolved goals | unsolvedGoalStates => do let unsolvedGoals ← @List.mapM MetaM _ _ _ MVarId.getType unsolvedGoalStates let g := Lean.MessageData.joinSep (unsolvedGoals.map Lean.MessageData.ofExpr) Format.line throwError "rel failed, cannot prove goal by 'substituting' the listed relationships. \ The steps which could not be automatically justified were:\n{g}"
Tactic\GCongr\ForwardAttr.lean
/-
Copyright (c) 2023 Mario Carneiro, Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Heather Macbeth
-/
import Batteries.Tactic.Basic

/-!
# Environment extension for the forward-reasoning part of the `gcongr` tactic
-/

open Lean Meta Elab Tactic

namespace Mathlib.Tactic.GCongr

/-- An extension for `gcongr_forward`. -/
structure ForwardExt where
  -- NOTE(review): extensions appear to try to close `goal` using the hypothesis `h`,
  -- throwing on failure — confirm against the registered extensions.
  eval (h : Expr) (goal : MVarId) : MetaM Unit

/-- Read a `gcongr_forward` extension from a declaration of the right type. -/
def mkForwardExt (n : Name) : ImportM ForwardExt := do
  let { env, opts, .. } ← read
  -- evaluate the named constant as a `ForwardExt` value; fails if `n` has the wrong type
  IO.ofExcept <| unsafe env.evalConstCheck ForwardExt opts ``ForwardExt n

/-- Environment extensions for `gcongrForward` declarations -/
initialize forwardExt : PersistentEnvExtension Name (Name × ForwardExt)
    (List Name × List (Name × ForwardExt)) ←
  registerPersistentEnvExtension {
    mkInitial := pure ([], {})
    -- on import, re-evaluate each stored declaration name back into a `ForwardExt` value
    addImportedFn := fun s => do
      let dt ← s.foldlM (init := {}) fun dt s => s.foldlM (init := dt) fun dt n => do
        return (n, ← mkForwardExt n) :: dt
      pure ([], dt)
    addEntryFn := fun (entries, s) (n, ext) => (n :: entries, (n, ext) :: s)
    -- only the declaration names are serialized; the values are rebuilt on import
    exportEntriesFn := fun s => s.1.reverse.toArray
  }

initialize registerBuiltinAttribute {
  name := `gcongr_forward
  descr := "adds a gcongr_forward extension"
  applicationTime := .afterCompilation
  add := fun declName stx kind => match stx with
    | `(attr| gcongr_forward) => do
      unless kind == AttributeKind.global do
        throwError "invalid attribute 'gcongr_forward', must be global"
      let env ← getEnv
      unless (env.getModuleIdxFor? declName).isNone do
        throwError "invalid attribute 'gcongr_forward', declaration is in an imported module"
      if (IR.getSorryDep env declName).isSome then return -- ignore in progress definitions
      let ext ← mkForwardExt declName
      setEnv <| forwardExt.addEntry env (declName, ext)
    | _ => throwUnsupportedSyntax
}
Tactic\Linarith\Datatypes.lean
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Tactic.Linarith.Lemmas
import Mathlib.Tactic.Ring.Basic
import Mathlib.Util.SynthesizeUsing

/-!
# Datatypes for `linarith`

Some of the data structures here are used in multiple parts of the tactic.
We split them into their own file.

This file also contains a few convenient auxiliary functions.
-/

open Lean Elab Tactic Meta Qq

initialize registerTraceClass `linarith
initialize registerTraceClass `linarith.detail

namespace Linarith

/-- A shorthand for tracing the types of a list of proof terms
when the `trace.linarith` option is set to true. -/
def linarithTraceProofs {α} [ToMessageData α] (s : α) (l : List Expr) : MetaM Unit := do
  trace[linarith] "{s}"
  -- trace the (metavariable-instantiated) types of the proofs, not the proof terms themselves
  trace[linarith] (← l.mapM fun e => do instantiateMVars (← inferType e))

/-! ### Linear expressions -/

/-- A linear expression is a list of pairs of variable indices and coefficients,
representing the sum of the products of each coefficient with its corresponding variable.

Some functions on `Linexp` assume that `n : Nat` occurs at most once as the first element of a
pair, and that the list is sorted in decreasing order of the first argument.
This is not enforced by the type but the operations here preserve it. -/
abbrev Linexp : Type := List (Nat × Int)

namespace Linexp

/--
Add two `Linexp`s together componentwise.
Preserves sorting and uniqueness of the first argument.
-/
-- This is a merge of two lists sorted in decreasing key order; entries whose coefficients
-- cancel to zero are dropped, keeping the representation canonical.
partial def add : Linexp → Linexp → Linexp
  | [], a => a
  | a, [] => a
  | (a@(n1,z1)::t1), (b@(n2,z2)::t2) =>
    if n1 < n2 then b::add (a::t1) t2
    else if n2 < n1 then a::add t1 (b::t2)
    else
      let sum := z1 + z2
      if sum = 0 then add t1 t2
      else (n1, sum)::add t1 t2

/-- `l.scale c` scales the values in `l` by `c` without modifying the order or keys.
-/
def cmp : Ineq → Ineq → Ordering
  | eq, eq => Ordering.eq
  | eq, _ => Ordering.lt
  | le, le => Ordering.eq
  | le, lt => Ordering.lt
  | lt, lt => Ordering.eq
  | _, _ => Ordering.gt

/-- Prints an `Ineq` as the corresponding infix symbol. -/
def toString : Ineq → String
  | eq => "="
  | le => "≤"
  | lt => "<"

/-- Finds the name of a multiplicative lemma corresponding to an inequality strength. -/
-- These lemma names are consumed by `mkSingleCompZeroOf` below to justify scaling a
-- comparison by a positive constant.
def toConstMulName : Ineq → Name
  | lt => ``mul_neg
  | le => ``mul_nonpos
  | eq => ``mul_eq

instance : ToString Ineq := ⟨toString⟩

instance : ToFormat Ineq := ⟨fun i => Ineq.toString i⟩

end Ineq

/-! ### Comparisons with 0 -/

/--
The main datatype for FM elimination.
Variables are represented by natural numbers, each of which has an integer coefficient.
Index 0 is reserved for constants, i.e. `coeffs.find 0` is the coefficient of 1.
The represented term is `coeffs.sum (fun ⟨k, v⟩ ↦ v * Var[k])`.
str determines the strength of the comparison -- is it < 0, ≤ 0, or = 0?
-/
structure Comp : Type where
  /-- The strength of the comparison, `<`, `≤`, or `=`. -/
  str : Ineq
  /-- The coefficients of the comparison, stored as list of pairs `(i, a)`,
  where `i` is the index of a recorded atom, and `a` is the coefficient. -/
  coeffs : Linexp
  deriving Inhabited, Repr

/-- `c.vars` returns the list of variables that appear in the linear expression contained in `c`. -/
def Comp.vars : Comp → List Nat :=
  Linexp.vars ∘ Comp.coeffs

/-- `c.coeffOf a` projects the coefficient of variable `a` out of `c`. -/
def Comp.coeffOf (c : Comp) (a : Nat) : Int :=
  c.coeffs.zfind a

/-- `c.scale n` scales the coefficients of `c` by `n`. -/
-- Note the scale factor is a `Nat` (coerced to `Int` for `Linexp.scale`): scaling by a
-- nonnegative constant preserves the direction of the comparison.
def Comp.scale (c : Comp) (n : Nat) : Comp :=
  { c with coeffs := c.coeffs.scale n }

/-- `Comp.add c1 c2` adds the expressions represented by `c1` and `c2`.
The coefficient of variable `a` in `c1.add c2`
is the sum of the coefficients of `a` in `c1` and `c2`. -/
def Comp.add (c1 c2 : Comp) : Comp :=
  ⟨c1.str.max c2.str, c1.coeffs.add c2.coeffs⟩

/-- `Comp` has a lex order. First the `ineq`s are compared, then the `coeff`s. -/
def Comp.cmp : Comp → Comp → Ordering
  | ⟨str1, coeffs1⟩, ⟨str2, coeffs2⟩ =>
    match str1.cmp str2 with
    | Ordering.lt => Ordering.lt
    | Ordering.gt => Ordering.gt
    | Ordering.eq => coeffs1.cmp coeffs2

/--
A `Comp` represents a contradiction if its expression has no coefficients and its strength is <,
that is, it represents the fact `0 < 0`.
-/
def Comp.isContr (c : Comp) : Bool := c.coeffs.isEmpty && c.str = Ineq.lt

instance Comp.ToFormat : ToFormat Comp :=
  ⟨fun p => format p.coeffs ++ toString p.str ++ "0"⟩

/-! ### Parsing into linear form -/


/-! ### Control -/

/--
A preprocessor transforms a proof of a proposition into a proof of a different proposition.
The return type is `List Expr`, since some preprocessing steps may create multiple new hypotheses,
and some may remove a hypothesis from the list.
A "no-op" preprocessor should return its input as a singleton list.
-/
structure Preprocessor : Type where
  /-- The name of the preprocessor, used in trace output. -/
  name : String
  /-- Replace a hypothesis by a list of hypotheses. These expressions are the proof terms. -/
  transform : Expr → MetaM (List Expr)

/--
Some preprocessors need to examine the full list of hypotheses instead of working item by item.
As with `Preprocessor`, the input to a `GlobalPreprocessor` is replaced by, not added to, its
output.
-/
structure GlobalPreprocessor : Type where
  /-- The name of the global preprocessor, used in trace output. -/
  name : String
  /-- Replace the collection of all hypotheses with new hypotheses.
  These expressions are proof terms. -/
  transform : List Expr → MetaM (List Expr)

/--
Some preprocessors perform branching case splits. A `Branch` is used to track one of these case
splits.
The first component, an `MVarId`, is the goal corresponding to this branch of the split,
given as a metavariable. The `List Expr` component is the list of hypotheses for `linarith`
in this branch.
-/
def Branch : Type := MVarId × List Expr

/--
Some preprocessors perform branching case splits.
A `GlobalBranchingPreprocessor` produces a list of branches to run.
Each branch is independent, so hypotheses that appear in multiple branches should be duplicated.
The preprocessor is responsible for making sure that each branch contains the correct goal
metavariable.
-/
structure GlobalBranchingPreprocessor : Type where
  /-- The name of the global branching preprocessor, used in trace output. -/
  name : String
  /-- Given a goal, and a list of hypotheses, produce a list of pairs (consisting of a goal and
  list of hypotheses). -/
  transform : MVarId → List Expr → MetaM (List Branch)

/--
A `Preprocessor` lifts to a `GlobalPreprocessor` by folding it over the input list.
-/
def Preprocessor.globalize (pp : Preprocessor) : GlobalPreprocessor where
  name := pp.name
  -- `foldrM` preserves the relative order of the transformed hypotheses
  transform := List.foldrM (fun e ret => do return (← pp.transform e) ++ ret) []

/--
A `GlobalPreprocessor` lifts to a `GlobalBranchingPreprocessor` by producing only one branch.
-/
def GlobalPreprocessor.branching (pp : GlobalPreprocessor) : GlobalBranchingPreprocessor where
  name := pp.name
  transform := fun g l => do return [⟨g, ← pp.transform l⟩]

/--
`process pp l` runs `pp.transform` on `l` and returns the result,
tracing the result if `trace.linarith` is on.
-/
def GlobalBranchingPreprocessor.process (pp : GlobalBranchingPreprocessor) (g : MVarId)
    (l : List Expr) : MetaM (List Branch) := g.withContext do
  let branches ← pp.transform g l
  -- only emit the extra trace output when the preprocessor actually split the goal
  if branches.length > 1 then
    trace[linarith] "Preprocessing: {pp.name} has branched, with branches:"
  for ⟨goal, hyps⟩ in branches do
    goal.withContext do
      linarithTraceProofs m!"Preprocessing: {pp.name}" hyps
  return branches

instance PreprocessorToGlobalBranchingPreprocessor :
    Coe Preprocessor GlobalBranchingPreprocessor :=
  ⟨GlobalPreprocessor.branching ∘ Preprocessor.globalize⟩

instance GlobalPreprocessorToGlobalBranchingPreprocessor :
    Coe GlobalPreprocessor GlobalBranchingPreprocessor :=
  ⟨GlobalPreprocessor.branching⟩

/--
A `CertificateOracle` provides a function
`produceCertificate : List Comp → Nat → MetaM (HashMap Nat Nat)`.

The default `CertificateOracle` used by `linarith` is
`Linarith.CertificateOracle.simplexAlgorithmSparse`.
`Linarith.CertificateOracle.simplexAlgorithmDense` and `Linarith.CertificateOracle.fourierMotzkin`
are also available (though the Fourier-Motzkin oracle has some bugs).
-/
structure CertificateOracle : Type where
  /-- `produceCertificate hyps max_var` tries to derive a contradiction from the comparisons in
  `hyps` by eliminating all variables ≤ `max_var`.
  If successful, it returns a map `coeff : Nat → Nat` as a certificate.
  This map represents that we can find a contradiction by taking the sum
  `∑ (coeff i) * hyps[i]`. -/
  produceCertificate (hyps : List Comp) (max_var : Nat) : MetaM (Batteries.HashMap Nat Nat)

/-! ### Auxiliary functions

These functions are used by multiple modules, so we put them here for accessibility.
-/

/--
`getRelSides e` returns the left and right hand sides of `e` if `e` is a comparison,
and fails otherwise.
This function is more naturally in the `Option` monad, but it is convenient to put in `MetaM`
for compositionality.
-/
def getRelSides (e : Expr) : MetaM (Expr × Expr) := do
  let e ← instantiateMVars e
  match e.getAppFnArgs with
  | (``LT.lt, #[_, _, a, b]) => return (a, b)
  | (``LE.le, #[_, _, a, b]) => return (a, b)
  | (``Eq, #[_, a, b]) => return (a, b)
  | (``GE.ge, #[_, _, a, b]) => return (a, b)
  | (``GT.gt, #[_, _, a, b]) => return (a, b)
  | _ => throwError "Not a comparison (getRelSides) : {e}"

/--
`parseCompAndExpr e` checks if `e` is of the form `t < 0`, `t ≤ 0`, or `t = 0`.
If it is, it returns the comparison along with `t`.
-/
def parseCompAndExpr (e : Expr) : MetaM (Ineq × Expr) := do
  let e ← instantiateMVars e
  match e.getAppFnArgs with
  | (``LT.lt, #[_, _, e, z]) => if z.zero? then return (Ineq.lt, e) else throwNotZero z
  | (``LE.le, #[_, _, e, z]) => if z.zero? then return (Ineq.le, e) else throwNotZero z
  | (``Eq, #[_, e, z]) => if z.zero? then return (Ineq.eq, e) else throwNotZero z
  | _ => throwError "invalid comparison: {e}"
where
  /-- helper function for error message -/
  throwNotZero (z : Expr) := throwError "invalid comparison, rhs not zero: {z}"

/--
`mkSingleCompZeroOf c h` assumes that `h` is a proof of `t R 0`.
It produces a pair `(R', h')`, where `h'` is a proof of `c*t R' 0`.
Typically `R` and `R'` will be the same, except when `c = 0`, in which case `R'` is `=`.
If `c = 1`, `h'` is the same as `h` -- specifically, it does *not* change the type to `1*t R 0`.
-/
def mkSingleCompZeroOf (c : Nat) (h : Expr) : MetaM (Ineq × Expr) := do
  let tp ← inferType h
  let (iq, e) ← parseCompAndExpr tp
  if c = 0 then do
    -- scaling by zero collapses any comparison to the equality `0 * t = 0`
    let e' ← mkAppM ``zero_mul #[e]
    return (Ineq.eq, e')
  else if c = 1 then return (iq, h)
  else do
    -- build a `norm_num`-verified proof that the (numeral) coefficient is positive,
    -- then apply the multiplicative lemma matching the strength of the comparison
    let tp ← inferType (← getRelSides (← inferType h)).2
    let cpos : Q(Prop) ← mkAppM ``GT.gt #[(← tp.ofNat c), (← tp.ofNat 0)]
    let ex ← synthesizeUsingTactic' cpos (← `(tactic| norm_num))
    let e' ← mkAppM iq.toConstMulName #[h, ex]
    return (iq, e')

end Linarith
Tactic\Linarith\Frontend.lean
/- Copyright (c) 2018 Robert Y. Lewis. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Robert Y. Lewis -/ import Mathlib.Control.Basic import Mathlib.Tactic.Linarith.Verification import Mathlib.Tactic.Linarith.Preprocessing import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm /-! # `linarith`: solving linear arithmetic goals `linarith` is a tactic for solving goals with linear arithmetic. Suppose we have a set of hypotheses in `n` variables `S = {a₁x₁ + a₂x₂ + ... + aₙxₙ R b₁x₁ + b₂x₂ + ... + bₙxₙ}`, where `R ∈ {<, ≤, =, ≥, >}`. Our goal is to determine if the inequalities in `S` are jointly satisfiable, that is, if there is an assignment of values to `x₁, ..., xₙ` such that every inequality in `S` is true. Specifically, we aim to show that they are *not* satisfiable. This amounts to proving a contradiction. If our goal is also a linear inequality, we negate it and move it to a hypothesis before trying to prove `False`. When the inequalities are over a dense linear order, `linarith` is a decision procedure: it will prove `False` if and only if the inequalities are unsatisfiable. `linarith` will also run on some types like `ℤ` that are not dense orders, but it will fail to prove `False` on some unsatisfiable problems. It will run over concrete types like `ℕ`, `ℚ`, and `ℝ`, as well as abstract types that are instances of `LinearOrderedCommRing`. ## Algorithm sketch First, the inequalities in the set `S` are rearranged into the form `tᵢ Rᵢ 0`, where `Rᵢ ∈ {<, ≤, =}` and each `tᵢ` is of the form `∑ cⱼxⱼ`. `linarith` uses an untrusted oracle to search for a certificate of unsatisfiability. The oracle searches for a list of natural number coefficients `kᵢ` such that `∑ kᵢtᵢ = 0`, where for at least one `i`, `kᵢ > 0` and `Rᵢ = <`. Given a list of such coefficients, `linarith` verifies that `∑ kᵢtᵢ = 0` using a normalization tactic such as `ring`. 
It proves that `∑ kᵢtᵢ < 0` by transitivity, since each component of the sum is either equal to, less than or equal to, or less than zero by hypothesis. This produces a contradiction. ## Preprocessing `linarith` does some basic preprocessing before running. Most relevantly, inequalities over natural numbers are cast into inequalities about integers, and rational division by numerals is canceled into multiplication. We do this so that we can guarantee the coefficients in the certificate are natural numbers, which allows the tactic to solve goals over types that are not fields. Preprocessors are allowed to branch, that is, to case split on disjunctions. `linarith` will succeed overall if it succeeds in all cases. This leads to exponential blowup in the number of `linarith` calls, and should be used sparingly. The default preprocessor set does not include case splits. ## Oracles There are two oracles that can be used in `linarith` so far. 1. **Fourier-Motzkin elimination.** This technique transforms a set of inequalities in `n` variables to an equisatisfiable set in `n - 1` variables. Once all variables have been eliminated, we conclude that the original set was unsatisfiable iff the comparison `0 < 0` is in the resulting set. While performing this elimination, we track the history of each derived comparison. This allows us to represent any comparison at any step as a positive combination of comparisons from the original set. In particular, if we derive `0 < 0`, we can find our desired list of coefficients by counting how many copies of each original comparison appear in the history. This oracle was historically implemented earlier, and is sometimes faster on small states, but it has [bugs](https://github.com/leanprover-community/mathlib4/issues/2717) and can not handle large problems. You can use it with `linarith (config := { oracle := .fourierMotzkin })`. 2. 
**Simplex Algorithm (default).** This oracle reduces the search for a unsatisfiability certificate to some Linear Programming problem. The problem is then solved by a standard Simplex Algorithm. We use [Bland's pivot rule](https://en.wikipedia.org/wiki/Bland%27s_rule) to guarantee that the algorithm terminates. The default version of the algorithm operates with sparse matrices as it is usually faster. You can invoke the dense version by `linarith (config := { oracle := .simplexAlgorithmDense })`. ## Implementation details `linarith` homogenizes numerical constants: the expression `1` is treated as a variable `t₀`. Often `linarith` is called on goals that have comparison hypotheses over multiple types. This creates multiple `linarith` problems, each of which is handled separately; the goal is solved as soon as one problem is found to be contradictory. Disequality hypotheses `t ≠ 0` do not fit in this pattern. `linarith` will attempt to prove equality goals by splitting them into two weak inequalities and running twice. But it does not split disequality hypotheses, since this would lead to a number of runs exponential in the number of disequalities in the context. The oracle is very modular. It can easily be replaced with another function of type `List Comp → ℕ → MetaM ((Batteries.HashMap ℕ ℕ))`, which takes a list of comparisons and the largest variable index appearing in those comparisons, and returns a map from comparison indices to coefficients. An alternate oracle can be specified in the `LinarithConfig` object. A variant, `nlinarith`, adds an extra preprocessing step to handle some basic nonlinear goals. There is a hook in the `LinarithConfig` configuration object to add custom preprocessing routines. The certificate checking step is *not* by reflection. `linarith` converts the certificate into a proof term of type `False`. Some of the behavior of `linarith` can be inspected with the option `set_option trace.linarith true`. 
However, both oracles mainly run outside the tactic monad, so we cannot trace intermediate
steps there.

## File structure

The components of `linarith` are spread between a number of files for the sake of organization.

* `Lemmas.lean` contains proofs of some arithmetic lemmas that are used in preprocessing and in
  verification.
* `Datatypes.lean` contains data structures that are used across multiple files, along with some
  useful auxiliary functions.
* `Preprocessing.lean` contains functions used at the beginning of the tactic to transform
  hypotheses into a shape suitable for the main routine.
* `Parsing.lean` contains functions used to compute the linear structure of an expression.
* The `Oracle` folder contains files implementing the oracles that can be used to produce a
  certificate of unsatisfiability.
* `Verification.lean` contains the certificate checking functions that produce a proof of `False`.
* `Frontend.lean` contains the control methods and user-facing components of the tactic.

## Tags

linarith, nlinarith, lra, nra, Fourier-Motzkin, linear arithmetic, linear programming
-/

open Lean Elab Tactic Meta
open Batteries

namespace Linarith

/-! ### Config objects

The config object is defined in the frontend, instead of in `Datatypes.lean`,
since the oracles must be in context to choose a default.
-/

section
open Meta

/-- A configuration object for `linarith`. -/
structure LinarithConfig : Type where
  /-- Discharger to prove that a candidate linear combination of hypothesis is zero. -/
  -- TODO There should be a def for this, rather than calling `evalTactic`?
  discharger : TacticM Unit := do evalTactic (← `(tactic| ring1))
  -- We can't actually store a `Type` here,
  -- as we want `LinarithConfig : Type` rather than ` : Type 1`,
  -- so that we can define `elabLinarithConfig : Lean.Syntax → Lean.Elab.TermElabM LinarithConfig`.
  -- For now, we simply don't support restricting the type.
  -- (restrict_type : Option Type := none)
  /-- Prove goals which are not linear comparisons by first calling `exfalso`. -/
  exfalso : Bool := true
  /-- Transparency mode for identifying atomic expressions in comparisons. -/
  transparency : TransparencyMode := .reducible
  /-- Split conjunctions in hypotheses. -/
  splitHypotheses : Bool := true
  /-- Split `≠` in hypotheses, by branching in cases `<` and `>`. -/
  splitNe : Bool := false
  /-- Override the list of preprocessors. -/
  preprocessors : List GlobalBranchingPreprocessor := defaultPreprocessors
  /-- Specify an oracle for identifying candidate contradictions.
  `.simplexAlgorithmSparse`, `.simplexAlgorithmDense`, and `.fourierMotzkin` are available. -/
  oracle : CertificateOracle := .simplexAlgorithmSparse

/--
`cfg.updateReducibility reduce_default` will change the transparency setting of `cfg` to
`default` if `reduce_default` is true. In this case, it also sets the discharger to `ring!`,
since this is typically needed when using stronger unification.
-/
def LinarithConfig.updateReducibility (cfg : LinarithConfig) (reduce_default : Bool) :
    LinarithConfig :=
  if reduce_default then
    { cfg with transparency := .default, discharger := do evalTactic (← `(tactic| ring1!)) }
  else cfg

end

/-! ### Control -/

/--
If `e` is a comparison `a R b` or the negation of a comparison `¬ a R b`, found in the target,
`getContrLemma e` returns the name of a lemma that will change the goal to an implication,
along with the type of `a` and `b`.

For example, if `e` is `(a : ℕ) < b`, returns ``(`lt_of_not_ge, ℕ)``.
-/
def getContrLemma (e : Expr) : Option (Name × Expr) :=
  match e.getAppFnArgs with
  | (``LT.lt, #[t, _, _, _]) => (``lt_of_not_ge, t)
  | (``LE.le, #[t, _, _, _]) => (``le_of_not_gt, t)
  | (``Eq, #[t, _, _]) => (``eq_of_not_lt_of_not_gt, t)
  | (``Ne, #[t, _, _]) => (``Not.intro, t)
  | (``GE.ge, #[t, _, _, _]) => (``le_of_not_gt, t)
  | (``GT.gt, #[t, _, _, _]) => (``lt_of_not_ge, t)
  | (``Not, #[e']) => match e'.getAppFnArgs with
    -- a negated comparison is moved to a hypothesis directly via `Not.intro`
    | (``LT.lt, #[t, _, _, _]) => (``Not.intro, t)
    | (``LE.le, #[t, _, _, _]) => (``Not.intro, t)
    | (``Eq, #[t, _, _]) => (``Not.intro, t)
    | (``GE.ge, #[t, _, _, _]) => (``Not.intro, t)
    | (``GT.gt, #[t, _, _, _]) => (``Not.intro, t)
    | _ => none
  | _ => none

/--
`applyContrLemma` inspects the target to see if it can be moved to a hypothesis by negation.
For example, a goal `⊢ a ≤ b` can become `a > b ⊢ false`.
If this is the case, it applies the appropriate lemma and introduces the new hypothesis.
It returns the type of the terms in the comparison (e.g. the type of `a` and `b` above) and the
newly introduced local constant.
Otherwise returns `none`.
-/
def applyContrLemma (g : MVarId) : MetaM (Option (Expr × Expr) × MVarId) := do
  match getContrLemma (← withReducible g.getType') with
  | some (nm, tp) => do
    -- applying the contradiction lemma must produce exactly one new goal
    let [g] ← g.apply (← mkConst' nm) | failure
    let (f, g) ← g.intro1P
    return (some (tp, .fvar f), g)
  | none => return (none, g)

/-- A map of keys to values, where the keys are `Expr` up to defeq and one key can be
associated to multiple values. -/
abbrev ExprMultiMap α := Array (Expr × List α)

/-- Retrieves the list of values at a key, as well as the index of the key for later modification.
(If the key is not in the map it returns `self.size` as the index.) -/
-- Linear scan with a defeq check per entry; the map is expected to stay small.
def ExprMultiMap.find {α : Type} (self : ExprMultiMap α) (k : Expr) : MetaM (Nat × List α) := do
  for h : i in [:self.size] do
    let (k', vs) := self[i]'h.2
    if ← isDefEq k' k then
      return (i, vs)
  return (self.size, [])

/-- Insert a new value into the map at key `k`.
This does a defeq check with all other keys in the map. -/
def ExprMultiMap.insert {α : Type} (self : ExprMultiMap α) (k : Expr) (v : α) :
    MetaM (ExprMultiMap α) := do
  for h : i in [:self.size] do
    if ← isDefEq (self[i]'h.2).1 k then
      -- key already present (up to defeq): prepend the value to its list
      return self.modify i fun (k, vs) => (k, v::vs)
  return self.push (k, [v])

/--
`partitionByType l` takes a list `l` of proofs of comparisons. It sorts these proofs by
the type of the variables in the comparison, e.g. `(a : ℚ) < 1` and `(b : ℤ) > c` will be separated.
Returns a map from a type to a list of comparisons over that type.
-/
def partitionByType (l : List Expr) : MetaM (ExprMultiMap Expr) :=
  l.foldlM (fun m h => do m.insert (← typeOfIneqProof h) h) #[]

/--
Given a list `ls` of lists of proofs of comparisons, `findLinarithContradiction cfg ls` will try to
prove `False` by calling `linarith` on each list in succession. It will stop at the first proof of
`False`, and fail if no contradiction is found with any list.
-/
-- `firstM` returns the first success, so later lists are not attempted once one succeeds.
def findLinarithContradiction (cfg : LinarithConfig) (g : MVarId) (ls : List (List Expr)) :
    MetaM Expr :=
  try
    ls.firstM (fun L => proveFalseByLinarith cfg.transparency cfg.oracle cfg.discharger g L)
  catch e => throwError "linarith failed to find a contradiction\n{g}\n{e.toMessageData}"

/--
Given a list `hyps` of proofs of comparisons, `runLinarith cfg hyps prefType`
preprocesses `hyps` according to the list of preprocessors in `cfg`.
This results in a list of branches (typically only one),
each of which must succeed in order to close the goal.

In each branch, we partition the list of hypotheses by type, and run `linarith` on each class
in the partition; one of these must succeed in order for `linarith` to succeed on this branch.

If `prefType` is given, it will first use the class of proofs of comparisons over that type.
-/
-- If it succeeds, the passed metavariable should have been assigned.
def runLinarith (cfg : LinarithConfig) (prefType : Option Expr) (g : MVarId) (hyps : List Expr) : MetaM Unit := do let singleProcess (g : MVarId) (hyps : List Expr) : MetaM Expr := g.withContext do linarithTraceProofs s!"after preprocessing, linarith has {hyps.length} facts:" hyps let hyp_set ← partitionByType hyps trace[linarith] "hypotheses appear in {hyp_set.size} different types" if let some t := prefType then let (i, vs) ← hyp_set.find t proveFalseByLinarith cfg.transparency cfg.oracle cfg.discharger g vs <|> findLinarithContradiction cfg g ((hyp_set.eraseIdx i).toList.map (·.2)) else findLinarithContradiction cfg g (hyp_set.toList.map (·.2)) let mut preprocessors := cfg.preprocessors if cfg.splitNe then preprocessors := Linarith.removeNe :: preprocessors if cfg.splitHypotheses then preprocessors := Linarith.splitConjunctions.globalize.branching :: preprocessors let branches ← preprocess preprocessors g hyps for (g, es) in branches do let r ← singleProcess g es g.assign r -- Verify that we closed the goal. Failure here should only result from a bad `Preprocessor`. (Expr.mvar g).ensureHasNoMVars -- /-- -- `filterHyps restr_type hyps` takes a list of proofs of comparisons `hyps`, and filters it -- to only those that are comparisons over the type `restr_type`. -- -/ -- def filterHyps (restr_type : Expr) (hyps : List Expr) : MetaM (List Expr) := -- hyps.filterM (fun h => do -- let ht ← inferType h -- match getContrLemma ht with -- | some (_, htype) => isDefEq htype restr_type -- | none => return false) /-- `linarith only_on hyps cfg` tries to close the goal using linear arithmetic. It fails if it does not succeed at doing this. * `hyps` is a list of proofs of comparisons to include in the search. * If `only_on` is true, the search will be restricted to `hyps`. Otherwise it will use all comparisons in the local context. * If `cfg.transparency := semireducible`, it will unfold semireducible definitions when trying to match atomic expressions. 
-/ partial def linarith (only_on : Bool) (hyps : List Expr) (cfg : LinarithConfig := {}) (g : MVarId) : MetaM Unit := g.withContext do -- if the target is an equality, we run `linarith` twice, to prove ≤ and ≥. if (← whnfR (← instantiateMVars (← g.getType))).isEq then trace[linarith] "target is an equality: splitting" if let some [g₁, g₂] ← try? (g.apply (← mkConst' ``eq_of_not_lt_of_not_gt)) then linarith only_on hyps cfg g₁ linarith only_on hyps cfg g₂ return /- If we are proving a comparison goal (and not just `False`), we consider the type of the elements in the comparison to be the "preferred" type. That is, if we find comparison hypotheses in multiple types, we will run `linarith` on the goal type first. In this case we also receive a new variable from moving the goal to a hypothesis. Otherwise, there is no preferred type and no new variable; we simply change the goal to `False`. -/ let (g, target_type, new_var) ← match ← applyContrLemma g with | (none, g) => if cfg.exfalso then trace[linarith] "using exfalso" pure (← g.exfalso, none, none) else pure (g, none, none) | (some (t, v), g) => pure (g, some t, some v) g.withContext do -- set up the list of hypotheses, considering the `only_on` and `restrict_type` options let hyps ← (if only_on then return new_var.toList ++ hyps else return (← getLocalHyps).toList ++ hyps) -- TODO in mathlib3 we could specify a restriction to a single type. -- I haven't done that here because I don't know how to store a `Type` in `LinarithConfig`. -- There's only one use of the `restrict_type` configuration option in mathlib3, -- and it can be avoided just by using `linarith only`. linarithTraceProofs "linarith is running on the following hypotheses:" hyps runLinarith cfg target_type g hyps end Linarith /-! ### User facing functions -/ open Parser Tactic Syntax /-- Syntax for the arguments of `linarith`, after the optional `!`. -/ syntax linarithArgsRest := (config)? (&" only")? (" [" term,* "]")? 
/--
`linarith` attempts to find a contradiction between hypotheses that are linear (in)equalities.
Equivalently, it can prove a linear inequality by assuming its negation and proving `False`.

In theory, `linarith` should prove any goal that is true in the theory of linear arithmetic over
the rationals. While there is some special handling for non-dense orders like `Nat` and `Int`,
this tactic is not complete for these theories and will not prove every true goal. It will solve
goals over arbitrary types that instantiate `LinearOrderedCommRing`.

An example:
```lean
example (x y z : ℚ) (h1 : 2*x < 3*y) (h2 : -4*x + 2*z < 0)
    (h3 : 12*y - 4* z < 0) : False := by
  linarith
```

`linarith` will use all appropriate hypotheses and the negation of the goal, if applicable.
Disequality hypotheses require case splitting and are not normally considered
(see the `splitNe` option below).

`linarith [t1, t2, t3]` will additionally use proof terms `t1, t2, t3`.

`linarith only [h1, h2, h3, t1, t2, t3]` will use only the goal (if relevant), local hypotheses
`h1`, `h2`, `h3`, and proofs `t1`, `t2`, `t3`. It will ignore the rest of the local context.

`linarith!` will use a stronger reducibility setting to try to identify atoms. For example,
```lean
example (x : ℚ) : id x ≥ x := by
  linarith
```
will fail, because `linarith` will not identify `x` and `id x`. `linarith!` will.
This can sometimes be expensive.

`linarith (config := { .. })` takes a config object with five
optional arguments:
* `discharger` specifies a tactic to be used for reducing an algebraic equation in the
  proof stage. The default is `ring`. Other options include `simp` for basic
  problems.
* `transparency` controls how hard `linarith` will try to match atoms to each other.
  By default it will only unfold `reducible` definitions.
* If `splitHypotheses` is true, `linarith` will split conjunctions in the context into
  separate hypotheses.
* If `splitNe` is `true`, `linarith` will case split on disequality hypotheses.
  For a given `x ≠ y` hypothesis, `linarith` is run with both `x < y` and `x > y`,
  and so this runs linarith exponentially many times with respect to the number of
  disequality hypotheses. (`false` by default.)
* If `exfalso` is `false`, `linarith` will fail when the goal is neither an inequality nor `False`.
  (`true` by default.)
* `restrict_type` (not yet implemented in mathlib4)
  will only use hypotheses that are inequalities over `tp`. This is useful
  if you have e.g. both integer and rational valued inequalities in the local context,
  which can sometimes confuse the tactic.

A variant, `nlinarith`, does some basic preprocessing to handle some nonlinear goals.

The option `set_option trace.linarith true` will trace certain intermediate stages of the
`linarith` routine.
-/
syntax (name := linarith) "linarith" "!"? linarithArgsRest : tactic

@[inherit_doc linarith] macro "linarith!" rest:linarithArgsRest : tactic =>
  `(tactic| linarith ! $rest:linarithArgsRest)

/--
An extension of `linarith` with some preprocessing to allow it to solve some nonlinear arithmetic
problems. (Based on Coq's `nra` tactic.)

See `linarith` for the available syntax of options,
which are inherited by `nlinarith`; that is, `nlinarith!` and
`nlinarith only [h1, h2]` all work as in `linarith`.

The preprocessing is as follows:

* For every subterm `a ^ 2` or `a * a` in a hypothesis or the goal,
  the assumption `0 ≤ a ^ 2` or `0 ≤ a * a` is added to the context.
* For every pair of hypotheses `a1 R1 b1`, `a2 R2 b2` in the context, `R1, R2 ∈ {<, ≤, =}`,
  the assumption `0 R' (b1 - a1) * (b2 - a2)` is added to the context (non-recursively),
  where `R ∈ {<, ≤, =}` is the appropriate comparison derived from `R1, R2`.
-/
syntax (name := nlinarith) "nlinarith" "!"? linarithArgsRest : tactic

@[inherit_doc nlinarith] macro "nlinarith!" rest:linarithArgsRest : tactic =>
  `(tactic| nlinarith ! $rest:linarithArgsRest)

/-- Elaborate `t` in a way that is suitable for linarith. -/
def elabLinarithArg (tactic : Name) (t : Term) : TacticM Expr := Term.withoutErrToSorry do
  let (e, mvars) ← elabTermWithHoles t none tactic
  -- Proof terms passed to `linarith` must be closed: reject any with remaining metavariables.
  unless mvars.isEmpty do
    throwErrorAt t "Argument passed to {tactic} has metavariables:{indentD e}"
  return e

/--
Allow elaboration of `LinarithConfig` arguments to tactics.
-/
declare_config_elab elabLinarithConfig Linarith.LinarithConfig

elab_rules : tactic
  | `(tactic| linarith $[!%$bang]? $[$cfg]? $[only%$o]? $[[$args,*]]?) => withMainContext do
    let args ← ((args.map (TSepArray.getElems)).getD {}).mapM (elabLinarithArg `linarith)
    -- `!` strengthens the transparency used for atom matching.
    let cfg := (← elabLinarithConfig (mkOptionalNode cfg)).updateReducibility bang.isSome
    commitIfNoEx do liftMetaFinishingTactic <| Linarith.linarith o.isSome args.toList cfg

-- TODO restore this when `add_tactic_doc` is ported
-- add_tactic_doc
-- { name := "linarith",
--   category := doc_category.tactic,
--   decl_names := [`tactic.interactive.linarith],
--   tags := ["arithmetic", "decision procedure", "finishing"] }

open Linarith

elab_rules : tactic
  | `(tactic| nlinarith $[!%$bang]? $[$cfg]? $[only%$o]? $[[$args,*]]?) => withMainContext do
    let args ← ((args.map (TSepArray.getElems)).getD {}).mapM (elabLinarithArg `nlinarith)
    let cfg := (← elabLinarithConfig (mkOptionalNode cfg)).updateReducibility bang.isSome
    -- `nlinarith` is `linarith` plus the `nlinarithExtras` global preprocessor, run last.
    let cfg := { cfg with
      preprocessors := cfg.preprocessors.concat nlinarithExtras }
    commitIfNoEx do liftMetaFinishingTactic <| Linarith.linarith o.isSome args.toList cfg

-- TODO restore this when `add_tactic_doc` is ported
-- add_tactic_doc
-- { name := "nlinarith",
--   category := doc_category.tactic,
--   decl_names := [`tactic.interactive.nlinarith],
--   tags := ["arithmetic", "decision procedure", "finishing"] }
/- Tactic\Linarith\Lemmas.lean -/
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Batteries.Tactic.Lint.Basic
import Mathlib.Algebra.Order.Monoid.Unbundled.Basic
import Mathlib.Algebra.Order.Ring.Defs
import Mathlib.Algebra.Order.ZeroLEOne
import Mathlib.Data.Nat.Cast.Order.Ring
import Mathlib.Init.Data.Int.Order

/-!
# Lemmas for `linarith`.

Those in the `Linarith` namespace should stay here.

Those outside the `Linarith` namespace may be deleted as they are ported to mathlib4.
-/

namespace Linarith

universe u

theorem lt_irrefl {α : Type u} [Preorder α] {a : α} : ¬a < a := _root_.lt_irrefl a

-- The `*_of_*_of_*` lemmas below combine two comparisons with zero into a comparison with zero
-- of the sum; they are used when adding scaled hypotheses during certificate verification.

theorem eq_of_eq_of_eq {α} [OrderedSemiring α] {a b : α} (ha : a = 0) (hb : b = 0) :
    a + b = 0 := by simp [*]

theorem le_of_eq_of_le {α} [OrderedSemiring α] {a b : α} (ha : a = 0) (hb : b ≤ 0) :
    a + b ≤ 0 := by simp [*]

theorem lt_of_eq_of_lt {α} [OrderedSemiring α] {a b : α} (ha : a = 0) (hb : b < 0) :
    a + b < 0 := by simp [*]

theorem le_of_le_of_eq {α} [OrderedSemiring α] {a b : α} (ha : a ≤ 0) (hb : b = 0) :
    a + b ≤ 0 := by simp [*]

theorem lt_of_lt_of_eq {α} [OrderedSemiring α] {a b : α} (ha : a < 0) (hb : b = 0) :
    a + b < 0 := by simp [*]

-- Scaling a negative quantity by a positive coefficient preserves negativity.
theorem mul_neg {α} [StrictOrderedRing α] {a b : α} (ha : a < 0) (hb : 0 < b) : b * a < 0 :=
  have : (-b)*a > 0 := mul_pos_of_neg_of_neg (neg_neg_of_pos hb) ha
  neg_of_neg_pos (by simpa)

-- Scaling a nonpositive quantity by a positive coefficient preserves nonpositivity.
theorem mul_nonpos {α} [OrderedRing α] {a b : α} (ha : a ≤ 0) (hb : 0 < b) : b * a ≤ 0 :=
  have : (-b)*a ≥ 0 := mul_nonneg_of_nonpos_of_nonpos (le_of_lt (neg_neg_of_pos hb)) ha
  by simpa

-- used alongside `mul_neg` and `mul_nonpos`, so has the same argument pattern for uniformity
@[nolint unusedArguments]
theorem mul_eq {α} [OrderedSemiring α] {a b : α} (ha : a = 0) (_ : 0 < b) : b * a = 0 := by
  simp [*]

lemma eq_of_not_lt_of_not_gt {α} [LinearOrder α] (a b : α) (h1 : ¬ a < b) (h2 : ¬ b < a) :
    a = b :=
  le_antisymm (le_of_not_gt h2) (le_of_not_gt h1)

-- used in the `nlinarith` normalization steps. The `_` argument is for uniformity.
@[nolint unusedArguments]
lemma mul_zero_eq {α} {R : α → α → Prop} [Semiring α] {a b : α} (_ : R a 0) (h : b = 0) :
    a * b = 0 := by simp [h]

-- used in the `nlinarith` normalization steps. The `_` argument is for uniformity.
@[nolint unusedArguments]
lemma zero_mul_eq {α} {R : α → α → Prop} [Semiring α] {a b : α} (h : a = 0) (_ : R b 0) :
    a * b = 0 := by simp [h]

-- Used by the `natToInt` preprocessor to record that casts of naturals are nonnegative.
lemma natCast_nonneg (α : Type u) [OrderedSemiring α] (n : ℕ) : (0 : α) ≤ n := Nat.cast_nonneg n

@[deprecated (since := "2024-04-17")] alias nat_cast_nonneg := natCast_nonneg

end Linarith

section
open Function

-- These lemmas can be removed when their originals are ported.

theorem lt_zero_of_zero_gt {α : Type*} [Zero α] [LT α] {a : α} (h : 0 > a) : a < 0 := h

theorem le_zero_of_zero_ge {α : Type*} [Zero α] [LE α] {a : α} (h : 0 ≥ a) : a ≤ 0 := h

end
/- Tactic\Linarith\Parsing.lean -/
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Tactic.Linarith.Datatypes

/-!
# Parsing input expressions into linear form

`linarith` computes the linear form of its input expressions,
assuming (without justification) that the type of these expressions
is a commutative semiring.

It identifies atoms up to ring-equivalence: that is, `(y*3)*x` will be identified `3*(x*y)`,
where the monomial `x*y` is the linear atom.

* Variables are represented by natural numbers.
* Monomials are represented by `Monom := RBMap ℕ ℕ`.
  The monomial `1` is represented by the empty map.
* Linear combinations of monomials are represented by `Sum := RBMap Monom ℤ`.

All input expressions are converted to `Sum`s, preserving the map from expressions to variables.
We then discard the monomial information, mapping each distinct monomial to a natural number.
The resulting `RBMap ℕ ℤ` represents the ring-normalized linear form of the expression.

This is ultimately converted into a `Linexp` in the obvious way.

`linearFormsAndMaxVar` is the main entry point into this file. Everything else is contained.
-/

open Linarith.Ineq Batteries

section
open Lean Elab Tactic Meta

/-- `findDefeq red m e` looks for a key in `m` that is defeq to `e` (up to transparency `red`),
and returns the value associated with this key if it exists.
Otherwise, it fails. -/
def List.findDefeq {v : Type} (red : TransparencyMode) (m : List (Expr × v)) (e : Expr) :
    MetaM v := do
  if let some (_, n) ← m.findM? fun ⟨e', _⟩ => withTransparency red (isDefEq e e') then
    return n
  else
    failure
end

/--
We introduce a local instance allowing addition of `RBMap`s,
removing any keys with value zero.
We don't need to prove anything about this addition, as it is only used in meta code.
-/
local instance {α β : Type*} {c : α → α → Ordering} [Add β] [Zero β] [DecidableEq β] :
    Add (RBMap α β c) where
  -- Merge by adding coefficients, then drop zero entries so sums stay in normal form.
  add := fun f g => (f.mergeWith (fun _ b b' => b + b') g).filter (fun _ b => b ≠ 0)

namespace Linarith

/-- A local abbreviation for `RBMap` so we don't need to write `Ord.compare` each time. -/
abbrev Map (α β) [Ord α] := RBMap α β Ord.compare

/-! ### Parsing datatypes -/

/-- Variables (represented by natural numbers) map to their power. -/
abbrev Monom : Type := Map ℕ ℕ

/-- `1` is represented by the empty monomial, the product of no variables. -/
def Monom.one : Monom := RBMap.empty

/-- Compare monomials by first comparing their keys and then their powers. -/
def Monom.lt : Monom → Monom → Bool :=
  fun a b =>
    ((a.keys : List ℕ) < b.keys) ||
      (((a.keys : List ℕ) = b.keys) && ((a.values : List ℕ) < b.values))

instance : Ord Monom where
  compare x y := if x.lt y then .lt else if x == y then .eq else .gt

/-- Linear combinations of monomials are represented by mapping monomials to coefficients. -/
abbrev Sum : Type := Map Monom ℤ

/-- `1` is represented as the singleton sum of the monomial `Monom.one` with coefficient 1. -/
def Sum.one : Sum := RBMap.empty.insert Monom.one 1

/-- `Sum.scaleByMonom s m` multiplies every monomial in `s` by `m`. -/
def Sum.scaleByMonom (s : Sum) (m : Monom) : Sum :=
  s.foldr (fun m' coeff sm => sm.insert (m + m') coeff) RBMap.empty

/-- `sum.mul s1 s2` distributes the multiplication of two sums. -/
def Sum.mul (s1 s2 : Sum) : Sum :=
  s1.foldr (fun mn coeff sm => sm + ((s2.scaleByMonom mn).mapVal (fun _ v => v * coeff)))
    RBMap.empty

/-- The `n`th power of `s : Sum` is the `n`-fold product of `s`, with `s.pow 0 = Sum.one`. -/
partial def Sum.pow (s : Sum) : ℕ → Sum
  | 0 => Sum.one
  | 1 => s
  | n =>
    -- Exponentiation by squaring: recurse on `n / 2` and square, multiplying once more by `s`
    -- when `n` is odd.
    let m := n >>> 1
    let a := s.pow m
    if n &&& 1 = 0 then
      a.mul a
    else
      a.mul a |>.mul s

/-- `SumOfMonom m` lifts `m` to a sum with coefficient `1`. -/
def SumOfMonom (m : Monom) : Sum :=
  RBMap.empty.insert m 1

/-- The unit monomial `one` is represented by the empty RBMap. -/
def one : Monom := RBMap.empty

/-- A scalar `z` is represented by a `Sum` with coefficient `z` and monomial `one` -/
def scalar (z : ℤ) : Sum :=
  RBMap.empty.insert one z

/-- A single variable `n` is represented by a sum with coefficient `1` and monomial `n`. -/
def var (n : ℕ) : Sum :=
  RBMap.empty.insert (RBMap.empty.insert n 1) 1

/-! ### Parsing algorithms -/

open Lean Elab Tactic Meta

/--
`ExprMap` is used to record atomic expressions which have been seen while processing inequality
expressions.
-/
-- The natural number is just the index in the list,
-- and we could reimplement to just use `List Expr` if desired.
abbrev ExprMap := List (Expr × ℕ)

/--
`linearFormOfAtom red map e` is the atomic case for `linear_form_of_expr`.
If `e` appears with index `k` in `map`, it returns the singleton sum `var k`.
Otherwise it updates `map`, adding `e` with index `n`, and returns the singleton sum `var n`.
-/
def linearFormOfAtom (red : TransparencyMode) (m : ExprMap) (e : Expr) :
    MetaM (ExprMap × Sum) := do
  try
    let k ← m.findDefeq red e
    return (m, var k)
  catch _ =>
    -- New atoms are numbered from 1 (`m.length + 1`), not 0.
    let n := m.length + 1
    return ((e, n)::m, var n)

/--
`linearFormOfExpr red map e` computes the linear form of `e`.

`map` is a lookup map from atomic expressions to variable numbers.
If a new atomic expression is encountered, it is added to the map with a new number.
It matches atomic expressions up to reducibility given by `red`.

Because it matches up to definitional equality, this function must be in the `MetaM` monad,
and forces some functions that call it into `MetaM` as well.
-/
partial def linearFormOfExpr (red : TransparencyMode) (m : ExprMap) (e : Expr) :
    MetaM (ExprMap × Sum) :=
  match e.numeral? with
  | some 0 => return ⟨m, RBMap.empty⟩
  | some (n+1) => return ⟨m, scalar (n+1)⟩
  | none =>
    match e.getAppFnArgs with
    | (``HMul.hMul, #[_, _, _, _, e1, e2]) => do
      let (m1, comp1) ← linearFormOfExpr red m e1
      let (m2, comp2) ← linearFormOfExpr red m1 e2
      return (m2, comp1.mul comp2)
    | (``HAdd.hAdd, #[_, _, _, _, e1, e2]) => do
      let (m1, comp1) ← linearFormOfExpr red m e1
      let (m2, comp2) ← linearFormOfExpr red m1 e2
      return (m2, comp1 + comp2)
    | (``HSub.hSub, #[_, _, _, _, e1, e2]) => do
      -- `a - b` is handled as `a + (-1) * b`.
      let (m1, comp1) ← linearFormOfExpr red m e1
      let (m2, comp2) ← linearFormOfExpr red m1 e2
      return (m2, comp1 + comp2.mapVal (fun _ v => -v))
    | (``Neg.neg, #[_, _, e]) => do
      let (m1, comp) ← linearFormOfExpr red m e
      return (m1, comp.mapVal (fun _ v => -v))
    | (``HPow.hPow, #[_, _, _, _, a, n]) => do
      -- Only powers by numeral exponents are expanded; otherwise the whole power is an atom.
      match n.numeral? with
      | some n => do
        let (m1, comp) ← linearFormOfExpr red m a
        return (m1, comp.pow n)
      | none => linearFormOfAtom red m e
    | _ => linearFormOfAtom red m e

/--
`elimMonom s map` eliminates the monomial level of the `Sum` `s`.

`map` is a lookup map from monomials to variable numbers.
The output `RBMap ℕ ℤ` has the same structure as `s : Sum`,
but each monomial key is replaced with its index according to `map`.
If any new monomials are encountered, they are assigned variable numbers and `map` is updated.
-/
def elimMonom (s : Sum) (m : Map Monom ℕ) : Map Monom ℕ × Map ℕ ℤ :=
  s.foldr (fun mn coeff ⟨map, out⟩ ↦
    match map.find? mn with
    | some n => ⟨map, out.insert n coeff⟩
    | none =>
      let n := map.size
      ⟨map.insert mn n, out.insert n coeff⟩)
    (m, RBMap.empty)

/--
`toComp red e e_map monom_map` converts an expression of the form `t < 0`, `t ≤ 0`, or `t = 0`
into a `comp` object.

`e_map` maps atomic expressions to indices; `monom_map` maps monomials to indices.
Both of these are updated during processing and returned.
-/
def toComp (red : TransparencyMode) (e : Expr) (e_map : ExprMap) (monom_map : Map Monom ℕ) :
    MetaM (Comp × ExprMap × Map Monom ℕ) := do
  let (iq, e) ← parseCompAndExpr e
  let (m', comp') ← linearFormOfExpr red e_map e
  let ⟨nm, mm'⟩ := elimMonom comp' monom_map
  -- Note: we use `.reverse` as `Linexp.get` assumes the monomial are in descending order
  return ⟨⟨iq, mm'.toList.reverse⟩, m', nm⟩

/--
`toCompFold red e_map exprs monom_map` folds `toComp` over `exprs`,
updating `e_map` and `monom_map` as it goes.
-/
def toCompFold (red : TransparencyMode) : ExprMap → List Expr → Map Monom ℕ →
    MetaM (List Comp × ExprMap × Map Monom ℕ)
  | m, [], mm => return ([], m, mm)
  | m, (h::t), mm => do
    let (c, m', mm') ← toComp red h m mm
    let (l, mp, mm') ← toCompFold red m' t mm'
    return (c::l, mp, mm')

/--
`linearFormsAndMaxVar red pfs` is the main interface for computing the linear forms of a list
of expressions. Given a list `pfs` of proofs of comparisons, it produces a list `c` of `Comp`s of
the same length, such that `c[i]` represents the linear form of the type of `pfs[i]`.

It also returns the largest variable index that appears in comparisons in `c`.
-/
def linearFormsAndMaxVar (red : TransparencyMode) (pfs : List Expr) :
    MetaM (List Comp × ℕ) := do
  let pftps ← (pfs.mapM inferType)
  let (l, _, map) ← toCompFold red [] pftps RBMap.empty
  trace[linarith.detail] "monomial map: {map.toList.map fun ⟨k,v⟩ => (k.toList, v)}"
  return (l, map.size - 1)

end Linarith
/- Tactic\Linarith\Preprocessing.lean -/
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Tactic.Linarith.Datatypes
import Mathlib.Tactic.Zify
import Mathlib.Tactic.CancelDenoms.Core
import Batteries.Data.RBMap.Basic
import Mathlib.Control.Basic

/-!
# Linarith preprocessing

This file contains methods used to preprocess inputs to `linarith`.

In particular, `linarith` works over comparisons of the form `t R 0`, where `R ∈ {<,≤,=}`.
It assumes that expressions in `t` have integer coefficients and that the type of `t` has
well-behaved subtraction.

## Implementation details

A `GlobalPreprocessor` is a function `List Expr → TacticM (List Expr)`.
Users can add custom preprocessing steps by adding them to the `LinarithConfig` object.
`Linarith.defaultPreprocessors` is the main list, and generally none of these should be skipped
unless you know what you're doing.
-/

namespace Linarith

/-! ### Preprocessing -/

open Lean hiding Rat
open Elab Tactic Meta
open Qq
open Mathlib.Tactic (AtomM)
open Batteries (RBSet)

/-- Processor that recursively replaces `P ∧ Q` hypotheses with the pair `P` and `Q`. -/
partial def splitConjunctions : Preprocessor where
  name := "split conjunctions"
  transform := aux
where
  /-- Implementation of the `splitConjunctions` preprocessor. -/
  aux (proof : Expr) : MetaM (List Expr) := do
    match (← instantiateMVars (← inferType proof)).getAppFnArgs with
    | (``And, #[_, _]) =>
      pure ((← aux (← mkAppM ``And.left #[proof])) ++
        (← aux (← mkAppM ``And.right #[proof])))
    | _ => pure [proof]

/--
Removes any expressions that are not proofs of inequalities, equalities, or negations thereof.
-/
partial def filterComparisons : Preprocessor where
  name := "filter terms that are not proofs of comparisons"
  transform h := do
    let tp ← whnfR (← instantiateMVars (← inferType h))
    if (← isProp tp) && (← aux tp) then pure [h]
    else pure []
where
  /-- Implementation of the `filterComparisons` preprocessor. -/
  aux (e : Expr) : MetaM Bool := do
    match e.getAppFnArgs with
    | (``Eq, _) | (``LE.le, _) | (``LT.lt, _) => pure true
    | (``Not, #[e]) =>
      match (← whnfR e).getAppFnArgs with
      | (``LE.le, _) | (``LT.lt, _) => pure true
      | _ => pure false
    | _ => pure false

section removeNegations

/--
If `prf` is a proof of `¬ e`, where `e` is a comparison,
`flipNegatedComparison prf e` flips the comparison in `e` and returns a proof.
For example, if `prf : ¬ a < b`, ``flipNegatedComparison prf q(a < b)`` returns a proof of
`a ≥ b`.
-/
def flipNegatedComparison (prf : Expr) (e : Expr) : MetaM (Option Expr) :=
  match e.getAppFnArgs with
  | (``LE.le, #[_, _, _, _]) => try? <| mkAppM ``lt_of_not_ge #[prf]
  | (``LT.lt, #[_, _, _, _]) => try? <| mkAppM ``le_of_not_gt #[prf]
  | _ => throwError "Not a comparison (flipNegatedComparison): {e}"

/--
Replaces proofs of negations of comparisons with proofs of the reversed comparisons.
For example, a proof of `¬ a < b` will become a proof of `a ≥ b`.
-/
def removeNegations : Preprocessor where
  name := "replace negations of comparisons"
  transform h := do
    let t : Q(Prop) ← whnfR (← inferType h)
    match t with
    | ~q(¬ $p) =>
      match ← flipNegatedComparison h (← whnfR p) with
      | some h' =>
        trace[linarith] "removing negation in {h}"
        return [h']
      | _ => return [h]
    | _ => return [h]

end removeNegations

section natToInt

open Mathlib.Tactic.Zify

/--
`isNatProp tp` is true iff `tp` is an inequality or equality between natural numbers
or the negation thereof.
-/
partial def isNatProp (e : Expr) : Bool :=
  match e.getAppFnArgs with
  | (``Eq, #[.const ``Nat [], _, _]) => true
  | (``LE.le, #[.const ``Nat [], _, _, _]) => true
  | (``LT.lt, #[.const ``Nat [], _, _, _]) => true
  | (``GE.ge, #[.const ``Nat [], _, _, _]) => true
  | (``GT.gt, #[.const ``Nat [], _, _, _]) => true
  | (``Not, #[e]) => isNatProp e
  | _ => false

/-- If `e` is of the form `((n : ℕ) : C)`, `isNatCoe e` returns `⟨n, C⟩`. -/
def isNatCoe (e : Expr) : Option (Expr × Expr) :=
  match e.getAppFnArgs with
  | (``Nat.cast, #[target, _, n]) => some ⟨n, target⟩
  | _ => none

/--
`getNatComparisons e` returns a list of all subexpressions of `e` of the form `((t : ℕ) : C)`.
-/
partial def getNatComparisons (e : Expr) : List (Expr × Expr) :=
  match isNatCoe e with
  | some x => [x]
  | none =>
    -- Only descend through the arithmetic operations that `linarith` itself understands.
    match e.getAppFnArgs with
    | (``HAdd.hAdd, #[_, _, _, _, a, b]) => getNatComparisons a ++ getNatComparisons b
    | (``HMul.hMul, #[_, _, _, _, a, b]) => getNatComparisons a ++ getNatComparisons b
    | (``HSub.hSub, #[_, _, _, _, a, b]) => getNatComparisons a ++ getNatComparisons b
    | (``Neg.neg, #[_, _, a]) => getNatComparisons a
    | _ => []

/-- If `e : ℕ`, returns a proof of `0 ≤ (e : C)`. -/
def mk_natCast_nonneg_prf (p : Expr × Expr) : MetaM (Option Expr) :=
  match p with
  | ⟨e, target⟩ =>
    try commitIfNoEx (mkAppM ``natCast_nonneg #[target, e])
    catch e => do
      trace[linarith] "Got exception when using cast {e.toMessageData}"
      return none

/-- Ordering on `Expr`. -/
def Expr.Ord : Ord Expr :=
  ⟨fun a b => if Expr.lt a b then .lt else if a.equal b then .eq else .gt⟩

attribute [local instance] Expr.Ord

/--
If `h` is an equality or inequality between natural numbers,
`natToInt` lifts this inequality to the integers.
It also adds the facts that the integers involved are nonnegative.
To avoid adding the same nonnegativity facts many times, it is a global preprocessor.
-/
def natToInt : GlobalBranchingPreprocessor where
  name := "move nats to ints"
  transform g l := do
    let l ← l.mapM fun h => do
      let t ← whnfR (← instantiateMVars (← inferType h))
      if isNatProp t then
        let (some (h', t'), _) ← Term.TermElabM.run' (run_for g (zifyProof none h t))
          | throwError "zifyProof failed on {h}"
        if ← filterComparisons.aux t' then
          pure h'
        else
          -- `zifyProof` turned our comparison into something that wasn't a comparison
          -- probably replacing `n = n` with `True`, because of
          -- https://github.com/leanprover-community/mathlib4/issues/741
          -- so we just keep the original hypothesis.
          pure h
      else
        pure h
    -- Collect all `((t : ℕ) : C)` subterms occurring on either side of any hypothesis,
    -- deduplicated via an `RBSet`, so each nonnegativity fact is added at most once.
    let nonnegs ← l.foldlM (init := ∅) fun (es : RBSet (Expr × Expr) lexOrd.compare) h => do
      try
        let (a, b) ← getRelSides (← inferType h)
        pure <| (es.insertMany (getNatComparisons a)).insertMany (getNatComparisons b)
      catch _ => pure es
    pure [(g, ((← nonnegs.toList.filterMapM mk_natCast_nonneg_prf) ++ l : List Expr))]

end natToInt

section strengthenStrictInt

/--
If `pf` is a proof of a strict inequality `(a : ℤ) < b`,
`mkNonstrictIntProof pf` returns a proof of `a + 1 ≤ b`,
and similarly if `pf` proves a negated weak inequality.
-/
def mkNonstrictIntProof (pf : Expr) : MetaM (Option Expr) := do
  match (← instantiateMVars (← inferType pf)).getAppFnArgs with
  | (``LT.lt, #[.const ``Int [], _, a, b]) =>
    return mkApp (← mkAppM ``Iff.mpr #[← mkAppOptM ``Int.add_one_le_iff #[a, b]]) pf
  | (``GT.gt, #[.const ``Int [], _, a, b]) =>
    return mkApp (← mkAppM ``Iff.mpr #[← mkAppOptM ``Int.add_one_le_iff #[b, a]]) pf
  | (``Not, #[P]) =>
    -- Negated weak inequalities are first flipped (via `lt_of_not_ge`) into strict ones.
    match P.getAppFnArgs with
    | (``LE.le, #[.const ``Int [], _, a, b]) =>
      return mkApp (← mkAppM ``Iff.mpr #[← mkAppOptM ``Int.add_one_le_iff #[b, a]])
        (← mkAppM ``lt_of_not_ge #[pf])
    | (``GE.ge, #[.const ``Int [], _, a, b]) =>
      return mkApp (← mkAppM ``Iff.mpr #[← mkAppOptM ``Int.add_one_le_iff #[a, b]])
        (← mkAppM ``lt_of_not_ge #[pf])
    | _ => return none
  | _ => return none

/--
`strengthenStrictInt h` turns a proof `h` of a strict integer inequality `t1 < t2`
into a proof of `t1 + 1 ≤ t2`.
-/
-- NOTE(review): the docstring previously claimed `t1 ≤ t2 + 1`; the code (via
-- `Int.add_one_le_iff`) actually produces `t1 + 1 ≤ t2`, as documented on `mkNonstrictIntProof`.
def strengthenStrictInt : Preprocessor where
  name := "strengthen strict inequalities over int"
  transform h := return [(← mkNonstrictIntProof h).getD h]

end strengthenStrictInt

section compWithZero

/--
`rearrangeComparison e` takes a proof `e` of an equality, inequality, or negation thereof,
and turns it into a proof of a comparison `_ R 0`, where `R ∈ {=, ≤, <}`.
-/
partial def rearrangeComparison (e : Expr) : MetaM (Option Expr) := do
  aux e (← instantiateMVars (← inferType e))
where
  /-- Implementation of `rearrangeComparison`, after type inference. -/
  aux (proof e : Expr) : MetaM (Option Expr) :=
    -- For each comparison shape, dispatch on whether the left/right side is a literal `0`:
    -- `_ R 0` is already in normal form, `0 R _` needs flipping, and the general case
    -- is moved to `a - b R 0`.
    let isZero (e : Expr) := e.getAppFnArgs matches (``OfNat.ofNat, #[_, .lit (.natVal 0), _])
    match e.getAppFnArgs with
    | (``LE.le, #[_, _, a, b]) => match isZero a, isZero b with
      | _, true => return proof
      | true, _ => try? <| mkAppM ``neg_nonpos_of_nonneg #[proof]
      | _, _ => try? <| mkAppM ``sub_nonpos_of_le #[proof]
    | (``LT.lt, #[_, _, a, b]) => match isZero a, isZero b with
      | _, true => return proof
      | true, _ => try? <| mkAppM ``neg_neg_of_pos #[proof]
      | _, _ => try? <| mkAppM ``sub_neg_of_lt #[proof]
    | (``Eq, #[_, a, b]) => match isZero a, isZero b with
      | _, true => return proof
      | true, _ => try? <| mkAppM ``Eq.symm #[proof]
      | _, _ => try? <| mkAppM ``sub_eq_zero_of_eq #[proof]
    | (``GT.gt, #[_, _, a, b]) => match isZero a, isZero b with
      | _, true => try? <| mkAppM ``neg_neg_of_pos #[proof]
      | true, _ => try? <| mkAppM ``lt_zero_of_zero_gt #[proof]
      | _, _ => try? <| mkAppM ``sub_neg_of_lt #[proof]
    | (``GE.ge, #[_, _, a, b]) => match isZero a, isZero b with
      | _, true => try? <| mkAppM ``neg_nonpos_of_nonneg #[proof]
      | true, _ => try? <| mkAppM ``le_zero_of_zero_ge #[proof]
      | _, _ => try? <| mkAppM ``sub_nonpos_of_le #[proof]
    | (``Not, #[a]) => do
      let some nproof ← flipNegatedComparison proof a | return none
      aux nproof (← inferType nproof)
    | a => throwError "couldn't rearrange comparison {a}"

/--
`compWithZero h` takes a proof `h` of an equality, inequality, or negation thereof,
and turns it into a proof of a comparison `_ R 0`, where `R ∈ {=, ≤, <}`.
-/
def compWithZero : Preprocessor where
  name := "make comparisons with zero"
  transform e := return (← rearrangeComparison e).toList

end compWithZero

section cancelDenoms

theorem without_one_mul {M : Type*} [MulOneClass M] {a b : M} (h : 1 * a = b) : a = b := by
  rwa [one_mul] at h

/--
`normalizeDenominatorsLHS h lhs` assumes that `h` is a proof of `lhs R 0`.
It creates a proof of `lhs' R 0`, where all numeric division in `lhs` has been cancelled.
-/
def normalizeDenominatorsLHS (h lhs : Expr) : MetaM Expr := do
  let mut (v, lhs') ← CancelDenoms.derive lhs
  if v = 1 then
    -- `lhs'` has a `1 *` out front, but `mkSingleCompZeroOf` has a special case
    -- where it does not produce `1 *`. We strip it off here:
    lhs' ← mkAppM ``without_one_mul #[lhs']
  let (_, h'') ← mkSingleCompZeroOf v h
  try
    h''.rewriteType lhs'
  catch e =>
    dbg_trace
      s!"Error in Linarith.normalizeDenominatorsLHS: {← e.toMessageData.toString}"
    throw e

/--
`cancelDenoms pf` assumes `pf` is a proof of `t R 0`. If `t` contains the division symbol `/`,
it tries to scale `t` to cancel out division by numerals.
-/
def cancelDenoms : Preprocessor where
  name := "cancel denominators"
  transform := fun pf => (do
      let (_, lhs) ← parseCompAndExpr (← inferType pf)
      guard <| lhs.containsConst (fun n => n = ``HDiv.hDiv || n = ``Div.div)
      pure [← normalizeDenominatorsLHS pf lhs])
    <|> return [pf]

end cancelDenoms

section nlinarith

/--
`findSquares s e` collects all terms of the form `a ^ 2` and `a * a` that appear in `e`
and adds them to the set `s`.
A pair `(i, true)` is added to `s` when `atoms[i]^2` appears in `e`,
and `(i, false)` is added to `s` when `atoms[i]*atoms[i]` appears in `e`.
-/
partial def findSquares (s : RBSet (Nat × Bool) lexOrd.compare) (e : Expr) :
    AtomM (RBSet (Nat × Bool) lexOrd.compare) :=
  -- Completely traversing the expression is non-ideal,
  -- as we can descend into expressions that could not possibly be seen by `linarith`.
  -- As a result we visit expressions with bvars, which then cause panics.
  -- Ideally this preprocessor would be reimplemented so it only visits things that could be
  -- atoms.
  -- In the meantime we just bail out if we ever encounter loose bvars.
  if e.hasLooseBVars then return s else
  match e.getAppFnArgs with
  | (``HPow.hPow, #[_, _, _, _, a, b]) =>
    match b.numeral? with
    | some 2 => do
      let s ← findSquares s a
      let ai ← AtomM.addAtom a
      return (s.insert (ai, true))
    | _ => e.foldlM findSquares s
  | (``HMul.hMul, #[_, _, _, _, a, b]) => do
    let ai ← AtomM.addAtom a
    let bi ← AtomM.addAtom b
    if ai = bi then do
      let s ← findSquares s a
      return (s.insert (ai, false))
    else
      e.foldlM findSquares s
  | _ => e.foldlM findSquares s

/--
`nlinarithExtras` is the preprocessor corresponding to the `nlinarith` tactic.

* For every term `t` such that `t^2` or `t*t` appears in the input,
  adds a proof of `t^2 ≥ 0` or `t*t ≥ 0`.
* For every pair of comparisons `t1 R1 0` and `t2 R2 0`, adds a proof of `t1*t2 R 0`.

This preprocessor is typically run last, after all inputs have been canonized.
-/
def nlinarithExtras : GlobalPreprocessor where
  name := "nonlinear arithmetic extras"
  transform ls := do
    -- find the squares in `AtomM` to ensure deterministic behavior
    let s ← AtomM.run .reducible do
      let si ← ls.foldrM (fun h s' => do findSquares s' (← instantiateMVars (← inferType h)))
        RBSet.empty
      si.toList.mapM fun (i, is_sq) => return ((← get).atoms[i]!, is_sq)
    let new_es ← s.filterMapM fun (e, is_sq) =>
      observing? <| mkAppM (if is_sq then ``sq_nonneg else ``mul_self_nonneg) #[e]
    let new_es ← compWithZero.globalize.transform new_es
    trace[linarith] "nlinarith preprocessing found squares"
    trace[linarith] "{s}"
    linarithTraceProofs "so we added proofs" new_es
    -- Pair each hypothesis with its comparison kind; anything unparseable is treated as `<`.
    let with_comps ← (new_es ++ ls).mapM (fun e => do
      let tp ← inferType e
      try
        let ⟨ine, _⟩ ← parseCompAndExpr tp
        pure (ine, e)
      catch _ => pure (Ineq.lt, e))
    -- Multiply every pair of hypotheses, choosing the product lemma from the two
    -- comparison kinds; pairs with no applicable lemma are silently dropped.
    let products ← with_comps.mapDiagM fun (⟨posa, a⟩ : Ineq × Expr) ⟨posb, b⟩ =>
      try
        (some <$> match posa, posb with
          | Ineq.eq, _ => mkAppM ``zero_mul_eq #[a, b]
          | _, Ineq.eq => mkAppM ``mul_zero_eq #[a, b]
          | Ineq.lt, Ineq.lt => mkAppM ``mul_pos_of_neg_of_neg #[a, b]
          | Ineq.lt, Ineq.le => do
            let a ← mkAppM ``le_of_lt #[a]
            mkAppM ``mul_nonneg_of_nonpos_of_nonpos #[a, b]
          | Ineq.le, Ineq.lt => do
            let b ← mkAppM ``le_of_lt #[b]
            mkAppM ``mul_nonneg_of_nonpos_of_nonpos #[a, b]
          | Ineq.le, Ineq.le => mkAppM ``mul_nonneg_of_nonpos_of_nonpos #[a, b])
      catch _ => pure none
    let products ← compWithZero.globalize.transform products.reduceOption
    return (new_es ++ ls ++ products)

end nlinarith

section removeNe
/--
`removeNe_aux` case splits on any proof `h : a ≠ b` in the input,
turning it into `a < b ∨ a > b`.
This produces `2^n` branches when there are `n` such hypotheses in the input.
-/
partial def removeNe_aux : MVarId → List Expr → MetaM (List Branch) := fun g hs => do
  -- Find the first `≠` hypothesis; if none is left, this branch is finished.
  let some (e, α, a, b) ← hs.findSomeM? (fun e : Expr => do
      let some (α, a, b) := (← instantiateMVars (← inferType e)).ne?' | return none
      return some (e, α, a, b)) | return [(g, hs)]
  let [ng1, ng2] ← g.apply (← mkAppOptM ``Or.elim #[none, none, ← g.getType,
      ← mkAppOptM ``lt_or_gt_of_ne #[α, none, a, b, e]]) | failure
  let do_goal : MVarId → MetaM (List Branch) := fun g => do
    let (f, h) ← g.intro1
    h.withContext do
      -- Recurse with the `≠` hypothesis removed and the new strict inequality added.
      let ls ← removeNe_aux h <| hs.removeAll [e]
      return ls.map (fun b : Branch => (b.1, (.fvar f)::b.2))
  return ((← do_goal ng1) ++ (← do_goal ng2))

/--
`removeNe` case splits on any proof `h : a ≠ b` in the input, turning it into `a < b ∨ a > b`,
by calling `linarith.removeNe_aux`.
This produces `2^n` branches when there are `n` such hypotheses in the input.
-/
def removeNe : GlobalBranchingPreprocessor where
  name := "removeNe"
  transform := removeNe_aux
end removeNe

/--
The default list of preprocessors, in the order they should typically run.
-/
def defaultPreprocessors : List GlobalBranchingPreprocessor :=
  [filterComparisons, removeNegations, natToInt, strengthenStrictInt,
    compWithZero, cancelDenoms]

/--
`preprocess pps l` takes a list `l` of proofs of propositions.
It maps each preprocessor `pp ∈ pps` over this list.
The preprocessors are run sequentially: each receives the output of the previous one.
Note that a preprocessor may produce multiple or no expressions from each input expression,
so the size of the list may change.
-/
def preprocess (pps : List GlobalBranchingPreprocessor) (g : MVarId) (l : List Expr) :
    MetaM (List Branch) :=
  g.withContext <| pps.foldlM (fun ls pp =>
    return (← ls.mapM fun (g, l) => do pp.process g l).join) [(g, l)]

end Linarith
Tactic\Linarith\Verification.lean
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Tactic.Linarith.Parsing
import Mathlib.Util.Qq

/-!
# Deriving a proof of false

`linarith` uses an untrusted oracle to produce a certificate of unsatisfiability.
It needs to do some proof reconstruction work to turn this into a proof term.
This file implements the reconstruction.

## Main declarations

The public facing declaration in this file is `proveFalseByLinarith`.
-/

open Lean Elab Tactic Meta

namespace Qq

variable {u : Level}

/-- Typesafe conversion of `n : ℕ` to `Q($α)`. -/
def ofNatQ (α : Q(Type $u)) (_ : Q(Semiring $α)) (n : ℕ) : Q($α) :=
  match n with
  | 0 => q(0 : $α)
  | 1 => q(1 : $α)
  | k+2 =>
    -- For `n ≥ 2`, build an `OfNat.ofNat` application on a raw numeral literal.
    -- The `=Q` witness records the definitional equality `lit = k + 2` for Qq's typechecking.
    have lit : Q(ℕ) := mkRawNatLit n
    have k : Q(ℕ) := mkRawNatLit k
    haveI : $lit =Q $k + 2 := ⟨⟩
    q(OfNat.ofNat $lit)

end Qq

namespace Linarith

open Ineq
open Qq

/-! ### Auxiliary functions for assembling proofs -/

/-- A typesafe version of `mulExpr`. -/
def mulExpr' {u : Level} (n : ℕ) {α : Q(Type $u)} (inst : Q(Semiring $α)) (e : Q($α)) : Q($α) :=
  -- Special-case `n = 1` so we never produce a redundant `1 * e`.
  if n = 1 then e else
    let n := ofNatQ α inst n
    q($n * $e)

/--
`mulExpr n e` creates an `Expr` representing `n*e`.
When elaborated, the coefficient will be a native numeral of the same type as `e`.
-/
def mulExpr (n : ℕ) (e : Expr) : MetaM Expr := do
  let ⟨_, α, e⟩ ← inferTypeQ' e
  let inst : Q(Semiring $α) ← synthInstanceQ q(Semiring $α)
  return mulExpr' n inst e

/-- A type-safe analogue of `addExprs`. -/
def addExprs' {u : Level} {α : Q(Type $u)} (_inst : Q(AddMonoid $α)) : List Q($α) → Q($α)
  | [] => q(0)
  | h::t => go h t
where
  /-- Inner loop for `addExprs'`. Accumulates the left-associated sum in `p`. -/
  go (p : Q($α)) : List Q($α) → Q($α)
    | [] => p
    | [q] => q($p + $q)
    | q::t => go q($p + $q) t

/--
`addExprs L` creates an `Expr` representing the sum of the elements of `L`, associated left.
-/
def addExprs : List Expr → MetaM Expr
  | [] => return q(0) -- This may not be of the intended type; use with caution.
  | L@(h::_) => do
    let ⟨_, α, _⟩ ← inferTypeQ' h
    let inst : Q(AddMonoid $α) ← synthInstanceQ q(AddMonoid $α)
    -- This is not type safe; we just assume all the `Expr`s in the tail have the same type:
    return addExprs' inst L

/--
If our goal is to add together two inequalities `t1 R1 0` and `t2 R2 0`,
`addIneq R1 R2` produces the strength of the inequality in the sum `R`,
along with the name of a lemma to apply in order to conclude `t1 + t2 R 0`.
-/
def addIneq : Ineq → Ineq → (Name × Ineq)
  | eq, eq => (``Linarith.eq_of_eq_of_eq, eq)
  | eq, le => (``Linarith.le_of_eq_of_le, le)
  | eq, lt => (``Linarith.lt_of_eq_of_lt, lt)
  | le, eq => (``Linarith.le_of_le_of_eq, le)
  | le, le => (``add_nonpos, le)
  | le, lt => (``add_lt_of_le_of_neg, lt)
  | lt, eq => (``Linarith.lt_of_lt_of_eq, lt)
  | lt, le => (``add_lt_of_neg_of_le, lt)
  | lt, lt => (``Left.add_neg, lt)

/--
`mkLTZeroProof coeffs pfs` takes a list of proofs of the form `tᵢ Rᵢ 0`,
paired with coefficients `cᵢ`.
It produces a proof that `∑cᵢ * tᵢ R 0`, where `R` is as strong as possible.
-/
def mkLTZeroProof : List (Expr × ℕ) → MetaM Expr
  | [] => throwError "no linear hypotheses found"
  | [(h, c)] => do
    let (_, t) ← mkSingleCompZeroOf c h
    return t
  | ((h, c)::t) => do
    let (iq, h') ← mkSingleCompZeroOf c h
    -- Fold the remaining scaled hypotheses into the running sum, strengthening the
    -- relation via `addIneq` at each step.
    let (_, t) ← t.foldlM (fun pr ce ↦ step pr.1 pr.2 ce.1 ce.2) (iq, h')
    return t
where
  /--
  `step c pf npf coeff` assumes that `pf` is a proof of `t1 R1 0` and `npf` is a proof
  of `t2 R2 0`. It uses `mkSingleCompZeroOf` to prove `t1 + coeff*t2 R 0`, and returns `R`
  along with this proof.
  -/
  step (c : Ineq) (pf npf : Expr) (coeff : ℕ) : MetaM (Ineq × Expr) := do
    let (iq, h') ← mkSingleCompZeroOf coeff npf
    let (nm, niq) := addIneq c iq
    return (niq, ← mkAppM nm #[pf, h'])

/-- If `prf` is a proof of `t R s`, `leftOfIneqProof prf` returns `t`. -/
def leftOfIneqProof (prf : Expr) : MetaM Expr := do
  let (t, _) ← getRelSides (← inferType prf)
  return t

/-- If `prf` is a proof of `t R s`, `typeOfIneqProof prf` returns the type of `t`. -/
def typeOfIneqProof (prf : Expr) : MetaM Expr := do
  inferType (← leftOfIneqProof prf)

/--
`mkNegOneLtZeroProof tp` returns a proof of `-1 < 0`,
where the numerals are natively of type `tp`.
-/
def mkNegOneLtZeroProof (tp : Expr) : MetaM Expr := do
  let zero_lt_one ← mkAppOptM ``zero_lt_one #[tp, none, none, none, none, none]
  -- NOTE: single-backtick name — resolved by `mkAppM` at runtime rather than checked
  -- at elaboration time.
  mkAppM `neg_neg_of_pos #[zero_lt_one]

/--
`addNegEqProofs l` inspects the list of proofs `l` for proofs of the form `t = 0`. For each such
proof, it adds a proof of `-t = 0` to the list.
-/
def addNegEqProofs : List Expr → MetaM (List Expr)
  | [] => return []
  | (h::tl) => do
    let (iq, t) ← parseCompAndExpr (← inferType h)
    match iq with
    | Ineq.eq => do
      -- Both the original proof `h : t = 0` and the derived `nep : -t = 0` are kept.
      let nep := mkAppN (← mkAppM `Iff.mpr #[← mkAppOptM ``neg_eq_zero #[none, none, t]]) #[h]
      let tl ← addNegEqProofs tl
      return h::nep::tl
    | _ => return h :: (← addNegEqProofs tl)

/--
`proveEqZeroUsing tac e` tries to use `tac` to construct a proof of `e = 0`.
-/
def proveEqZeroUsing (tac : TacticM Unit) (e : Expr) : MetaM Expr := do
  let ⟨u, α, e⟩ ← inferTypeQ' e
  let _h : Q(Zero $α) ← synthInstanceQ q(Zero $α)
  synthesizeUsing' q($e = 0) tac

/-! #### The main method -/

/--
`proveFalseByLinarith` is the main workhorse of `linarith`.
Given a list `l` of proofs of `tᵢ Rᵢ 0`,
it tries to derive a contradiction from `l` and use this to produce a proof of `False`.

`oracle : CertificateOracle` is used to search for a certificate of unsatisfiability.

The returned certificate is a map `m` from hypothesis indices to natural number coefficients.
If our set of hypotheses has the form `{tᵢ Rᵢ 0}`,
then the elimination process should have guaranteed that
1.\ `∑ (m i)*tᵢ = 0`,
with at least one `i` such that `m i > 0` and `Rᵢ` is `<`.

We have also that
2.\ `∑ (m i)*tᵢ < 0`,
since for each `i`, `(m i)*tᵢ ≤ 0` and at least one is strictly negative.
So we conclude a contradiction `0 < 0`.

It remains to produce proofs of (1) and (2). (1) is verified by calling the provided
`discharger` tactic, which is typically `ring`. We prove (2) by folding over the set of
hypotheses.

`transparency : TransparencyMode` controls the transparency level with which
atoms are identified.
-/
def proveFalseByLinarith (transparency : TransparencyMode) (oracle : CertificateOracle)
    (discharger : TacticM Unit) : MVarId → List Expr → MetaM Expr
  | _, [] => throwError "no args to linarith"
  | g, l@(h::_) => do
      trace[linarith.detail] "Beginning work in `proveFalseByLinarith`."
      -- for the elimination to work properly, we must add a proof of `-1 < 0` to the list,
      -- along with negated equality proofs.
      let l' ← addNegEqProofs l
      trace[linarith.detail] "... finished `addNegEqProofs`."
      let inputs := (← mkNegOneLtZeroProof (← typeOfIneqProof h))::l'.reverse
      trace[linarith.detail] "... finished `mkNegOneLtZeroProof`."
      trace[linarith.detail] (← inputs.mapM inferType)
      let (comps, max_var) ← linearFormsAndMaxVar transparency inputs
      trace[linarith.detail] "... finished `linearFormsAndMaxVar`."
      trace[linarith.detail] "{comps}"
      -- perform the elimination and fail if no contradiction is found.
      let certificate : Batteries.HashMap Nat Nat ← try
          oracle.produceCertificate comps max_var
        catch e =>
          trace[linarith] e.toMessageData
          throwError "linarith failed to find a contradiction"
      trace[linarith] "linarith has found a contradiction: {certificate.toList}"
      -- The certificate's keys index into `inputs` (the oracle numbered hypotheses by position).
      let enum_inputs := inputs.enum
      -- construct a list pairing nonzero coeffs with the proof of their corresponding comparison
      let zip := enum_inputs.filterMap fun ⟨n, e⟩ => (certificate.find? n).map (e, ·)
      let mls ← zip.mapM fun ⟨e, n⟩ => do mulExpr n (← leftOfIneqProof e)
      -- `sm` is the sum of input terms, scaled to cancel out all variables.
      let sm ← addExprs mls
      -- let sm ← instantiateMVars sm
      trace[linarith] "The expression\n {sm}\nshould be both 0 and negative"
      -- we prove that `sm = 0`, typically with `ring`.
      let sm_eq_zero ← proveEqZeroUsing discharger sm
      -- we also prove that `sm < 0`
      let sm_lt_zero ← mkLTZeroProof zip
      -- this is a contradiction.
      let pftp ← inferType sm_lt_zero
      let ⟨_, nep, _⟩ ← g.rewrite pftp sm_eq_zero
      let pf' ← mkAppM ``Eq.mp #[nep, sm_lt_zero]
      mkAppM ``Linarith.lt_irrefl #[pf']

end Linarith
Tactic\Linarith\Oracle\FourierMotzkin.lean
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import Mathlib.Tactic.Linarith.Datatypes
import Batteries.Data.HashMap.WF

/-!
# The Fourier-Motzkin elimination procedure

The Fourier-Motzkin procedure is a variable elimination method for linear inequalities.
<https://en.wikipedia.org/wiki/Fourier%E2%80%93Motzkin_elimination>

Given a set of linear inequalities `comps = {tᵢ Rᵢ 0}`,
we aim to eliminate a single variable `a` from the set.
We partition `comps` into `comps_pos`, `comps_neg`, and `comps_zero`,
where `comps_pos` contains the comparisons `tᵢ Rᵢ 0` in which
the coefficient of `a` in `tᵢ` is positive, and similar.

For each pair of comparisons `tᵢ Rᵢ 0 ∈ comps_pos`, `tⱼ Rⱼ 0 ∈ comps_neg`,
we compute coefficients `vᵢ, vⱼ ∈ ℕ` such that `vᵢ*tᵢ + vⱼ*tⱼ` cancels out `a`.
We collect these sums `vᵢ*tᵢ + vⱼ*tⱼ R' 0` in a set `S`
and set `comps' = S ∪ comps_zero`,
a new set of comparisons in which `a` has been eliminated.

Theorem: `comps` and `comps'` are equisatisfiable.

We recursively eliminate all variables from the system.
If we derive an empty clause `0 < 0`,
we conclude that the original system was unsatisfiable.
-/

open Batteries
open Std (format ToFormat)

namespace Linarith

/-!
### Datatypes

The `CompSource` and `PComp` datatypes are specific to the FM elimination routine;
they are not shared with other components of `linarith`.
-/

/--
`CompSource` tracks the source of a comparison.
The atomic source of a comparison is an assumption, indexed by a natural number.
Two comparisons can be added to produce a new comparison,
and one comparison can be scaled by a natural number to produce a new comparison.
-/
inductive CompSource : Type
  | assump : Nat → CompSource
  | add : CompSource → CompSource → CompSource
  | scale : Nat → CompSource → CompSource
deriving Inhabited

/--
Given a `CompSource` `cs`, `cs.flatten` maps an assumption index
to the number of copies of that assumption that appear in the history of `cs`.

For example, suppose `cs` is produced by scaling assumption 2 by 5,
and adding to that the sum of assumptions 1 and 2.
`cs.flatten` maps `1 ↦ 1, 2 ↦ 6`.
-/
def CompSource.flatten : CompSource → HashMap Nat Nat
  | (CompSource.assump n) => HashMap.empty.insert n 1
  -- `add` sums the multiplicities of assumptions appearing in both branches.
  | (CompSource.add c1 c2) =>
      (CompSource.flatten c1).mergeWith (fun _ b b' => b + b') (CompSource.flatten c2)
  -- `scale` multiplies every assumption's multiplicity by `n`.
  | (CompSource.scale n c) => (CompSource.flatten c).mapVal (fun _ v => v * n)

/-- Formats a `CompSource` for printing. -/
def CompSource.toString : CompSource → String
  | (CompSource.assump e) => ToString.toString e
  | (CompSource.add c1 c2) => CompSource.toString c1 ++ " + " ++ CompSource.toString c2
  | (CompSource.scale n c) => ToString.toString n ++ " * " ++ CompSource.toString c

instance : ToFormat CompSource :=
  ⟨fun a => CompSource.toString a⟩

/--
A `PComp` stores a linear comparison `Σ cᵢ*xᵢ R 0`,
along with information about how this comparison was derived.
The original expressions fed into `linarith` are each assigned a unique natural number label.
The *historical set* `PComp.history` stores the labels of expressions
that were used in deriving the current `PComp`.
Variables are also indexed by natural numbers. The sets `PComp.effective`, `PComp.implicit`,
and `PComp.vars` contain variable indices.
* `PComp.vars` contains the variables that appear in any inequality in the historical set.
* `PComp.effective` contains the variables that have been effectively eliminated from `PComp`.
  A variable `n` is said to be *effectively eliminated* in `p : PComp`
  if the elimination of `n` produced at least one of the ancestors of `p` (or `p` itself).
* `PComp.implicit` contains the variables that have been implicitly eliminated from `PComp`.
  A variable `n` is said to be *implicitly eliminated* in `p` if it satisfies the following
  properties:
  - `n` appears in some inequality in the historical set (i.e. in `p.vars`).
  - `n` does not appear in `p.c.vars` (i.e. it has been eliminated).
  - `n` was not effectively eliminated.

We track these sets in order to compute whether the history of a `PComp` is *minimal*.
Checking this directly is expensive, but effective approximations can be defined in terms of these
sets. During the variable elimination process, a `PComp` with non-minimal history can be discarded.
-/
structure PComp : Type where
  /-- The comparison `Σ cᵢ*xᵢ R 0`. -/
  c : Comp
  /-- We track how the comparison was constructed by adding and scaling previous comparisons,
  back to the original assumptions. -/
  src : CompSource
  /-- The set of original assumptions which have been used in constructing this comparison. -/
  history : RBSet ℕ Ord.compare
  /-- The variables which have been *effectively eliminated*,
  i.e. by running the elimination algorithm on that variable. -/
  effective : RBSet ℕ Ord.compare
  /-- The variables which have been *implicitly eliminated*.
  These are variables that appear in the historical set,
  do not appear in `c` itself, and are not in `effective`. -/
  implicit : RBSet ℕ Ord.compare
  /-- The union of all variables appearing in those original assumptions
  which appear in the `history` set. -/
  vars : RBSet ℕ Ord.compare

/--
Any comparison whose history is not minimal is redundant,
and need not be included in the new set of comparisons.
`elimedGE : ℕ` is a natural number such that all variables with index ≥ `elimedGE` have been
removed from the system.

This test is an overapproximation to minimality. It gives necessary but not sufficient conditions.
If the history of `c` is minimal, then `c.maybeMinimal` is true,
but `c.maybeMinimal` may also be true for some `c` with non-minimal history.
Thus, if `c.maybeMinimal` is false, `c` is known not to be minimal and must be redundant.
See https://doi.org/10.1016/B978-0-444-88771-9.50019-2 (Theorem 13).
The condition described there considers only implicitly eliminated variables that have been
officially eliminated from the system. This is not the case for every implicitly eliminated
variable. Consider eliminating `z` from `{x + y + z < 0, x - y - z < 0}`.
The result is the set `{2*x < 0}`; `y` is implicitly but not officially eliminated.

This implementation of Fourier-Motzkin elimination processes variables in decreasing order of
indices. Immediately after a step that eliminates variable `k`,
variable `k'` has been eliminated iff `k' ≥ k`.
Thus we can compute the intersection of officially and implicitly eliminated variables by taking
the set of implicitly eliminated variables with indices ≥ `elimedGE`.
-/
def PComp.maybeMinimal (c : PComp) (elimedGE : ℕ) : Bool :=
  c.history.size ≤ 1 + ((c.implicit.filter (· ≥ elimedGE)).union c.effective).size

/--
The `src : CompSource` field is ignored when comparing `PComp`s. Two `PComp`s proving the same
comparison, with different sources, are considered equivalent.
-/
def PComp.cmp (p1 p2 : PComp) : Ordering :=
  p1.c.cmp p2.c

/-- `PComp.scale c n` scales the coefficients of `c` by `n` and notes this in the `CompSource`. -/
def PComp.scale (c : PComp) (n : ℕ) : PComp :=
  { c with c := c.c.scale n, src := c.src.scale n }

/--
`PComp.add c1 c2 elimVar` creates the result of summing the linear comparisons `c1` and `c2`,
during the process of eliminating the variable `elimVar`.
The computation assumes, but does not enforce, that `elimVar` appears in both `c1` and `c2`
and does not appear in the sum.
Computing the sum of the two comparisons is easy; the complicated details lie in tracking the
additional fields of `PComp`.
* The historical set `pcomp.history` of `c1 + c2` is the union of the two historical sets.
* `vars` is the union of `c1.vars` and `c2.vars`.
* The effectively eliminated variables of `c1 + c2` are the union of the two effective sets,
  with `elim_var` inserted.
* The implicitly eliminated variables of `c1 + c2` are those that appear in `vars` but not
  `c.vars` or `effective`.
(Note that the description of the implicitly eliminated variables of `c1 + c2` in the algorithm
described in Section 6 of https://doi.org/10.1016/B978-0-444-88771-9.50019-2 seems to be wrong:
that says it should be `(c1.implicit.union c2.implicit).sdiff explicit`.
Since the implicitly eliminated sets start off empty for the assumption preprocessing,
this formula would leave them always empty.)
-/
def PComp.add (c1 c2 : PComp) (elimVar : ℕ) : PComp :=
  let c := c1.c.add c2.c
  let src := c1.src.add c2.src
  let history := c1.history.union c2.history
  let vars := c1.vars.union c2.vars
  let effective := (c1.effective.union c2.effective).insert elimVar
  -- Implicitly eliminated = in some ancestor (`vars`) but absent from the sum and
  -- never officially eliminated.
  let implicit := (vars.sdiff (.ofList c.vars _)).sdiff effective
  ⟨c, src, history, effective, implicit, vars⟩

/--
`PComp.assump c n` creates a `PComp` whose comparison is `c` and whose source is
`CompSource.assump n`, that is, `c` is derived from the `n`th hypothesis.
The history is the singleton set `{n}`.
No variables have been eliminated (effectively or implicitly).
-/
def PComp.assump (c : Comp) (n : ℕ) : PComp where
  c := c
  src := CompSource.assump n
  history := RBSet.empty.insert n
  effective := .empty
  implicit := .empty
  vars := .ofList c.vars _

instance : ToFormat PComp :=
  ⟨fun p => format p.c.coeffs ++ toString p.c.str ++ "0"⟩

instance : ToString PComp :=
  ⟨fun p => toString p.c.coeffs ++ toString p.c.str ++ "0"⟩

/-- A collection of comparisons. -/
abbrev PCompSet := RBSet PComp PComp.cmp

/-! ### Elimination procedure -/

/--
If `c1` and `c2` both contain variable `a` with opposite coefficients,
produces `v1` and `v2` such that `a` has been cancelled in `v1*c1 + v2*c2`.
-/
def elimVar (c1 c2 : Comp) (a : ℕ) : Option (ℕ × ℕ) :=
  let v1 := c1.coeffOf a
  let v2 := c2.coeffOf a
  -- `v1 * v2 < 0` holds exactly when the two coefficients are nonzero with opposite signs.
  if v1 * v2 < 0 then
    -- Scaling by `lcm / |vᵢ|` gives the smallest positive multipliers that cancel `a`.
    let vlcm := Nat.lcm v1.natAbs v2.natAbs
    some ⟨vlcm / v1.natAbs, vlcm / v2.natAbs⟩
  else none

/--
`pelimVar p1 p2` calls `elimVar` on the `Comp` components of `p1` and `p2`.
If this returns `v1` and `v2`,
it creates a new `PComp` equal to `v1*p1 + v2*p2`,
and tracks this in the `CompSource`.
-/
def pelimVar (p1 p2 : PComp) (a : ℕ) : Option PComp := do
  let (n1, n2) ← elimVar p1.c p2.c a
  return (p1.scale n1).add (p2.scale n2) a

/--
A `PComp` represents a contradiction if its `Comp` field represents a contradiction.
-/
def PComp.isContr (p : PComp) : Bool := p.c.isContr

/--
`elimWithSet a p comps` collects the result of calling `pelimVar p p' a`
for every `p' ∈ comps`.
-/
def elimWithSet (a : ℕ) (p : PComp) (comps : PCompSet) : PCompSet :=
  comps.foldl (fun s pc =>
    match pelimVar p pc a with
    -- Comparisons with provably non-minimal history are redundant and discarded here.
    | some pc => if pc.maybeMinimal a then s.insert pc else s
    | none => s) RBSet.empty

/--
The state for the elimination monad.
* `maxVar`: the largest variable index that has not been eliminated.
* `comps`: a set of comparisons

The elimination procedure proceeds by eliminating variable `v` from `comps` progressively
in decreasing order.
-/
structure LinarithData : Type where
  /-- The largest variable index that has not been (officially) eliminated. -/
  maxVar : ℕ
  /-- The set of comparisons. -/
  comps : PCompSet

/--
The linarith monad extends an exceptional monad with a `LinarithData` state.
An exception produces a contradictory `PComp`.
-/
abbrev LinarithM : Type → Type :=
  StateT LinarithData (ExceptT PComp Id)

/-- Returns the current max variable. -/
def getMaxVar : LinarithM ℕ :=
  LinarithData.maxVar <$> get

/-- Return the current comparison set. -/
def getPCompSet : LinarithM PCompSet :=
  LinarithData.comps <$> get

/-- Throws an exception if a contradictory `PComp` is contained in the current state. -/
def validate : LinarithM Unit := do
  match (← getPCompSet).toList.find? (fun p : PComp => p.isContr) with
  | none => return ()
  | some c => throw c

/--
Updates the current state with a new max variable and comparisons,
and calls `validate` to check for a contradiction.
-/
def update (maxVar : ℕ) (comps : PCompSet) : LinarithM Unit := do
  StateT.set ⟨maxVar, comps⟩
  validate

/--
`splitSetByVarSign a comps` partitions the set `comps` into three parts.
* `pos` contains the elements of `comps` in which `a` has a positive coefficient.
* `neg` contains the elements of `comps` in which `a` has a negative coefficient.
* `notPresent` contains the elements of `comps` in which `a` has coefficient 0.

Returns `(pos, neg, notPresent)`.
-/
def splitSetByVarSign (a : ℕ) (comps : PCompSet) : PCompSet × PCompSet × PCompSet :=
  comps.foldl (fun ⟨pos, neg, notPresent⟩ pc =>
    let n := pc.c.coeffOf a
    if n > 0 then ⟨pos.insert pc, neg, notPresent⟩
    else if n < 0 then ⟨pos, neg.insert pc, notPresent⟩
    else ⟨pos, neg, notPresent.insert pc⟩)
    ⟨RBSet.empty, RBSet.empty, RBSet.empty⟩

/--
`elimVarM a` performs one round of Fourier-Motzkin elimination, eliminating the variable `a`
from the `linarith` state.
-/
def elimVarM (a : ℕ) : LinarithM Unit := do
  let vs ← getMaxVar
  -- Variables above `maxVar` have already been eliminated; nothing to do for them.
  if (a ≤ vs) then (do
    let ⟨pos, neg, notPresent⟩ := splitSetByVarSign a (← getPCompSet)
    -- Pair every positive-coefficient comparison with all negative ones; keep the
    -- untouched comparisons (`notPresent`) alongside the new sums.
    update (vs - 1) (pos.foldl (fun s p => s.union (elimWithSet a p neg)) notPresent))
  else
    pure ()

/--
`elimAllVarsM` eliminates all variables from the linarith state, leaving it with a set of
ground comparisons. If this succeeds without exception, the original `linarith` state is
consistent.
-/
def elimAllVarsM : LinarithM Unit := do
  for i in (List.range ((← getMaxVar) + 1)).reverse do
    elimVarM i

/--
`mkLinarithData hyps vars` takes a list of hypotheses and the largest variable present in
those hypotheses. It produces an initial state for the elimination monad.
-/
def mkLinarithData (hyps : List Comp) (maxVar : ℕ) : LinarithData :=
  ⟨maxVar, .ofList (hyps.enum.map fun ⟨n, cmp⟩ => PComp.assump cmp n) _⟩

/-- An oracle that uses Fourier-Motzkin elimination. -/
def CertificateOracle.fourierMotzkin : CertificateOracle where
  produceCertificate hyps maxVar := match ExceptT.run
      (StateT.run (do validate; elimAllVarsM : LinarithM Unit) (mkLinarithData hyps maxVar)) with
  -- `Except.ok` means elimination ran to completion without deriving `0 < 0`:
  -- the system is satisfiable, so no certificate exists and the oracle fails.
  | (Except.ok _) => failure
  -- `Except.error contr` carries the contradictory comparison; its flattened source
  -- gives the certificate mapping hypothesis indices to coefficients.
  | (Except.error contr) => return contr.src.flatten

end Linarith
Tactic\Linarith\Oracle\SimplexAlgorithm.lean
/-
Copyright (c) 2024 Vasily Nesterov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vasily Nesterov
-/
import Mathlib.Tactic.Linarith.Datatypes
import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm.PositiveVector

/-!
# The oracle based on Simplex Algorithm

This file contains hooks to enable the use of the Simplex Algorithm in `linarith`.

The algorithm's entry point is the function `Linarith.SimplexAlgorithm.findPositiveVector`.
See the file `PositiveVector.lean` for details of how the procedure works.
-/

open Batteries

namespace Linarith.SimplexAlgorithm

/-- Preprocess the goal to pass it to `Linarith.SimplexAlgorithm.findPositiveVector`. -/
def preprocess (matType : ℕ → ℕ → Type) [UsableInSimplexAlgorithm matType] (hyps : List Comp)
    (maxVar : ℕ) : matType (maxVar + 1) (hyps.length) × List Nat :=
  -- Build the coefficient matrix entries as `(row, column, value)` triples:
  -- rows are variable indices (`0 .. maxVar`), columns are hypothesis indices.
  let values : List (ℕ × ℕ × ℚ) := hyps.foldlIdx (init := []) fun idx cur comp =>
    cur ++ comp.coeffs.map fun (var, c) => (var, idx, c)
  -- Remember which hypotheses are strict (`<`); the certificate must use at least one of them.
  let strictIndexes := hyps.findIdxs (·.str == Ineq.lt)
  (ofValues values, strictIndexes)

/-- Extract the certificate from the `vec` found by
`Linarith.SimplexAlgorithm.findPositiveVector`. -/
def postprocess (vec : Array ℚ) : HashMap ℕ ℕ :=
  -- `common_den` is the lcm of all entry denominators, so `x * common_den` is integral
  -- for every entry `x`; `floor.toNat` then just extracts that integer.
  let common_den : ℕ := vec.foldl (fun acc item => acc.lcm item.den) 1
  let vecNat : Array ℕ := vec.map (fun x : ℚ => (x * common_den).floor.toNat)
  -- Keep only nonzero coefficients, keyed by their position in `vec`.
  HashMap.ofList <| vecNat.toList.enum.filter (fun ⟨_, item⟩ => item != 0)

end SimplexAlgorithm

open SimplexAlgorithm

/-- An oracle that uses the Simplex Algorithm. -/
def CertificateOracle.simplexAlgorithmSparse : CertificateOracle where
  produceCertificate hyps maxVar := do
    let (A, strictIndexes) := preprocess SparseMatrix hyps maxVar
    let vec ← findPositiveVector A strictIndexes
    return postprocess vec

/--
The same oracle as `CertificateOracle.simplexAlgorithmSparse`, but uses dense matrices. Works faster
on dense states.
-/
def CertificateOracle.simplexAlgorithmDense : CertificateOracle where
  produceCertificate hyps maxVar := do
    let (A, strictIndexes) := preprocess DenseMatrix hyps maxVar
    let vec ← findPositiveVector A strictIndexes
    return postprocess vec

end Linarith
Tactic\Linarith\Oracle\SimplexAlgorithm\Datatypes.lean
/-
Copyright (c) 2024 Vasily Nesterov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vasily Nesterov
-/
import Batteries.Data.Rat.Basic

/-!
# Datatypes for the Simplex Algorithm implementation
-/

namespace Linarith.SimplexAlgorithm

/--
Specification for matrix types over ℚ which can be used in the Gauss Elimination and the Simplex
Algorithm. It was introduced to unify dense matrices and sparse matrices.
-/
class UsableInSimplexAlgorithm (α : Nat → Nat → Type) where
  /-- Returns `mat[i, j]`. -/
  getElem {n m : Nat} (mat : α n m) (i j : Nat) : Rat
  /-- Sets `mat[i, j]`. -/
  setElem {n m : Nat} (mat : α n m) (i j : Nat) (v : Rat) : α n m
  /-- Returns the list of elements of `mat` in the form `(i, j, mat[i, j])`. -/
  getValues {n m : Nat} (mat : α n m) : List (Nat × Nat × Rat)
  /-- Creates a matrix from a list of elements in the form `(i, j, mat[i, j])`. -/
  ofValues {n m : Nat} (values : List (Nat × Nat × Rat)) : α n m
  /-- Swaps two rows. -/
  swapRows {n m : Nat} (mat : α n m) (i j : Nat) : α n m
  /-- Subtracts `i`-th row multiplied by `coef` from `j`-th row. -/
  subtractRow {n m : Nat} (mat : α n m) (i j : Nat) (coef : Rat) : α n m
  /-- Divides the `i`-th row by `coef`. -/
  divideRow {n m : Nat} (mat : α n m) (i : Nat) (coef : Rat) : α n m

export UsableInSimplexAlgorithm (setElem getValues ofValues swapRows subtractRow divideRow)

instance (n m : Nat) (matType : Nat → Nat → Type) [UsableInSimplexAlgorithm matType] :
    GetElem (matType n m) (Nat × Nat) Rat fun _ p => p.1 < n ∧ p.2 < m where
  getElem mat p _ := UsableInSimplexAlgorithm.getElem mat p.1 p.2

/--
Structure for matrices over ℚ.

So far it is just a 2d-array carrying dimensions (that are supposed to match with the actual
dimensions of `data`), but the plan is to add some `Prop`-data and make the structure strict and
safe.

Note: we avoid using the `Matrix` from `Mathlib.Data.Matrix` because it is far more efficient to
store matrix as its entries than as function between `Fin`-s.
-/
structure DenseMatrix (n m : Nat) where
  /-- The content of the matrix. -/
  data : Array (Array Rat)

instance : UsableInSimplexAlgorithm DenseMatrix where
  -- Uses panicking `[...]!` indexing: callers are expected to keep indices in bounds.
  getElem mat i j := mat.data[i]![j]!
  setElem mat i j v := ⟨mat.data.modify i fun row => row.set! j v⟩
  getValues mat :=
    -- Only nonzero entries are reported; rows are accumulated by prepending.
    mat.data.zipWithIndex.foldl (init := []) fun acc (row, i) =>
      let rowVals := Array.toList <| row.zipWithIndex.filterMap fun (v, j) =>
        if v != 0 then
          .some (i, j, v)
        else
          .none
      rowVals ++ acc
  ofValues {n m : Nat} vals : DenseMatrix _ _ := Id.run do
    let mut data : Array (Array Rat) := Array.mkArray n <| Array.mkArray m 0
    for ⟨i, j, v⟩ in vals do
      data := data.modify i fun row => row.set! j v
    return ⟨data⟩
  swapRows mat i j := ⟨mat.data.swap! i j⟩
  subtractRow mat i j coef :=
    let newData : Array (Array Rat) := mat.data.modify j fun row =>
      row.zipWith mat.data[i]! fun x y => x - coef * y
    ⟨newData⟩
  divideRow mat i coef := ⟨mat.data.modify i (·.map (· / coef))⟩

/--
Structure for sparse matrices over ℚ, implemented as an array of hashmaps, containing only nonzero
values.
-/
structure SparseMatrix (n m : Nat) where
  /-- The content of the matrix. -/
  data : Array <| Lean.HashMap Nat Rat

instance : UsableInSimplexAlgorithm SparseMatrix where
  -- Missing keys denote zero entries.
  getElem mat i j := mat.data[i]!.findD j 0
  -- Maintain the invariant that only nonzero values are stored: writing `0` erases the key.
  setElem mat i j v :=
    if v == 0 then
      ⟨mat.data.modify i fun row => row.erase j⟩
    else
      ⟨mat.data.modify i fun row => row.insert j v⟩
  getValues mat :=
    mat.data.zipWithIndex.foldl (init := []) fun acc (row, i) =>
      let rowVals := row.toList.map fun (j, v) => (i, j, v)
      rowVals ++ acc
  ofValues {n _ : Nat} vals := Id.run do
    let mut data : Array (Lean.HashMap Nat Rat) := Array.mkArray n .empty
    for ⟨i, j, v⟩ in vals do
      if v != 0 then
        data := data.modify i fun row => row.insert j v
    return ⟨data⟩
  swapRows mat i j := ⟨mat.data.swap! i j⟩
  subtractRow mat i j coef :=
    let newData := mat.data.modify j fun row =>
      mat.data[i]!.fold (fun cur k val =>
        let newVal := (cur.findD k 0) - coef * val
        -- Entries that cancel to zero are erased, preserving sparsity.
        if newVal != 0 then
          cur.insert k newVal
        else
          cur.erase k
      ) row
    ⟨newData⟩
  divideRow mat i coef :=
    let newData : Array (Lean.HashMap Nat Rat) := mat.data.modify i fun row =>
      row.fold (fun cur k v => cur.insert k (v / coef)) row
    ⟨newData⟩

/--
`Tableau` is a structure the Simplex Algorithm operates on. The `i`-th row of `mat` expresses the
variable `basic[i]` as a linear combination of variables from `free`.
-/
structure Tableau (matType : Nat → Nat → Type) [UsableInSimplexAlgorithm matType] where
  /-- Array containing the basic variables' indexes -/
  basic : Array Nat
  /-- Array containing the free variables' indexes -/
  free : Array Nat
  /-- Matrix of coefficients the basic variables expressed through the free ones. -/
  mat : matType basic.size free.size

end Linarith.SimplexAlgorithm
Tactic\Linarith\Oracle\SimplexAlgorithm\Gauss.lean
/-
Copyright (c) 2024 Vasily Nesterov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vasily Nesterov
-/
import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm.Datatypes

/-!
# Gaussian Elimination algorithm

The first step of `Linarith.SimplexAlgorithm.findPositiveVector` is finding an initial feasible
solution, which is done by the standard Gaussian Elimination algorithm implemented in this file.
-/

namespace Linarith.SimplexAlgorithm.Gauss

/-- The monad for the Gaussian Elimination algorithm: a state monad over the matrix being
reduced. -/
abbrev GaussM (n m : Nat) (matType : Nat → Nat → Type) := StateM <| matType n m

variable {n m : Nat} {matType : Nat → Nat → Type} [UsableInSimplexAlgorithm matType]

/-- Finds the first row starting from the `rowStart` with nonzero element in the column `col`.
Returns `.none` if the whole column below `rowStart` is zero. -/
def findNonzeroRow (rowStart col : Nat) : GaussM n m matType <| Option Nat := do
  for i in [rowStart:n] do
    if (← get)[(i, col)]! != 0 then
      return i
  return .none

/-- Implementation of `getTableau` in `GaussM` monad. -/
def getTableauImp : GaussM n m matType <| Tableau matType := do
  let mut free : Array Nat := #[]
  let mut basic : Array Nat := #[]

  let mut row : Nat := 0
  let mut col : Nat := 0

  while row < n && col < m do
    match ← findNonzeroRow row col with
    | .none =>
      -- No pivot in this column: the variable is free.
      free := free.push col
      col := col + 1
      continue
    | .some rowToSwap =>
      modify fun mat => swapRows mat row rowToSwap

    -- Normalize the pivot row and eliminate the pivot column from all other rows.
    modify fun mat => divideRow mat row mat[(row, col)]!

    for i in [:n] do
      if i == row then
        continue
      let coef := (← get)[(i, col)]!
      if coef != 0 then
        modify fun mat => subtractRow mat row i coef

    basic := basic.push col
    row := row + 1
    col := col + 1

  -- All remaining columns correspond to free variables.
  for i in [col:m] do
    free := free.push i

  let ansMatrix : matType basic.size free.size := ← do
    -- Re-index the reduced matrix by positions in `free`, negating coefficients so each basic
    -- variable is expressed as a combination of the free ones.
    let vals := getValues (← get) |>.filterMap fun (i, j, v) =>
      if j == basic[i]! then
        .none
      else
        .some (i, free.findIdx? (· == j) |>.get!, -v)
    return ofValues vals

  return ⟨basic, free, ansMatrix⟩

/-- Given matrix `A`, solves the linear equation `A x = 0` and returns the solution as a tableau
where some variables are free and the other (basic) variables are expressed as linear combinations
of the free ones. -/
def getTableau (A : matType n m) : Tableau matType := Id.run do
  return (← getTableauImp.run A).fst

end Linarith.SimplexAlgorithm.Gauss
Tactic\Linarith\Oracle\SimplexAlgorithm\PositiveVector.lean
/-
Copyright (c) 2024 Vasily Nesterov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vasily Nesterov
-/
import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm.SimplexAlgorithm
import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm.Gauss

/-!
# `linarith` certificate search as an LP problem

`linarith` certificate search can easily be reduced to the following problem:
given the matrix `A` and the list `strictIndexes`, find the nonnegative vector `v` such that some
of its coordinates from the `strictIndexes` are positive and `A v = 0`.

The function `findPositiveVector` solves this problem.

# Algorithm sketch

1. We translate the problem stated above to some Linear Programming problem. See `stateLP` for
  details. Let us denote the corresponding matrix `B`.

2. We solve the equation `B x = 0` using Gauss Elimination, splitting the set of variables into
  *free* variables, which can take any value, and *basic* variables which are linearly expressed
  through the free one. This gives us an initial tableau for the Simplex Algorithm. See
  `Linarith.SimplexAlgorithm.Gauss.getTableau`.

3. We run the Simplex Algorithm until it finds a solution. See the file `SimplexAlgorithm.lean`.
-/

namespace Linarith.SimplexAlgorithm

variable {matType : Nat → Nat → Type} [UsableInSimplexAlgorithm matType]

/-- Given matrix `A` and list `strictIndexes` of strict inequalities' indexes, we want to state the
Linear Programming problem whose solution would give us a solution for the initial problem (see
`findPositiveVector`).

As an objective function (that we are trying to maximize) we use sum of coordinates from
`strictIndexes`: it suffices to find the nonnegative vector that makes this function positive.

We introduce two auxiliary variables and one constraint:
* The variable `y` is interpreted as "homogenized" `1`. We need it because dealing with a
  homogenized problem is easier, but having some "unit" is necessary.
* To bound the problem we add the constraint `x₁ + ... + xₘ + z = y` introducing new variable `z`.

The objective function is also interpreted as an auxiliary variable with constraint
`f = ∑ i ∈ strictIndexes, xᵢ`.

The variable `f` has to always be basic while `y` has to be free. Our Gauss method implementation
greedily collects basic variables moving from left to right. So we place `f` before `x`-s and `y`
after them. We place `z` between `f` and `x` because in this case `z` will be basic and
`Gauss.getTableau` produces a tableau with nonnegative last column, meaning that we are starting
from a feasible point. -/
def stateLP {n m : Nat} (A : matType n m) (strictIndexes : List Nat) : matType (n + 2) (m + 3) :=
  /- +2 due to shifting by `f` and `z` -/
  let objectiveRow : List (Nat × Nat × Rat) :=
    (0, 0, -1) :: strictIndexes.map fun idx => (0, idx + 2, 1)
  let constraintRow : List (Nat × Nat × Rat) :=
    [(1, 1, 1), (1, m + 2, -1)] ++ (List.range m).map (fun i => (1, i + 2, 1))
  -- The original matrix `A` is shifted by the two auxiliary rows and columns.
  let valuesA := getValues A |>.map fun (i, j, v) => (i + 2, j + 2, v)
  ofValues (objectiveRow ++ constraintRow ++ valuesA)

/-- Extracts target vector from the tableau, putting auxiliary variables aside (see `stateLP`). -/
def extractSolution (tableau : Tableau matType) : Array Rat := Id.run do
  let mut ans : Array Rat := Array.mkArray (tableau.basic.size + tableau.free.size - 3) 0
  for i in [1:tableau.basic.size] do
    -- The value of each basic variable is the last-column entry of its row;
    -- the `- 2` undoes the index shift introduced by `stateLP`.
    ans := ans.set! (tableau.basic[i]! - 2) <| tableau.mat[(i, tableau.free.size - 1)]!
  return ans

/-- Finds a nonnegative vector `v`, such that `A v = 0` and some of its coordinates from
`strictCoords` are positive, in the case such `v` exists. If not, throws the error. The latter
prevents `linarith` from doing useless post-processing. -/
def findPositiveVector {n m : Nat} {matType : Nat → Nat → Type} [UsableInSimplexAlgorithm matType]
    (A : matType n m) (strictIndexes : List Nat) : Lean.Meta.MetaM <| Array Rat := do
  /- State the linear programming problem. -/
  let B := stateLP A strictIndexes

  /- Using Gaussian elimination split variable into free and basic forming the tableau that will be
  operated by the Simplex Algorithm. -/
  let initTableau := Gauss.getTableau B

  /- Run the Simplex Algorithm and extract the solution. -/
  let res := runSimplexAlgorithm.run initTableau
  if res.fst.isOk then
    return extractSolution res.snd
  else
    throwError "Simplex Algorithm failed"
Tactic\Linarith\Oracle\SimplexAlgorithm\SimplexAlgorithm.lean
/-
Copyright (c) 2024 Vasily Nesterov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vasily Nesterov
-/
import Mathlib.Tactic.Linarith.Oracle.SimplexAlgorithm.Datatypes

/-!
# Simplex Algorithm

To obtain required vector in `Linarith.SimplexAlgorithm.findPositiveVector` we run the Simplex
Algorithm. We use Bland's rule for pivoting, which guarantees that the algorithm terminates.
-/

namespace Linarith.SimplexAlgorithm

/-- An exception in the `SimplexAlgorithmM` monad. -/
inductive SimplexAlgorithmException
  /-- The solution is infeasible. -/
  | infeasible : SimplexAlgorithmException

/-- The monad for the Simplex Algorithm: a state monad over the `Tableau`, with an exception
signalling infeasibility. -/
abbrev SimplexAlgorithmM (matType : Nat → Nat → Type) [UsableInSimplexAlgorithm matType] :=
  ExceptT SimplexAlgorithmException <| StateM (Tableau matType)

variable {matType : Nat → Nat → Type} [UsableInSimplexAlgorithm matType]

/-- Given indexes `exitIdx` and `enterIdx` of exiting and entering variables in the `basic` and
`free` arrays, performs pivot operation, i.e. expresses one through the other and makes the free
one basic and vice versa. -/
def doPivotOperation (exitIdx enterIdx : Nat) : SimplexAlgorithmM matType Unit :=
  modify fun s : Tableau matType => Id.run do
    let mut mat := s.mat
    let intersectCoef := mat[(exitIdx, enterIdx)]!

    for i in [:s.basic.size] do
      if i == exitIdx then
        continue
      let coef := mat[(i, enterIdx)]! / intersectCoef
      if coef != 0 then
        mat := subtractRow mat exitIdx i coef
      mat := setElem mat i enterIdx coef
    mat := setElem mat exitIdx enterIdx (-1)
    mat := divideRow mat exitIdx (-intersectCoef)

    -- Swap the two variables between the `basic` and `free` arrays.
    let newBasic := s.basic.set! exitIdx s.free[enterIdx]!
    let newFree := s.free.set! enterIdx s.basic[exitIdx]!

    have hb : newBasic.size = s.basic.size := by apply Array.size_setD
    have hf : newFree.size = s.free.size := by apply Array.size_setD

    return (⟨newBasic, newFree, hb ▸ hf ▸ mat⟩ : Tableau matType)

/-- Check if the solution is found: the objective function is positive and all basic variables are
nonnegative. -/
def checkSuccess : SimplexAlgorithmM matType Bool := do
  let lastIdx := (← get).free.size - 1
  return (← get).mat[(0, lastIdx)]! > 0 &&
    (← Nat.allM (← get).basic.size (fun i => do return (← get).mat[(i, lastIdx)]! >= 0))

/-- Chooses an entering variable: among the variables with a positive coefficient in the objective
function, the one with the smallest index (in the initial indexing). -/
def chooseEnteringVar : SimplexAlgorithmM matType Nat := do
  let mut enterIdxOpt : Option Nat := .none -- index of entering variable in the `free` array
  let mut minIdx := 0
  for i in [:(← get).free.size - 1] do
    if (← get).mat[(0, i)]! > 0 && (enterIdxOpt.isNone || (← get).free[i]! < minIdx) then
      enterIdxOpt := i
      minIdx := (← get).free[i]!

  /- If there is no such variable the solution does not exist for sure. -/
  match enterIdxOpt with
  | .none => throw SimplexAlgorithmException.infeasible
  | .some enterIdx => return enterIdx

/-- Chooses an exiting variable: the variable imposing the strictest limit on the increase of the
entering variable, breaking ties by choosing the variable with smallest index. -/
def chooseExitingVar (enterIdx : Nat) : SimplexAlgorithmM matType Nat := do
  let mut exitIdxOpt : Option Nat := .none -- index of exiting variable in the `basic` array
  let mut minCoef := 0
  let mut minIdx := 0
  for i in [1:(← get).basic.size] do
    if (← get).mat[(i, enterIdx)]! >= 0 then
      continue
    let lastIdx := (← get).free.size - 1
    let coef := -(← get).mat[(i, lastIdx)]! / (← get).mat[(i, enterIdx)]!
    if exitIdxOpt.isNone || coef < minCoef ||
        (coef == minCoef && (← get).basic[i]! < minIdx) then
      exitIdxOpt := i
      minCoef := coef
      minIdx := (← get).basic[i]!
  return exitIdxOpt.get! -- such variable always exists because our problem is bounded

/-- Chooses entering and exiting variables using
[Bland's rule](https://en.wikipedia.org/wiki/Bland%27s_rule) that guarantees that the Simplex
Algorithm terminates. -/
def choosePivots : SimplexAlgorithmM matType (Nat × Nat) := do
  let enterIdx ← chooseEnteringVar
  let exitIdx ← chooseExitingVar enterIdx
  return ⟨exitIdx, enterIdx⟩

/-- Runs the Simplex Algorithm inside the `SimplexAlgorithmM`. It always terminates, finding
solution if such exists. -/
def runSimplexAlgorithm : SimplexAlgorithmM matType Unit := do
  while !(← checkSuccess) do
    let ⟨exitIdx, enterIdx⟩ ← choosePivots
    doPivotOperation exitIdx enterIdx

end Linarith.SimplexAlgorithm
Tactic\Linter\GlobalAttributeIn.lean
/-
Copyright (c) 2024 Michael Rothgang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Rothgang, Damiano Testa
-/
import Lean.Elab.Command
import Lean.Linter.Util

/-!
# Linter for `attribute [...] in` declarations

Linter for global attributes created via `attribute [...] in` declarations.

The syntax `attribute [instance] instName in` can be used to accidentally create a global instance.
This is **not** obvious from reading the code, and in fact happened twice during the port, hence,
we lint against it.

*Example*: before this was discovered, `Mathlib/Topology/Category/TopCat/Basic.lean` contained the
following code:
```
attribute [instance] ConcreteCategory.instFunLike in
instance (X Y : TopCat.{u}) : CoeFun (X ⟶ Y) fun _ => X → Y where
  coe f := f
```
Despite the `in`, this makes `ConcreteCategory.instFunLike` a global instance.

This seems to apply to all attributes. For example:
```lean
theorem what : False := sorry

attribute [simp] what in
#guard true

-- the `simp` attribute persists
example : False := by simp -- `simp` finds `what`

theorem who {x y : Nat} : x = y := sorry

attribute [ext] who in
#guard true

-- the `ext` attribute persists
example {x y : Nat} : x = y := by ext
```

Therefore, we lint against this pattern on all instances.

For *removing* attributes, the `in` works as expected.
```lean
/-- error: failed to synthesize Add Nat -/
#guard_msgs in
attribute [-instance] instAddNat in
#synth Add Nat

-- the `instance` persists
/-- info: instAddNat -/
#guard_msgs in
#synth Add Nat

@[simp]
theorem what : False := sorry

/-- error: simp made no progress -/
#guard_msgs in
attribute [-simp] what in
example : False := by simp

-- the `simp` attribute persists
#guard_msgs in
example : False := by simp
```
-/

open Lean Elab Command

namespace Mathlib.Linter

/-- Lint on any occurrence of `attribute [...] name in` which is not `local` or `scoped`: these
are a footgun, as the attribute is applied *globally* (despite the `in`). -/
register_option linter.globalAttributeIn : Bool := {
  defValue := true
  descr := "enable the globalAttributeIn linter" }

namespace globalAttributeInLinter

/-- Gets the value of the `linter.globalAttributeIn` option. -/
def getLinterGlobalAttributeIn (o : Options) : Bool :=
  Linter.getLinterValue linter.globalAttributeIn o

/-- `getGlobalAttributesIn? cmd` assumes that `cmd` represents a `attribute [...] id in ...`
command. If this is the case, then it returns `(id, #[non-local nor scoped attributes])`.
Otherwise, it returns `default`. -/
def getGlobalAttributesIn? : Syntax → Option (Ident × Array (TSyntax `attr))
  | `(attribute [$x,*] $id in $_) =>
    -- Keep only attributes that are neither erased (`-attr`) nor `local`/`scoped`.
    let xs := x.getElems.filterMap fun a => match a.raw with
      | `(Parser.Command.eraseAttr| -$_) => none
      | `(Parser.Term.attrInstance| local $_attr:attr) => none
      | `(Parser.Term.attrInstance| scoped $_attr:attr) => none
      | `(attr| $a) => some a
    (id, xs)
  | _ => default

/-- The `globalAttributeInLinter` linter flags any global attributes generated by an
`attribute [...] in` declaration. (This includes the `instance`, `simp` and `ext` attributes.)

Despite the `in`, these define *global* instances, which can be rather misleading.
Instead, remove the `in` or mark them with `local`. -/
def globalAttributeIn : Linter where run := withSetOptionIn fun stx => do
  unless getLinterGlobalAttributeIn (← getOptions) do
    return
  if (← MonadState.get).messages.hasErrors then
    return
  for s in stx.topDown do
    if let .some (id, nonScopedNorLocal) := getGlobalAttributesIn? s then
      for attr in nonScopedNorLocal do
        Linter.logLint linter.globalAttributeIn attr
          m!"Despite the `in`, the attribute '{attr}' is added globally to '{id}'\n\
          please remove the `in` or make this a `local {attr}`"

initialize addLinter globalAttributeIn

end globalAttributeInLinter

end Mathlib.Linter
Tactic\Linter\HashCommandLinter.lean
/-
Copyright (c) 2024 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/
import Lean.Elab.Command
import Lean.Linter.Util
import Batteries.Lean.HashSet

/-!
# `#`-command linter

The `#`-command linter produces a warning when a command starting with `#` is used *and*
* either the command emits no message;
* or `warningAsError` is set to `true`.

The rationale behind this is that `#`-commands are intended to be transient: they provide useful
information in development, but are not intended to be present in final code.
Most of them are noisy and get picked up anyway by CI, but even the quiet ones are not expected to
outlive their in-development status.
-/

namespace Mathlib.Linter

/-- The linter emits a warning on any command beginning with `#` that itself emits no message.
For example, `#guard true` and `#check_tactic True ~> True by skip` trigger a message.
There is a list of silent `#`-command that are allowed. -/
register_option linter.hashCommand : Bool := {
  defValue := true
  descr := "enable the `#`-command linter" }

namespace HashCommandLinter

open Lean Elab

/-- Gets the value of the `linter.hashCommand` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.hashCommand o

open Command in
/-- Exactly like `withSetOptionIn`, but recursively discards nested uses of `in`.
Intended to be used in the `hashCommand` linter, where we want to enter `set_option` `in`
commands. -/
private partial def withSetOptionIn' (cmd : CommandElab) : CommandElab := fun stx => do
  if stx.getKind == ``Lean.Parser.Command.in then
    if stx[0].getKind == ``Lean.Parser.Command.set_option then
      let opts ← Elab.elabSetOption stx[0][1] stx[0][3]
      withScope (fun scope => { scope with opts }) do
        withSetOptionIn' cmd stx[2]
    else
      withSetOptionIn' cmd stx[2]
  else
    cmd stx

/-- `allowed_commands` is the `HashSet` of `#`-commands that are allowed in 'Mathlib'. -/
private abbrev allowed_commands : HashSet String := { "#adaptation_note" }

/-- Checks that no command beginning with `#` is present in 'Mathlib', except for the ones in
`allowed_commands`.

If `warningAsError` is `true`, then the linter logs an info (rather than a warning).
This means that CI will eventually fail on `#`-commands, but does not stop it from continuing.

However, in order to avoid local clutter, when `warningAsError` is `false`, the linter logs a
warning only for the `#`-commands that do not already emit a message. -/
def hashCommandLinter : Linter where run := withSetOptionIn' fun stx => do
  let mod := (← getMainModule).components
  if getLinterHash (← getOptions) &&
    ((← get).messages.toList.isEmpty || warningAsError.get (← getOptions)) &&
    -- we check that the module is either not in `test` or, is `test.HashCommandLinter`
    (mod.getD 0 default != `test || (mod == [`test, `HashCommandLinter]))
  then
    if let some sa := stx.getHead? then
      let a := sa.getAtomVal
      if (a.get ⟨0⟩ == '#' && ! allowed_commands.contains a) then
        let msg := m!"`#`-commands, such as '{a}', are not allowed in 'Mathlib'"
        if warningAsError.get (← getOptions) then
          logInfoAt sa (msg ++ " [linter.hashCommand]")
        else Linter.logLint linter.hashCommand sa msg

initialize addLinter hashCommandLinter

end HashCommandLinter
Tactic\Linter\HaveLetLinter.lean
/-
Copyright (c) 2024 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/
import Lean.Elab.Command
import Lean.Server.InfoUtils

/-!
# The `have` vs `let` linter

The `have` vs `let` linter flags uses of `have` to introduce a hypothesis whose Type is not
`Prop`.

The option for this linter is a natural number, but really there are only 3 settings:
* `0` -- inactive;
* `1` -- active only on noisy declarations;
* `2` or more -- always active.

TODO:
* Also lint `let` vs `have`.
* `haveI` may need to change to `let/letI`?
* `replace`, `classical!`, `classical`, `tauto` internally use `have`:
  should the linter act on them as well?
-/

open Lean Elab Command Meta

namespace Mathlib.Linter

/-- The `have` vs `let` linter emits a warning on `have`s introducing a hypothesis whose
Type is not `Prop`.
There are three settings:
* `0` -- inactive;
* `1` -- active only on noisy declarations;
* `2` or more -- always active.

The default value is `1`. -/
register_option linter.haveLet : Nat := {
  defValue := 1
  descr := "enable the `have` vs `let` linter:\n\
            * 0 -- inactive;\n\
            * 1 -- active only on noisy declarations;\n\
            * 2 or more -- always active." }

namespace haveLet

/-- find the `have` syntax. -/
partial def isHave? : Syntax → Bool
  | .node _ ``Lean.Parser.Tactic.tacticHave_ _ => true
  | _ => false

end haveLet

end Mathlib.Linter

namespace Mathlib.Linter.haveLet

/-- a monadic version of `Lean.Elab.InfoTree.foldInfo`.
Used to infer types inside a `CommandElabM`. -/
def InfoTree.foldInfoM {α m} [Monad m] (f : ContextInfo → Info → α → m α) (init : α) :
    InfoTree → m α :=
  InfoTree.foldInfo (fun ctx i ma => do f ctx i (← ma)) (pure init)

/-- given a `ContextInfo`, a `LocalContext` and an `Array` of `Expr`essions `es`,
`areProp_toFormat` creates a `MetaM` context, and returns an array of pairs consisting of
* a `Bool`ean, answering the question of whether the Type of `e` is a `Prop` or not, and
* the pretty-printed `Format` of `e`
for each `Expr`ession `e` in `es`.
Concretely, `areProp_toFormat` runs `inferType` in `CommandElabM`.
This is the kind of monadic lift that `nonPropHaves` uses to decide whether the Type of a `have`
is in `Prop` or not.
The output `Format` is just so that the linter displays a better message. -/
def areProp_toFormat (ctx : ContextInfo) (lc : LocalContext) (es : Array Expr) :
    CommandElabM (Array (Bool × Format)) := do
  ctx.runMetaM lc do
    es.mapM fun e => do
      let typ ← inferType (← instantiateMVars e)
      return (typ.isProp, ← ppExpr e)

/-- returns the `have` syntax whose corresponding hypothesis does not have Type `Prop` and
also a `Format`ted version of the corresponding Type. -/
partial def nonPropHaves : InfoTree → CommandElabM (Array (Syntax × Format)) :=
  InfoTree.foldInfoM (init := #[]) fun ctx info args => return args ++ (← do
    let .ofTacticInfo i := info | return #[]
    let stx := i.stx
    let .original .. := stx.getHeadInfo | return #[]
    unless isHave? stx do return #[]
    let mctx := i.mctxAfter
    let mvdecls := (i.goalsAfter.map (mctx.decls.find? ·)).reduceOption
    -- we extract the `MetavarDecl` with largest index after a `have`, since this one
    -- holds information about the metavariable where `have` introduces the new hypothesis.
    let largestIdx := mvdecls.toArray.qsort (·.index > ·.index)
    -- the relevant `LocalContext`
    let lc := (largestIdx.getD 0 default).lctx
    -- we also accumulate all `fvarId`s from all local contexts before the use of `have`
    -- so that we can then isolate the `fvarId`s that are created by `have`
    let oldMvdecls := (i.goalsBefore.map (mctx.decls.find? ·)).reduceOption
    let oldLctx := oldMvdecls.map (·.lctx)
    let oldDecls := (oldLctx.map (·.decls.toList.reduceOption)).join
    let oldFVars := oldDecls.map (·.fvarId)
    -- `newDecls` are the local declarations whose `FVarID` did not exist before the `have`
    -- effectively they are the declarations that we want to test for being in `Prop` or not.
    let newDecls := lc.decls.toList.reduceOption.filter (! oldFVars.contains ·.fvarId)
    -- now, we get the `MetaM` state up and running to find the types of each entry of `newDecls`
    let fmts ← areProp_toFormat ctx lc (newDecls.map (·.type)).toArray
    let (_propFmts, typeFmts) := (fmts.zip (newDecls.map (·.userName)).toArray).partition (·.1.1)
    -- everything that is a Type triggers a warning on `have`
    return typeFmts.map fun ((_, fmt), na) => (stx, f!"{na} : {fmt}"))

/-- The main implementation of the `have` vs `let` linter. -/
def haveLetLinter : Linter where run := withSetOptionIn fun _stx => do
  let gh := linter.haveLet.get (← getOptions)
  unless gh != 0 && (← getInfoState).enabled do
    return
  unless gh == 1 && (← MonadState.get).messages.unreported.isEmpty do
    let trees ← getInfoTrees
    for t in trees.toArray do
      for (s, fmt) in ← nonPropHaves t do
        -- Since the linter option is not in `Bool`, the standard `Linter.logLint` does not work.
        -- We emulate it with `logWarningAt`
        logWarningAt s <| .tagged linter.haveLet.name
          m!"'{fmt}' is a Type and not a Prop. Consider using 'let' instead of 'have'.\n\
          You can disable this linter using `set_option linter.haveLet 0`"

initialize addLinter haveLetLinter
Tactic\Linter\Lint.lean
/-
Copyright (c) 2023 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import Lean.Linter.Util
import Batteries.Data.String.Matcher
import Batteries.Tactic.Lint

/-!
# Linters for Mathlib

In this file we define additional linters for mathlib.

Perhaps these should be moved to Batteries in the future.
-/

namespace Std.Tactic.Lint

open Lean Meta

/-- Linter that checks whether a structure should be in Prop. -/
@[env_linter] def structureInType : Linter where
  noErrorsFound := "no structures that should be in Prop found."
  errorsFound := "FOUND STRUCTURES THAT SHOULD BE IN PROP."
  test declName := do
    unless isStructure (← getEnv) declName do return none
    -- remark: using `Lean.Meta.isProp` doesn't suffice here, because it doesn't (always?)
    -- recognize predicates as propositional.
    let isProp ← forallTelescopeReducing (← inferType (← mkConstWithLevelParams declName))
      fun _ ty => return ty == .sort .zero
    if isProp then return none
    let projs := (getStructureInfo? (← getEnv) declName).get!.fieldNames
    if projs.isEmpty then return none -- don't flag empty structures
    let allProofs ← projs.allM (do isProof <| ← mkConstWithLevelParams <| declName ++ ·)
    unless allProofs do return none
    return m!"all fields are propositional but the structure isn't."

/-- Linter that checks that all `deprecated` tags come with `since` dates. -/
@[env_linter] def deprecatedNoSince : Linter where
  noErrorsFound := "no `deprecated` tags without `since` dates."
  errorsFound := "FOUND `deprecated` tags without `since` dates."
  test declName := do
    let some info := Lean.Linter.deprecatedAttr.getParam? (← getEnv) declName | return none
    match info.since? with
    | some _ => return none -- TODO: enforce `YYYY-MM-DD` format
    | none => return m!"`deprecated` attribute without `since` date"

end Std.Tactic.Lint

namespace Mathlib.Linter

/-!
# `dupNamespace` linter

The `dupNamespace` linter produces a warning when a declaration contains the same namespace
at least twice consecutively.

For instance, `Nat.Nat.foo` and `One.two.two` trigger a warning, while `Nat.One.Nat` does not.
-/

/-- The `dupNamespace` linter is set on by default.  Lean emits a warning on any declaration that
contains the same namespace at least twice consecutively.

For instance, `Nat.Nat.foo` and `One.two.two` trigger a warning, while `Nat.One.Nat` does not.

*Note.* This linter will not detect duplication in namespaces of autogenerated declarations
(other than the one whose `declId` is present in the source declaration). -/
register_option linter.dupNamespace : Bool := {
  defValue := true
  descr := "enable the duplicated namespace linter" }

namespace DupNamespaceLinter

open Lean Parser Elab Command Meta

/-- Gets the value of the `linter.dupNamespace` option. -/
def getLinterDupNamespace (o : Options) : Bool := Linter.getLinterValue linter.dupNamespace o

/-- `getIds stx` extracts the `declId` nodes from the `Syntax` `stx`.
If `stx` is an `alias` or an `export`, then it extracts an `ident`, instead of a `declId`. -/
partial def getIds : Syntax → Array Syntax
  | .node _ `Batteries.Tactic.Alias.alias args => args[2:3]
  | .node _ ``Lean.Parser.Command.export args => (args[3:4] : Array Syntax).map (·[0])
  | stx@(.node _ _ args) =>
    ((args.attach.map fun ⟨a, _⟩ => getIds a).foldl (· ++ ·) #[stx]).filter
      (·.getKind == ``declId)
  | _ => default

@[inherit_doc linter.dupNamespace]
def dupNamespace : Linter where run := withSetOptionIn fun stx => do
  if getLinterDupNamespace (← getOptions) then
    match getIds stx with
    | #[id] =>
      let ns := (← getScope).currNamespace
      let declName := ns ++ (if id.getKind == ``declId then id[0].getId else id.getId)
      let nm := declName.components
      -- look for two equal consecutive components of the declaration name
      let some (dup, _) := nm.zip (nm.tailD []) |>.find? fun (x, y) => x == y | return
      Linter.logLint linter.dupNamespace id
        m!"The namespace '{dup}' is duplicated in the declaration '{declName}'"
    | _ => return

initialize addLinter dupNamespace

end DupNamespaceLinter

/-!
# The "missing end" linter

The "missing end" linter emits a warning on non-closed `section`s and `namespace`s.
It allows the "outermost" `noncomputable section` to be left open (whether or not it is named).
-/

open Lean Elab Command

/-- The "missing end" linter emits a warning on non-closed `section`s and `namespace`s.
It allows the "outermost" `noncomputable section` to be left open (whether or not it is named). -/
register_option linter.missingEnd : Bool := {
  defValue := true
  descr := "enable the missing end linter" }

namespace MissingEnd

/-- Gets the value of the `linter.missingEnd` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.missingEnd o

@[inherit_doc Mathlib.Linter.linter.missingEnd]
def missingEndLinter : Linter where run := withSetOptionIn fun stx ↦ do
    -- Only run this linter at the end of a module.
    unless stx.isOfKind ``Lean.Parser.Command.eoi do return
    -- TODO: once mathlib's Lean version includes leanprover/lean4#4741, make this configurable
    unless #[`Mathlib, `test, `Archive, `Counterexamples].contains (← getMainModule).getRoot do
      return
    if getLinterHash (← getOptions) && !(← MonadState.get).messages.hasErrors then
      let sc ← getScopes
      -- The last scope is always the "base scope", corresponding to no active `section`s or
      -- `namespace`s. We are interested in any *other* unclosed scopes.
      if sc.length == 1 then return
      let ends := sc.dropLast.map fun s ↦ (s.header, s.isNoncomputable)
      -- If the outermost scope corresponds to a `noncomputable section`, we ignore it.
      let ends := if ends.getLast!.2 then ends.dropLast else ends
      -- If there are any further un-closed scopes, we emit a warning.
      if !ends.isEmpty then
        let ending := (ends.map Prod.fst).foldl (init := "") fun a b ↦
          a ++ s!"\n\nend{if b == "" then "" else " "}{b}"
        Linter.logLint linter.missingEnd stx
          m!"unclosed sections or namespaces; expected: '{ending}'"

initialize addLinter missingEndLinter

end MissingEnd

/-!
# The `cdot` linter

The `cdot` linter is a syntax-linter that flags uses of the "cdot" `·` that are achieved
by typing a character different from `·`.
For instance, a "plain" dot `.` is allowed syntax, but is flagged by the linter.
-/

/-- The `cdot` linter flags uses of the "cdot" `·` that are achieved by typing a character
different from `·`.
For instance, a "plain" dot `.` is allowed syntax, but is flagged by the linter. -/
register_option linter.cdot : Bool := {
  defValue := true
  descr := "enable the `cdot` linter" }

/-- `isCDot? stx` checks whether `stx` is a `Syntax` node corresponding to a `cdot` typed with
the character `·`. -/
def isCDot? : Syntax → Bool
  | .node _ ``cdotTk #[.node _ `patternIgnore #[.node _ _ #[.atom _ v]]] => v == "·"
  | .node _ ``Lean.Parser.Term.cdot #[.atom _ v] => v == "·"
  | _ => false

/--
`findCDot stx` extracts from `stx` the syntax nodes of `kind` `Lean.Parser.Term.cdot` or `cdotTk`.
-/
partial def findCDot : Syntax → Array Syntax
  | stx@(.node _ kind args) =>
    let dargs := (args.map findCDot).flatten
    match kind with
      | ``Lean.Parser.Term.cdot | ``cdotTk => dargs.push stx
      | _ => dargs
  | _ => #[]

/-- `unwanted_cdot stx` returns an array of syntax atoms within `stx` corresponding to `cdot`s
that are not written with the character `·`.
This is precisely what the `cdot` linter flags. -/
def unwanted_cdot (stx : Syntax) : Array Syntax :=
  (findCDot stx).filter (!isCDot? ·)

namespace CDotLinter

/-- Gets the value of the `linter.cdot` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.cdot o

@[inherit_doc linter.cdot]
def cdotLinter : Linter where run := withSetOptionIn fun stx => do
    unless getLinterHash (← getOptions) do return
    if (← MonadState.get).messages.hasErrors then return
    for s in unwanted_cdot stx do
      Linter.logLint linter.cdot s
        m!"Please, use '·' (typed as `\\·`) instead of '{s}' as 'cdot'."

initialize addLinter cdotLinter

end CDotLinter

/-!
# The "longLine linter"
-/

/-- The "longLine" linter emits a warning on lines longer than 100 characters.
We allow lines containing URLs to be longer, though. -/
register_option linter.longLine : Bool := {
  defValue := true
  descr := "enable the longLine linter" }

namespace LongLine

/-- Gets the value of the `linter.longLine` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.longLine o

@[inherit_doc Mathlib.Linter.linter.longLine]
def longLineLinter : Linter where run := withSetOptionIn fun stx ↦ do
    unless getLinterHash (← getOptions) do return
    if (← MonadState.get).messages.hasErrors then return
    -- TODO: once mathlib's Lean version includes leanprover/lean4#4741, make this configurable
    unless #[`Mathlib, `test, `Archive, `Counterexamples].contains (← getMainModule).getRoot do
      return
    -- The linter ignores the `#guard_msgs` command, in particular its doc-string.
    -- The linter still lints the message guarded by `#guard_msgs`.
    if stx.isOfKind ``Lean.guardMsgsCmd then return
    -- if the linter reached the end of the file, then we scan the `import` syntax instead
    let stx := ← do
      if stx.isOfKind ``Lean.Parser.Command.eoi then
        let fname ← getFileName
        if !(← System.FilePath.pathExists fname) then return default
        let contents ← IO.FS.readFile fname
        -- `impMods` is the syntax for the modules imported in the current file
        let (impMods, _) ← Parser.parseHeader (Parser.mkInputContext contents fname)
        return impMods
      else return stx
    let sstr := stx.getSubstring?
    let fm ← getFileMap
    let longLines := ((sstr.getD default).splitOn "\n").filter fun line =>
      (100 < (fm.toPosition line.stopPos).column)
    for line in longLines do
      if !(line.containsSubstr "http") then
        Linter.logLint linter.longLine (.ofRange ⟨line.startPos, line.stopPos⟩)
          m!"This line exceeds the 100 character limit, please shorten it!"

initialize addLinter longLineLinter

end LongLine

end Mathlib.Linter
Tactic\Linter\MinImports.lean
/-
Copyright (c) 2024 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/

import ImportGraph.Imports
import Mathlib.Tactic.MinImports

/-! # The `minImports` linter

The `minImports` linter incrementally computes the minimal imports needed for each file to build.
Whenever it detects that a new command requires an increase in the (transitive) imports that it
computed so far, it emits a warning mentioning the bigger minimal imports.

Unlike the related `#min_imports` command, the linter takes into account notation and tactic
information.
It also works incrementally, accumulating increasing import information.
This is better suited, for instance, to split files.
-/

open Lean Elab Command

/-!
# The "minImports" linter

The "minImports" linter tracks information about minimal imports over several commands.
-/

namespace Mathlib.Linter

/-- `minImportsRef` keeps track of cumulative imports across multiple commands. -/
initialize minImportsRef : IO.Ref NameSet ← IO.mkRef {}

/-- `#reset_min_imports` sets to empty the current list of cumulative imports. -/
elab "#reset_min_imports" : command => minImportsRef.set {}

/--
The `minImports` linter incrementally computes the minimal imports needed for each file to build.
Whenever it detects that a new command requires an increase in the (transitive) imports that it
computed so far, it emits a warning mentioning the bigger minimal imports.

Unlike the related `#min_imports` command, the linter takes into account notation and tactic
information.
It also works incrementally, providing information that is better suited, for instance, to split
files. -/
register_option linter.minImports : Bool := {
  defValue := false
  descr := "enable the minImports linter"
}

namespace MinImports

open Mathlib.Command.MinImports

/-- Gets the value of the `linter.minImports` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.minImports o

@[inherit_doc Mathlib.Linter.linter.minImports]
def minImportsLinter : Linter where run := withSetOptionIn fun stx => do
    -- NOTE(review): this reads the option directly rather than via `getLinterHash` /
    -- `Linter.getLinterValue` (defined just above) — confirm whether bypassing the
    -- linter-framework lookup is intended.
    unless linter.minImports.get (← getOptions) do return
    if (← MonadState.get).messages.hasErrors then return
    -- Ignore the very command that switches this linter on.
    if stx == (← `(command| set_option $(mkIdent `linter.minImports) true)) then return
    let importsSoFar ← minImportsRef.get
    -- when the linter reaches the end of the file or `#exit`, it gives a report
    if #[``Parser.Command.eoi, ``Lean.Parser.Command.exit].contains stx.getKind then
      let explicitImportsInFile : NameSet :=
        .fromArray (((← getEnv).imports.map (·.module)).erase `Init) Name.quickCmp
      let newImps := importsSoFar.diff explicitImportsInFile
      let currentlyUnneededImports := explicitImportsInFile.diff importsSoFar
      -- we read the current file, to do a custom parsing of the imports:
      -- this is a hack to obtain some `Syntax` information for the `import X` commands
      let fname ← getFileName
      let contents ← IO.FS.readFile fname
      -- `impMods` is the syntax for the modules imported in the current file
      let (impMods, _) ← Parser.parseHeader (Parser.mkInputContext contents fname)
      for i in currentlyUnneededImports do
        match impMods.find? (·.getId == i) with
          | some impPos => logWarningAt impPos m!"unneeded import '{i}'"
          | _ => dbg_trace f!"'{i}' not found" -- this should be unreachable
      -- if the linter found new imports that should be added (likely to *reduce* the dependencies)
      if !newImps.isEmpty then
        -- format the imports prepending `import ` to each module name
        let withImport := (newImps.toArray.qsort Name.lt).map (s!"import {·}")
        -- log a warning at the first `import`, if there is one.
        logWarningAt ((impMods.find? (·.isOfKind `import)).getD default)
          m!"-- missing imports\n{"\n".intercalate withImport.toList}"
    let id ← getId stx
    let newImports := getIrredundantImports (← getEnv) (← getAllImports stx id)
    let tot := (newImports.append importsSoFar)
    let redundant := (← getEnv).findRedundantImports tot.toArray
    let currImports := tot.diff redundant
    let currImpArray := currImports.toArray.qsort Name.lt
    -- Only update the reference (and warn) when the accumulated import set actually grew.
    if currImpArray != #[] &&
       currImpArray ≠ importsSoFar.toArray.qsort Name.lt then
      minImportsRef.modify fun _ => currImports
      Linter.logLint linter.minImports stx m!"Imports increased to\n{currImpArray}"

initialize addLinter minImportsLinter

end MinImports

end Mathlib.Linter
Tactic\Linter\OldObtain.lean
/-
Copyright (c) 2024 Michael Rothgang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Rothgang
-/

import Lean.Elab.Command
import Lean.Linter.Util

/-! # The `oldObtain` linter, against stream-of-consciousness `obtain`

The `oldObtain` linter flags any occurrences of "stream-of-consciousness" `obtain`,
i.e. uses of the `obtain` tactic which do not immediately provide a proof.

## Example

There are six different kinds of `obtain` uses. In one example, they look like this.
```
theorem foo : True := by
  -- These cases are fine.
  obtain := trivial
  obtain h := trivial
  obtain : True := trivial
  obtain h : True := trivial
  -- These are linted against.
  obtain : True
  · trivial
  obtain h : True
  · trivial
```
We allow the first four (since an explicit proof is provided), but lint against the last two.

## Why is this bad?

This is similar to removing all uses of `Tactic.Replace` and `Tactic.Have` from mathlib:
in summary,
- this version is a Lean3-ism, which can be unlearned now
- the syntax `obtain foo : type := proof` is slightly shorter; particularly so when the first
tactic of the proof is `exact`
- when using the old syntax as `obtain foo : type; · proof`, there is an intermediate state with
multiple goals right before the focusing dot. This can be confusing.
(This gets amplified with the in-flight "multiple goal linter", which seems generally desired ---
for many reasons, including teachability. Granted, the linter could be tweaked to not lint in this
case... but by now, the "old" syntax is not clearly better.)
- the old syntax *could* be slightly nicer when deferring goals: however, this is rare.
In the 30 replacements of the last PR, this occurred twice. In both cases, the `suffices` tactic
could also be used, as was in fact clearer.
-/

open Lean Elab

namespace Mathlib.Linter.Style

/-- Whether a syntax element is an `obtain` tactic call without a provided proof. -/
def is_obtain_without_proof : Syntax → Bool
  -- Using the `obtain` tactic without a proof requires proving a type;
  -- a pattern is optional.
  | `(tactic|obtain : $_type) | `(tactic|obtain $_pat : $_type) => true
  | _ => false

/-- The `oldObtain` linter emits a warning upon uses of the "stream-of-consciousness" variants
of the `obtain` tactic, i.e. with the proof postponed. -/
register_option linter.oldObtain : Bool := {
  defValue := true
  descr := "enable the `oldObtain` linter"
}

/-- Gets the value of the `linter.oldObtain` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.oldObtain o

/-- The `oldObtain` linter: flags the first proof-less `obtain` in each command,
see the module docstring for motivation. -/
def oldObtainLinter : Linter where run := withSetOptionIn fun stx => do
    unless getLinterHash (← getOptions) do
      return
    -- Do not add lint warnings on top of actual elaboration errors.
    if (← MonadState.get).messages.hasErrors then
      return
    if let some head := stx.find? is_obtain_without_proof then
      -- Fixed typo: the message previously read "stream-of-conciousness".
      Linter.logLint linter.oldObtain head m!"Please remove stream-of-consciousness `obtain` syntax"

initialize addLinter oldObtainLinter

end Mathlib.Linter.Style
Tactic\Linter\RefineLinter.lean
/-
Copyright (c) 2024 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/

import Lean.Elab.Command
import Lean.Linter.Util

/-!
# The "refine" linter

The "refine" linter flags usages of the `refine'` tactic.

The tactics `refine` and `refine'` are similar, but they handle meta-variables slightly
differently. This means that they are not completely interchangeable, nor can one completely
replace the other. However, `refine` is more readable and (heuristically) tends to be more
efficient on average.

This linter is an incentive to discourage uses of `refine'`, without being a ban.
-/

open Lean Elab

namespace Mathlib.Linter.refine

/-- The refine linter emits a warning on usages of `refine'`. -/
register_option linter.refine : Bool := {
  defValue := true
  descr := "enable the refine linter"
}

/-- `getRefine' t` returns all usages of the `refine'` tactic in the input syntax `t`. -/
partial def getRefine' : Syntax → Array Syntax
  | stx@(.node _ kind args) =>
    -- Collect `refine'` calls from all children, then add `stx` itself if it is one.
    let rargs := (args.map getRefine').flatten
    if kind == ``Lean.Parser.Tactic.refine' then rargs.push stx else rargs
  -- Atoms, identifiers and missing syntax contain no nested tactic calls.
  -- (Was `default`, which is the same empty array; `#[]` is explicit.)
  | _ => #[]

/-- The "refine" linter flags usages of the `refine'` tactic.

The tactics `refine` and `refine'` are similar, but they handle meta-variables slightly
differently. This means that they are not completely interchangeable, nor can one completely
replace the other. However, `refine` is more readable and (heuristically) tends to be more
efficient on average. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.refine o

@[inherit_doc getLinterHash]
def refineLinter : Linter where run := withSetOptionIn fun stx => do
    -- The binder was previously named `_stx` despite being used below; renamed to `stx`.
    unless getLinterHash (← getOptions) do
      return
    -- Do not add lint warnings on top of actual elaboration errors.
    if (← MonadState.get).messages.hasErrors then
      return
    for r in getRefine' stx do
      Linter.logLint linter.refine r
        "The `refine'` tactic is discouraged: \
        please strongly consider using `refine` or `apply` instead."

initialize addLinter refineLinter
Tactic\Linter\Style.lean
/-
Copyright (c) 2024 Michael Rothgang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Rothgang
-/

import Lean.Elab.Command
import Lean.Linter.Util

/-!
## Style linters

This file contains (currently one, eventually more) linters about stylistic aspects:
these are only about coding style, but do not affect correctness nor global coherence of mathlib.

Historically, these were ported from the `lint-style.py` Python script.
-/

open Lean Elab Command

namespace Mathlib.Linter

/-- The `setOption` linter emits a warning on a `set_option` command, term or tactic
which sets a `debug`, `pp`, `profiler` or `trace` option. -/
register_option linter.setOption : Bool := {
  defValue := true
  descr := "enable the `setOption` linter"
}

namespace Style.SetOption

/-- Whether a syntax element is a `set_option` command, tactic or term:
Return the name of the option being set, if any. -/
def parse_set_option : Syntax → Option Name
  -- This handles all four possibilities of `_val`: a string, number, `true` and `false`.
  | `(command|set_option $name:ident $_val) => some name.getId
  | `(set_option $name:ident $_val in $_x) => some name.getId
  | `(tactic|set_option $name:ident $_val in $_x) => some name.getId
  | _ => none

/-- Whether a given piece of syntax is a `set_option` command, tactic or term. -/
def is_set_option : Syntax → Bool :=
  fun stx ↦ parse_set_option stx matches some _name

/-- Gets the value of the `linter.setOption` option. -/
def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.setOption o

/-- The `setOption` linter: this lints any `set_option` command, term or tactic
which sets a `debug`, `pp`, `profiler` or `trace` option.

**Why is this bad?** These options are good for debugging, but should not be
used in production code.
**How to fix this?** Remove these options: usually, they are not necessary for production code.
(Some tests will intentionally use one of these options; in this case, simply allow the linter.)
-/
def setOptionLinter : Linter where run := withSetOptionIn fun stx => do
    unless getLinterHash (← getOptions) do
      return
    -- Do not add lint warnings on top of actual elaboration errors.
    if (← MonadState.get).messages.hasErrors then
      return
    -- TODO: once mathlib's Lean version includes leanprover/lean4#4741, make this configurable
    unless #[`Mathlib, `test, `Archive, `Counterexamples].contains (← getMainModule).getRoot do
      return
    if let some head := stx.find? is_set_option then
      if let some name := parse_set_option head then
        -- Only the root of the option name matters: e.g. `pp.all` is flagged via `pp`.
        let forbidden := [`debug, `pp, `profiler, `trace]
        if forbidden.contains name.getRoot then
          Linter.logLint linter.setOption head
            m!"Setting options starting with '{"', '".intercalate (forbidden.map (·.toString))}' \
            is only intended for development and not for final code. \
            If you intend to submit this contribution to the Mathlib project, \
            please remove 'set_option {name}'."

initialize addLinter setOptionLinter

end Style.SetOption

end Mathlib.Linter
Tactic\Linter\TextBased.lean
/-
Copyright (c) 2024 Michael Rothgang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Rothgang
-/

import Batteries.Data.String.Matcher
import Mathlib.Data.Nat.Notation

/-!
## Text-based linters

This file defines various mathlib linters which are based on reading the source code only.
In practice, only style linters will have this form.
All of these have been rewritten from the `lint-style.py` script.

For now, this only contains the linters for the copyright and author headers and large files:
further linters will be ported in subsequent PRs.

An executable running all these linters is defined in `scripts/lint_style.lean`.
-/

open System

/-- Different kinds of "broad imports" that are linted against. -/
inductive BroadImports
  /-- Importing the entire "Mathlib.Tactic" folder -/
  | TacticFolder
  /-- Importing any module in `Lake`, unless carefully measured
  This has caused unexpected regressions in the past. -/
  | Lake
deriving BEq

/-- Possible errors that text-based linters can report. -/
-- We collect these in one inductive type to centralise error reporting.
inductive StyleError where
  /-- Missing or malformed copyright header.
  Unlike in the python script, we may provide some context on the actual error. -/
  | copyright (context : Option String)
  /-- Malformed authors line in the copyright header -/
  | authors
  /-- The bare string "Adaptation note" (or variants thereof): instead, the
  #adaptation_note command should be used. -/
  | adaptationNote
  /-- Lint against "too broad" imports, such as `Mathlib.Tactic` or any module in `Lake`
  (unless carefully measured) -/
  | broadImport (module : BroadImports)
  /-- Line longer than 100 characters -/
  | lineLength (actual : Int) : StyleError
  /-- The current file was too large: this error contains the current number of lines
  as well as a size limit (slightly larger). On future runs, this linter will allow this file
  to grow up to this limit.
  For diagnostic purposes, this may also contain a previous size limit, which is now exceeded. -/
  | fileTooLong (number_lines : ℕ) (new_size_limit : ℕ) (previousLimit : Option ℕ) : StyleError
deriving BEq

/-- How to format style errors -/
inductive ErrorFormat
  /-- Produce style error output aimed at humans: no error code, clickable file name -/
  | humanReadable : ErrorFormat
  /-- Produce an entry in the style-exceptions file: mention the error code, slightly uglier
  than human-readable output -/
  | exceptionsFile : ErrorFormat
  /-- Produce output suitable for Github error annotations: in particular,
  duplicate the file path, line number and error code -/
  | github : ErrorFormat
  deriving BEq

/-- Create the underlying error message for a given `StyleError`. -/
def StyleError.errorMessage (err : StyleError) (style : ErrorFormat) : String := match err with
  | StyleError.copyright (some context) => s!"Malformed or missing copyright header: {context}"
  | StyleError.copyright none => "Malformed or missing copyright header"
  | StyleError.authors =>
    "Authors line should look like: 'Authors: Jean Dupont, Иван Иванович Иванов'"
  | StyleError.adaptationNote =>
    "Found the string \"Adaptation note:\", please use the #adaptation_note command instead"
  | StyleError.broadImport BroadImports.TacticFolder =>
    "Files in mathlib cannot import the whole tactic folder"
  | StyleError.broadImport BroadImports.Lake =>
    "In the past, importing 'Lake' in mathlib has led to dramatic slow-downs of the linter (see \
    e.g. mathlib4#13779). Please consider carefully if this import is useful and make sure to \
    benchmark it. If this is fine, feel free to allow this linter."
  | StyleError.lineLength n => s!"Line has {n} characters, which is more than 100"
  | StyleError.fileTooLong current_size size_limit previousLimit =>
    match style with
    | ErrorFormat.github =>
      if let some n := previousLimit then
        s!"file contains {current_size} lines (at most {n} allowed), try to split it up"
      else s!"file contains {current_size} lines, try to split it up"
    | ErrorFormat.exceptionsFile =>
      -- The size limit is prepended so that `parse?_errorContext` can round-trip it.
      s!"{size_limit} file contains {current_size} lines, try to split it up"
    | ErrorFormat.humanReadable => s!"file contains {current_size} lines, try to split it up"

/-- The error code for a given style error. Keep this in sync with `parse?_errorContext` below! -/
-- FUTURE: we're matching the old codes in `lint-style.py` for compatibility;
-- in principle, we could also print something more readable.
def StyleError.errorCode (err : StyleError) : String := match err with
  | StyleError.copyright _ => "ERR_COP"
  | StyleError.authors => "ERR_AUT"
  | StyleError.adaptationNote => "ERR_ADN"
  | StyleError.broadImport _ => "ERR_IMP"
  | StyleError.lineLength _ => "ERR_LIN"
  | StyleError.fileTooLong _ _ _ => "ERR_NUM_LIN"

/-- Context for a style error: the actual error, the line number in the file we're reading
and the path to the file. -/
structure ErrorContext where
  /-- The underlying `StyleError` -/
  error : StyleError
  /-- The line number of the error (1-based) -/
  lineNumber : ℕ
  /-- The path to the file which was linted -/
  path : FilePath

/-- Possible results of comparing an `ErrorContext` to an `existing` entry:
most often, they are different --- if the existing entry covers the new exception,
depending on the error, we prefer the new or the existing entry. -/
inductive ComparisonResult
  /-- The contexts describe different errors: two separate style exceptions are required
  to cover both. -/
  | Different
  /-- The existing exception also covers the new error.
  Indicate whether we prefer keeping the existing exception (the more common case) or would rather
  replace it by the new exception (this is more rare, and currently only happens for particular
  file length errors). -/
  | Comparable (preferExisting : Bool)
  deriving BEq

/-- Determine whether a `new` `ErrorContext` is covered by an `existing` exception,
and, if it is, if we prefer replacing the new exception or keeping the previous one. -/
def compare (existing new : ErrorContext) : ComparisonResult :=
  -- Two comparable error contexts must have the same path.
  if existing.path != new.path then ComparisonResult.Different
  -- We entirely ignore their line numbers: not sure if this is best.
  -- NB: keep the following in sync with `parse?_errorContext` below.
  -- Generally, comparable errors must have equal `StyleError`s, but there are some exceptions.
  else match (existing.error, new.error) with
  -- File length errors are the biggest exceptions: generally, we prefer to keep the
  -- existing entry, *except* when a newer entry is much shorter.
  | (StyleError.fileTooLong n nLimit _, StyleError.fileTooLong m _mLimit _) =>
    -- The only exception are "file too long" errors.
    -- If a file got much longer, the existing exception does not apply;
    if m > nLimit then ComparisonResult.Different
    -- if it does apply, we prefer to keep the existing entry,
    -- *unless* the newer entry is much shorter.
    else if m + 200 <= n then ComparisonResult.Comparable false
    else ComparisonResult.Comparable true
  -- We do *not* care about the *kind* of wrong copyright,
  -- nor about the particular length of a too long line.
  | (StyleError.copyright _, StyleError.copyright _) => ComparisonResult.Comparable true
  | (StyleError.lineLength _, StyleError.lineLength _) => ComparisonResult.Comparable true
  -- In all other cases, `StyleErrors` must compare equal.
  | (a, b) => if a == b then ComparisonResult.Comparable true else ComparisonResult.Different

/-- Find the first style exception in `exceptions` (if any) which covers a style exception `e`. -/
def ErrorContext.find?_comparable (e : ErrorContext) (exceptions : Array ErrorContext) :
    Option ErrorContext :=
  (exceptions).find? (fun new ↦ compare e new matches ComparisonResult.Comparable _)

/-- Output the formatted error message, containing its context.
`style` specifies if the error should be formatted for humans to read, github problem matchers
to consume, or for the style exceptions file. -/
def outputMessage (errctx : ErrorContext) (style : ErrorFormat) : String :=
  let error_message := errctx.error.errorMessage style
  match style with
  | ErrorFormat.github =>
    -- We are outputting for github: duplicate file path, line number and error code,
    -- so that they are also visible in the plain text output.
    let path := errctx.path
    let nr := errctx.lineNumber
    let code := errctx.error.errorCode
    s!"::ERR file={path},line={nr},code={code}::{path}:{nr} {code}: {error_message}"
  | ErrorFormat.exceptionsFile =>
    -- Produce an entry in the exceptions file: with error code and "line" in front of the number.
    s!"{errctx.path} : line {errctx.lineNumber} : {errctx.error.errorCode} : {error_message}"
  | ErrorFormat.humanReadable =>
    -- Print for humans: clickable file name and omit the error code
    s!"error: {errctx.path}:{errctx.lineNumber}: {error_message}"

/-- Try parsing an `ErrorContext` from a string: return `some` if successful, `none` otherwise. -/
def parse?_errorContext (line : String) : Option ErrorContext := Id.run do
  let parts := line.split (· == ' ')
  match parts with
    | filename :: ":" :: "line" :: line_number :: ":" :: error_code :: ":" :: error_message =>
      -- Turn the filename into a path. In general, this is ambiguous if we don't know if we're
      -- dealing with e.g. Windows or POSIX paths. In our setting, this is fine, since no path
      -- component contains any path separator.
      let path := mkFilePath (filename.split (FilePath.pathSeparators.contains ·))
      -- Parse the error kind from the error code, ugh.
      -- NB: keep this in sync with `StyleError.errorCode` above!
      let err : Option StyleError := match error_code with
        -- Use default values for parameters which are ignored for comparing style exceptions.
        -- NB: keep this in sync with `compare` above!
        | "ERR_COP" => some (StyleError.copyright none)
        | "ERR_LIN" =>
          -- "Line has {n} characters, ...": the character count is the third word.
          if let some n := error_message.get? 2 then
            match String.toNat? n with
            | some n => return StyleError.lineLength n
            | none => none
          else none
        | "ERR_AUT" => some (StyleError.authors)
        | "ERR_ADN" => some (StyleError.adaptationNote)
        | "ERR_IMP" =>
          -- XXX tweak exceptions messages to ease parsing?
          if (error_message.get! 0).containsSubstr "tactic" then
            some (StyleError.broadImport BroadImports.TacticFolder)
          else
            some (StyleError.broadImport BroadImports.Lake)
        | "ERR_NUM_LIN" =>
          -- Parse the error message in the script. `none` indicates invalid input.
          match (error_message.get? 0, error_message.get? 3) with
          | (some limit, some current) =>
            match (String.toNat? limit, String.toNat? current) with
            | (some size_limit, some current_size) =>
              some (StyleError.fileTooLong current_size size_limit (some size_limit))
            | _ => none
          | _ => none
        | _ => none
      match String.toNat? line_number with
      | some n => err.map fun e ↦ (ErrorContext.mk e n path)
      | _ => none
    -- It would be nice to print an error on any line which doesn't match the above format,
    -- but is awkward to do so (this `def` is not in any IO monad). Hopefully, this is not necessary
    -- anyway as the style exceptions file is mostly automatically generated.
    | _ => none

/-- Parse all style exceptions for a line of input.
Return an array of all exceptions which could be parsed: invalid input is ignored. -/
def parseStyleExceptions (lines : Array String) : Array ErrorContext := Id.run do
  -- We treat all lines starting with "--" as a comment and ignore them.
  Array.filterMap (parse?_errorContext ·) (lines.filter (fun line ↦ !line.startsWith "--"))

/-- Print information about all errors encountered to standard output.
`style` specifies if the error should be formatted for humans to read, github problem matchers
to consume, or for the style exceptions file. -/
def formatErrors (errors : Array ErrorContext) (style : ErrorFormat) : IO Unit := do
  for e in errors do
    IO.println (outputMessage e style)

/-- Core logic of a text based linter: given a collection of lines,
return an array of all style errors with line numbers. -/
abbrev TextbasedLinter := Array String → Array (StyleError × ℕ)

/-! Definitions of the actual text-based linters. -/
section

/-- Return if `line` looks like a correct authors line in a copyright header. -/
def isCorrectAuthorsLine (line : String) : Bool :=
  -- We cannot reasonably validate the author names, so we look only for a few common mistakes:
  -- the file starting wrong, double spaces, using ' and ' between names,
  -- and ending the line with a period.
  line.startsWith "Authors: " && (!line.containsSubstr "  ") &&
    (!line.containsSubstr " and ") && (!line.endsWith ".")

/-- Lint a collection of input lines if they are missing an appropriate copyright header.

A copyright header should start at the very beginning of the file and contain precisely five lines,
including the copy year and holder, the license and main author(s) of the file (in this order). -/
def copyrightHeaderLinter : TextbasedLinter := fun lines ↦ Id.run do
  -- Unlike the Python script, we just emit one warning.
  -- NOTE(review): `extract 0 4` yields the four lines at indices 0–3 (the stop index is
  -- exclusive), so `start.get? 4` below is always `none` and the `(some "/-", some "-/")`
  -- arm can never match — confirm whether `extract 0 5` was intended.
  let start := lines.extract 0 4
  -- The header should start and end with blank comments.
  let _ := match (start.get? 0, start.get? 4) with
  | (some "/-", some "-/") => none
  | (some "/-", _) => return #[(StyleError.copyright none, 4)]
  | _ => return #[(StyleError.copyright none, 0)]
  -- If this is given, we go over the individual lines one by one,
  -- and provide some context on what is mis-formatted (if anything).
  let mut output := Array.mkEmpty 0
  -- By hypotheses above, start has at least five lines, so the `none` cases below are never hit.
  -- The first real line should state the copyright.
  if let some copy := start.get? 1 then
    if !(copy.startsWith "Copyright (c) 20" && copy.endsWith ". All rights reserved.") then
      output := output.push (StyleError.copyright "Copyright line is malformed", 2)
  -- The second line should be standard.
  let expectedSecondLine := "Released under Apache 2.0 license as described in the file LICENSE."
  if start.get? 2 != some expectedSecondLine then
    output := output.push (StyleError.copyright
      s!"Second line should be \"{expectedSecondLine}\"", 3)
  -- The third line should contain authors.
  if let some line := start.get? 3 then
    if !line.containsSubstr "Author" then
      output := output.push
        (StyleError.copyright "The third line should describe the file's main authors", 4)
    else
      -- If it does, we check the authors line is formatted correctly.
      if !isCorrectAuthorsLine line then
        output := output.push (StyleError.authors, 4)
  return output

/-- Lint on any occurrences of the string "Adaptation note:" or variants thereof. -/
def adaptationNoteLinter : TextbasedLinter := fun lines ↦ Id.run do
  let mut errors := Array.mkEmpty 0
  let mut lineNumber := 1
  for line in lines do
    -- We make this shorter to catch "Adaptation note", "adaptation note" and a missing colon.
    if line.containsSubstr "daptation note" then
      errors := errors.push (StyleError.adaptationNote, lineNumber)
    lineNumber := lineNumber + 1
  return errors

/-- Lint a collection of input strings if one of them contains an unnecessarily broad import. -/
def broadImportsLinter : TextbasedLinter := fun lines ↦ Id.run do
  let mut errors := Array.mkEmpty 0
  -- All import statements must be placed "at the beginning" of the file:
  -- we can have any number of blank lines, imports and single or multi-line comments.
  -- Doc comments, however, are not allowed: there is no item they could document.
  let mut inDocComment : Bool := False
  let mut lineNumber := 1
  for line in lines do
    if inDocComment then
      if line.endsWith "-/" then
        inDocComment := False
    else
      -- If `line` is just a single-line comment (starts with "--"), we just continue.
      if line.startsWith "/-" then
        inDocComment := True
      else if let some (rest) := line.dropPrefix? "import " then
        -- If there is any in-line or beginning doc comment on that line, trim that.
        -- Small hack: just split the string on space, "/" and "-":
        -- none of these occur in module names, so this is safe.
        if let some name := ((toString rest).split (" /-".contains ·)).head? then
          if name == "Mathlib.Tactic" then
            errors := errors.push (StyleError.broadImport BroadImports.TacticFolder, lineNumber)
          else if name == "Lake" || name.startsWith "Lake." then
            errors := errors.push (StyleError.broadImport BroadImports.Lake, lineNumber)
    lineNumber := lineNumber + 1
  return errors

/-- Iterates over a collection of strings, finding all lines which are longer than 101 chars.
We allow URLs to be longer, though.
NOTE(review): the condition `> 101` flags lines of length 102 and more, while the emitted
message (`StyleError.lineLength`) says "more than 100" — confirm which threshold is intended. -/
def lineLengthLinter : TextbasedLinter := fun lines ↦ Id.run do
  let errors := (lines.toList.enumFrom 1).filterMap (fun (line_number, line) ↦
    if line.length > 101 && !line.containsSubstr "http" then
      some (StyleError.lineLength line.length, line_number)
    else none)
  errors.toArray

/-- Whether a collection of lines consists *only* of imports, blank lines and single-line comments.
In practice, this means it's an imports-only file and exempt from almost all linting. -/
def isImportsOnlyFile (lines : Array String) : Bool :=
  -- The Python version also excluded multi-line comments: for all files generated by `mk_all`,
  -- this is in fact not necessary. (It is needed for `Tactic/Linter.lean`, though.)
  lines.all (fun line ↦ line.startsWith "import " || line == "" || line.startsWith "-- ")

/-- Error if a collection of lines is too large. "Too large" means more than 1500 lines
**and** longer than an optional previous limit.
If the file is too large, return a matching `StyleError`, which includes a new size limit
(which is somewhat larger than the current size). -/
def checkFileLength (lines : Array String) (existing_limit : Option ℕ) : Option StyleError :=
  Id.run do
  if lines.size > 1500 then
    let is_larger : Bool := match existing_limit with
    | some mark => lines.size > mark
    | none => true
    if is_larger then
      -- We add about 200 lines of slack to the current file size: small PRs will be unaffected,
      -- but sufficiently large PRs will get nudged towards splitting up this file.
      return some (StyleError.fileTooLong lines.size
        ((Nat.div lines.size 100) * 100 + 200) existing_limit)
  none

end

/-- All text-based linters registered in this file. -/
def allLinters : Array TextbasedLinter := #[
    copyrightHeaderLinter, adaptationNoteLinter, broadImportsLinter, lineLengthLinter
  ]

/-- Controls what kind of output this programme produces. -/
inductive OutputSetting : Type
  /-- Print any style error to standard output (the default) -/
  | print (style : ErrorFormat)
  /-- Update the style exceptions file (and still print style errors to standard output).
  This adds entries for any new exceptions, removes any entries which are no longer necessary,
  and tries to not modify exception entries unless necessary.
  To fully regenerate the exceptions file, delete `style-exceptions.txt` and run again
  in this mode. -/
  | update
  deriving BEq

/-- Read a file and apply all text-based linters. Return a list of all unexpected errors.
`sizeLimit` is any pre-existing limit on this file's size.
`exceptions` are any other style exceptions. -/
def lintFile (path : FilePath) (sizeLimit : Option ℕ) (exceptions : Array ErrorContext) :
    IO (Array ErrorContext) := do
  let lines ← IO.FS.lines path
  -- We don't need to run any checks on imports-only files.
  -- NB. The Python script used to still run a few linters; this is in fact not necessary.
  if isImportsOnlyFile lines then
    return #[]
  let mut errors := #[]
  if let some (StyleError.fileTooLong n limit ex) := checkFileLength lines sizeLimit then
    errors := #[ErrorContext.mk (StyleError.fileTooLong n limit ex) 1 path]
  let allOutput := (Array.map (fun lint ↦
    (Array.map (fun (e, n) ↦ ErrorContext.mk e n path)) (lint lines))) allLinters
  -- This list is not sorted: for github, this is fine.
  errors := errors.append
    (allOutput.flatten.filter (fun e ↦ (e.find?_comparable exceptions).isNone))
  return errors

/-- Lint a collection of modules for style violations.
Print formatted errors for all unexpected style violations to standard output;
update the list of style exceptions if configured so.
Return the number of files which had new style errors.
`moduleNames` are all the modules to lint,
`mode` specifies what kind of output this script should produce. -/
def lintModules (moduleNames : Array String) (mode : OutputSetting) : IO UInt32 := do
  -- Read the style exceptions file.
  -- We also have a `nolints` file with manual exceptions for the linter.
  let exceptionsFilePath : FilePath := "scripts" / "style-exceptions.txt"
  let exceptions ← IO.FS.lines exceptionsFilePath
  let mut styleExceptions := parseStyleExceptions exceptions
  let nolints ← IO.FS.lines ("scripts" / "nolints-style.txt")
  styleExceptions := styleExceptions.append (parseStyleExceptions nolints)
  let mut numberErrorFiles : UInt32 := 0
  let mut allUnexpectedErrors := #[]
  for module in moduleNames do
    -- Convert the module name to a file name, then lint that file.
    let path := (mkFilePath (module.split (· == '.'))).addExtension "lean"
    -- Find all size limits for this given file.
    -- If several size limits are given (unlikely in practice), we use the first one.
    let sizeLimits := (styleExceptions.filter (fun ex ↦ ex.path == path)).filterMap (fun errctx ↦
      match errctx.error with
      | StyleError.fileTooLong _ limit _ => some limit
      | _ => none)
    let errors :=
      if let OutputSetting.print _ := mode then
        ← lintFile path (sizeLimits.get? 0) styleExceptions
      else
        -- In "update" mode, we ignore the exceptions file (and only take `nolints` into account).
        ← lintFile path none (parseStyleExceptions nolints)
    if errors.size > 0 then
      allUnexpectedErrors := allUnexpectedErrors.append errors
      numberErrorFiles := numberErrorFiles + 1
  match mode with
  | OutputSetting.print style =>
    formatErrors allUnexpectedErrors style
    if numberErrorFiles > 0 && mode matches OutputSetting.print _ then
      IO.println s!"error: found {numberErrorFiles} new style errors\n\
        run `lake exe lint_style --update` to ignore all of them"
  | OutputSetting.update =>
    formatErrors allUnexpectedErrors ErrorFormat.humanReadable
    -- Regenerate the style exceptions file, including the Python output.
    IO.FS.writeFile exceptionsFilePath ""
    let python_output ← IO.Process.run { cmd := "./scripts/print-style-errors.sh" }
    -- Combine style exception entries: for each new error, replace by a corresponding
    -- previous exception if that is preferred.
    let mut tweaked := allUnexpectedErrors.map fun err ↦
      if let some existing := err.find?_comparable styleExceptions then
        if let ComparisonResult.Comparable (true) := _root_.compare err existing then existing
        else err
      else err
    let this_output := "\n".intercalate
      (tweaked.map (fun err ↦ outputMessage err ErrorFormat.exceptionsFile)).toList
    IO.FS.writeFile exceptionsFilePath s!"{python_output}{this_output}\n"
  return numberErrorFiles
Tactic\Linter\UnusedTactic.lean
/-
Copyright (c) 2024 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/

import Lean.Elab.Command
import Lean.Linter.Util
import Batteries.Tactic.Unreachable

/-!
# The unused tactic linter

The unused linter makes sure that every tactic call actually changes *something*.

The inner workings of the linter are as follows.
The linter inspects the goals before and after each tactic execution.
If they are not identical, the linter is happy.
If they are identical, then the linter checks if the tactic is whitelisted.
Possible reasons for whitelisting are
* tactics that emit messages, such as `have?`, `extract_goal`, or `says`;
* tactics that are in place to assert something, such as `guard`;
* tactics that allow working on a specific goal, such as `on_goal`;
* "flow control" tactics, such as `success_if_fail` and related.

The only tactic that has a bespoke criterion is `swap_var`:
the reason is that the only change that `swap_var` makes is to relabel the usernames of
local declarations.
Thus, to check that `swap_var` was used, we inspect the names of all the local declarations
before and after, and see if there is some change.

## Notable exclusions

* `conv` is completely ignored by the linter.

* The linter does not enter a "sequence tactic": upon finding `tac <;> [tac1, tac2, ...]`
  the linter assumes that the tactic is doing something and does not recurse into each
  `tac1, tac2, ...`.
  This is just for lack of an implementation: it may not be hard to do this.

* The tactic does not check the discharger for `linear_combination`,
  but checks `linear_combination` itself.
  The main reason is that `skip` is a common discharger tactic and the linter would
  then always fail whenever the user explicitly chose to pass `skip` as a discharger tactic.

## TODO
* The linter seems to be silenced by `set_option ... in`: maybe it should enter `in`s?
## Implementation notes Yet another linter copied from the `unreachableTactic` linter! -/ open Lean Elab namespace Mathlib.Linter /-- The unused tactic linter makes sure that every tactic call actually changes *something*. -/ register_option linter.unusedTactic : Bool := { defValue := true descr := "enable the unused tactic linter" } namespace UnusedTactic /-- The monad for collecting the ranges of the syntaxes that do not modify any goal. -/ abbrev M := StateRefT (HashMap String.Range Syntax) IO /-- `Parser`s allowed to not change the tactic state. This can be increased dynamically, using `#allow_unused_tactic`. -/ initialize allowedRef : IO.Ref (HashSet SyntaxNodeKind) ← IO.mkRef <| HashSet.empty |>.insert `Mathlib.Tactic.Says.says |>.insert `Batteries.Tactic.«tacticOn_goal-_=>_» -- attempt to speed up, by ignoring more tactics |>.insert `by |>.insert `null |>.insert `«]» |>.insert ``Lean.Parser.Term.byTactic |>.insert ``Lean.Parser.Tactic.tacticSeq |>.insert ``Lean.Parser.Tactic.tacticSeq1Indented |>.insert ``Lean.Parser.Tactic.tacticTry_ -- the following `SyntaxNodeKind`s play a role in silencing `test`s |>.insert ``Lean.Parser.Tactic.guardHyp |>.insert ``Lean.Parser.Tactic.guardTarget |>.insert ``Lean.Parser.Tactic.failIfSuccess |>.insert `Mathlib.Tactic.successIfFailWithMsg |>.insert `Mathlib.Tactic.failIfNoProgress |>.insert `Mathlib.Tactic.ExtractGoal.extractGoal |>.insert `Mathlib.Tactic.Propose.propose' |>.insert `Lean.Parser.Tactic.traceState |>.insert `Mathlib.Tactic.tacticMatch_target_ |>.insert `change? |>.insert `«tactic#adaptation_note_» /-- `#allow_unused_tactic` takes an input a space-separated list of identifiers. These identifiers are then allowed by the unused tactic linter: even if these tactics do not modify goals, there will be no warning emitted. Note: for this to work, these identifiers should be the `SyntaxNodeKind` of each tactic. 
For instance, you can allow the `done` and `skip` tactics using ```lean #allow_unused_tactic Lean.Parser.Tactic.done Lean.Parser.Tactic.skip ``` Notice that you should use the `SyntaxNodeKind` of the tactic. -/ elab "#allow_unused_tactic " ids:ident* : command => do let ids := ← Command.liftCoreM do ids.mapM realizeGlobalConstNoOverload allowedRef.modify (·.insertMany ids) /-- A list of blacklisted syntax kinds, which are expected to have subterms that contain unevaluated tactics. -/ initialize ignoreTacticKindsRef : IO.Ref NameHashSet ← IO.mkRef <| HashSet.empty |>.insert `Mathlib.Tactic.Says.says |>.insert ``Parser.Term.binderTactic |>.insert ``Lean.Parser.Term.dynamicQuot |>.insert ``Lean.Parser.Tactic.quotSeq |>.insert ``Lean.Parser.Tactic.tacticStop_ |>.insert ``Lean.Parser.Command.notation |>.insert ``Lean.Parser.Command.mixfix |>.insert ``Lean.Parser.Tactic.discharger |>.insert ``Lean.Parser.Tactic.Conv.conv |>.insert `Batteries.Tactic.seq_focus |>.insert `Mathlib.Tactic.Hint.registerHintStx |>.insert `Mathlib.Tactic.LinearCombination.linearCombination -- the following `SyntaxNodeKind`s play a role in silencing `test`s |>.insert ``Lean.Parser.Tactic.failIfSuccess |>.insert `Mathlib.Tactic.successIfFailWithMsg |>.insert `Mathlib.Tactic.failIfNoProgress /-- Is this a syntax kind that contains intentionally unused tactic subterms? -/ def isIgnoreTacticKind (ignoreTacticKinds : NameHashSet) (k : SyntaxNodeKind) : Bool := k.components.contains `Conv || "slice".isPrefixOf k.toString || match k with | .str _ "quot" => true | _ => ignoreTacticKinds.contains k /-- Adds a new syntax kind whose children will be ignored by the `unusedTactic` linter. This should be called from an `initialize` block. -/ def addIgnoreTacticKind (kind : SyntaxNodeKind) : IO Unit := ignoreTacticKindsRef.modify (·.insert kind) variable (ignoreTacticKinds : NameHashSet) (isTacKind : SyntaxNodeKind → Bool) in /-- Accumulates the set of tactic syntaxes that should be evaluated at least once. 
-/ @[specialize] partial def getTactics (stx : Syntax) : M Unit := do if let .node _ k args := stx then if !isIgnoreTacticKind ignoreTacticKinds k then args.forM getTactics if isTacKind k then if let some r := stx.getRange? true then modify fun m => m.insert r stx /-- `getNames mctx` extracts the names of all the local declarations implied by the `MetavarContext` `mctx`. -/ def getNames (mctx : MetavarContext) : List Name := let lcts := mctx.decls.toList.map (MetavarDecl.lctx ∘ Prod.snd) let locDecls := (lcts.map (PersistentArray.toList ∘ LocalContext.decls)).join.reduceOption locDecls.map LocalDecl.userName mutual /-- Search for tactic executions in the info tree and remove the syntax of the tactics that changed something. -/ partial def eraseUsedTacticsList (trees : PersistentArray InfoTree) : M Unit := trees.forM eraseUsedTactics /-- Search for tactic executions in the info tree and remove the syntax of the tactics that changed something. -/ partial def eraseUsedTactics : InfoTree → M Unit | .node i c => do if let .ofTacticInfo i := i then let stx := i.stx let kind := stx.getKind if let some r := stx.getRange? true then if (← allowedRef.get).contains kind -- if the tactic is allowed to not change the goals then modify (·.erase r) else -- if the goals have changed if i.goalsAfter != i.goalsBefore then modify (·.erase r) -- bespoke check for `swap_var`: the only change that it does is -- in the usernames of local declarations, so we check the names before and after else if (kind == `Mathlib.Tactic.«tacticSwap_var__,,») && (getNames i.mctxBefore != getNames i.mctxAfter) then modify (·.erase r) eraseUsedTacticsList c | .context _ t => eraseUsedTactics t | .hole _ => pure () end /-- Gets the value of the `linter.unusedTactic` option. -/ def getLinterHash (o : Options) : Bool := Linter.getLinterValue linter.unusedTactic o /-- The main entry point to the unused tactic linter. 
-/ def unusedTacticLinter : Linter where run := withSetOptionIn fun stx => do unless getLinterHash (← getOptions) && (← getInfoState).enabled do return if (← get).messages.hasErrors then return let cats := (Parser.parserExtension.getState (← getEnv)).categories -- These lookups may fail when the linter is run in a fresh, empty environment let some tactics := Parser.ParserCategory.kinds <$> cats.find? `tactic | return let some convs := Parser.ParserCategory.kinds <$> cats.find? `conv | return let trees ← getInfoTrees let go : M Unit := do getTactics (← ignoreTacticKindsRef.get) (fun k => tactics.contains k || convs.contains k) stx eraseUsedTacticsList trees let (_, map) ← go.run {} let unused := map.toArray let key (r : String.Range) := (r.start.byteIdx, (-r.stop.byteIdx : Int)) let mut last : String.Range := ⟨0, 0⟩ for (r, stx) in let _ := @lexOrd; let _ := @ltOfOrd.{0}; unused.qsort (key ·.1 < key ·.1) do if stx.getKind ∈ [``Batteries.Tactic.unreachable, ``Batteries.Tactic.unreachableConv] then continue if last.start ≤ r.start && r.stop ≤ last.stop then continue Linter.logLint linter.unusedTactic stx m!"'{stx}' tactic does nothing" last := r initialize addLinter unusedTacticLinter
Tactic\Measurability\Init.lean
/-
Copyright (c) 2023 Miyahara Kō. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Miyahara Kō
-/

import Aesop

/-!
# Measurability Rule Set

This module defines the `Measurable` Aesop rule set which is used by the
`measurability` tactic. Aesop rule sets only become visible once the file in which
they're declared is imported, so we must put this declaration into its own file.
-/

-- Declared in its own file so that merely importing this file makes the rule set visible
-- (see the module docstring above).
declare_aesop_rule_sets [Measurable]
Tactic\Monotonicity\Attr.lean
/- Copyright (c) 2023 Heather Macbeth. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Heather Macbeth -/ import Lean.LabelAttribute /-! # The @[mono] attribute -/ namespace Mathlib.Tactic.Monotonicity syntax mono.side := &"left" <|> &"right" <|> &"both" namespace Attr /-- A lemma stating the monotonicity of some function, with respect to appropriate relations on its domain and range, and possibly with side conditions. -/ syntax (name := mono) "mono" (ppSpace mono.side)? : attr -- The following is inlined from `register_label_attr`. /- TODO: currently `left`/`right`/`both` is ignored, and e.g. `@[mono left]` means the same as `@[mono]`. No error is thrown by e.g. `@[mono left]`. -/ -- TODO: possibly extend `register_label_attr` to handle trailing syntax open Lean in @[inherit_doc mono] initialize ext : LabelExtension ← ( let descr := "A lemma stating the monotonicity of some function, with respect to appropriate relations on its domain and range, and possibly with side conditions." let mono := `mono registerLabelAttr mono descr mono)
Tactic\Monotonicity\Basic.lean
/- Copyright (c) 2019 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Simon Hudon -/ import Lean.Elab.Tactic.SolveByElim import Mathlib.Tactic.Monotonicity.Attr /-! # Monotonicity tactic The tactic `mono` applies monotonicity rules (collected through the library by being tagged `@[mono]`). The version of the tactic here is a cheap partial port of the `mono` tactic from Lean 3, which had many more options and features. It is implemented as a wrapper on top of `solve_by_elim`. Temporary syntax change: Lean 3 `mono` applied a single monotonicity rule, then applied local hypotheses and the `rfl` tactic as many times as it could. This is hard to implement on top of `solve_by_elim` because the counting system used in the `maxDepth` field of its configuration would count these as separate steps, throwing off the count in the desired configuration `maxDepth := 1`. So instead we just implement a version of `mono` in which monotonicity rules, local hypotheses and `rfl` are all applied repeatedly until nothing more is applicable. The syntax for this in Lean 3 was `mono*`. Both `mono` and `mono*` implement this behavior for now. -/ open Lean Elab Tactic Parser Tactic open Tactic SolveByElim namespace Mathlib.Tactic.Monotonicity /-- `mono` applies monotonicity rules and local hypotheses repetitively. For example, ```lean example (x y z k : ℤ) (h : 3 ≤ (4 : ℤ)) (h' : z ≤ y) : (k + 3 + x) - y ≤ (k + 4 + x) - z := by mono ``` -/ syntax (name := mono) "mono" "*"? (ppSpace mono.side)? (" with " (colGt term),+)? (" using " (colGt simpArg),+)? : tactic elab_rules : tactic | `(tactic| mono $[*]? $[$h:mono.side]? $[ with%$w $a:term,*]? $[ using%$u $s,*]? 
) => do let msg (s : String) := s ++ " syntax is not yet supported in 'mono'" if let some h := h then throwErrorAt h (msg "'left'/'right'/'both'") if let some w := w then throwErrorAt w (msg "'with'") if let some u := u then throwErrorAt u (msg "'using'") let cfg ← elabApplyRulesConfig <| mkNullNode #[] let cfg := { cfg with backtracking := false transparency := .reducible exfalso := false } liftMetaTactic fun g => do processSyntax cfg false false [] [] #[mkIdent `mono] [g]
Tactic\Monotonicity\Lemmas.lean
/-
Copyright (c) 2019 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/

import Mathlib.Algebra.Order.Group.Abs
import Mathlib.Algebra.Order.Ring.Defs
import Mathlib.Algebra.Order.Sub.Canonical
import Mathlib.Data.Set.Lattice
import Mathlib.Tactic.Monotonicity.Attr

/-!
# Lemmas for the `mono` tactic

The `mono` tactic works by throwing all lemmas tagged with the attribute `@[mono]` at the goal.
In this file we tag a few foundational lemmas with the mono attribute.
Lemmas in more advanced files are tagged in place.
-/

open Set

attribute [mono] le_refl -- added for Lean 4 version

-- Monotonicity of set operations (intersection, union, image, preimage, products, ...).
attribute [mono] subset_refl inter_subset_inter union_subset_union sUnion_mono iUnion₂_mono
  sInter_subset_sInter iInter₂_mono image_subset preimage_mono prod_mono Monotone.set_prod
  seq_mono image2_subset OrderEmbedding.monotone

-- Monotonicity of upper/lower bounds and boundedness predicates.
attribute [mono] upperBounds_mono_set lowerBounds_mono_set upperBounds_mono_mem
  lowerBounds_mono_mem upperBounds_mono lowerBounds_mono BddAbove.mono BddBelow.mono

-- Monotonicity of arithmetic and lattice operations.
attribute [mono] add_le_add mul_le_mul neg_le_neg mul_lt_mul_of_pos_left mul_lt_mul_of_pos_right
  mul_le_mul_of_nonneg_left mul_le_mul_of_nonneg_right mul_le_mul_of_nonpos_left
  mul_le_mul_of_nonpos_right
  -- imp_imp_imp
  -- le_implies_le_of_le_of_le
  tsub_lt_tsub_left_of_le tsub_lt_tsub_right_of_le tsub_le_tsub abs_le_abs sup_le_sup
  inf_le_inf

-- attribute [mono left] add_lt_add_of_le_of_lt mul_lt_mul'
-- attribute [mono right] add_lt_add_of_lt_of_le mul_lt_mul
Tactic\Nontriviality\Core.lean
/- Copyright (c) 2020 Sébastien Gouëzel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Sébastien Gouëzel, Mario Carneiro -/ import Qq.MetaM import Mathlib.Logic.Nontrivial.Basic import Mathlib.Tactic.Attr.Core /-! # The `nontriviality` tactic. -/ universe u namespace Mathlib.Tactic.Nontriviality open Lean Elab Meta Tactic Qq theorem subsingleton_or_nontrivial_elim {p : Prop} {α : Type u} (h₁ : Subsingleton α → p) (h₂ : Nontrivial α → p) : p := (subsingleton_or_nontrivial α).elim @h₁ @h₂ /-- Tries to generate a `Nontrivial α` instance by performing case analysis on `subsingleton_or_nontrivial α`, attempting to discharge the subsingleton branch using lemmas with `@[nontriviality]` attribute, including `Subsingleton.le` and `eq_iff_true_of_subsingleton`. -/ def nontrivialityByElim {u : Level} (α : Q(Type u)) (g : MVarId) (simpArgs : Array Syntax) : MetaM MVarId := do let p : Q(Prop) ← g.getType guard (← instantiateMVars (← inferType p)).isProp g.withContext do let g₁ ← mkFreshExprMVarQ q(Subsingleton $α → $p) let (_, g₁') ← g₁.mvarId!.intro1 g₁'.withContext try -- FIXME: restore after lean4#2054 is fixed -- g₁'.inferInstance <|> do (do g₁'.assign (← synthInstance (← g₁'.getType))) <|> do let simpArgs := simpArgs.push (Unhygienic.run `(Parser.Tactic.simpLemma| nontriviality)) let stx := open TSyntax.Compat in Unhygienic.run `(tactic| simp [$simpArgs,*]) let ([], _) ← runTactic g₁' stx | failure catch _ => throwError "Could not prove goal assuming `{q(Subsingleton $α)}`\n{MessageData.ofGoal g₁'}" let g₂ : Q(Nontrivial $α → $p) ← mkFreshExprMVarQ q(Nontrivial $α → $p) g.assign q(subsingleton_or_nontrivial_elim $g₁ $g₂) pure g₂.mvarId! open Lean.Elab.Tactic.SolveByElim in /-- Tries to generate a `Nontrivial α` instance using `nontrivial_of_ne` or `nontrivial_of_lt` and local hypotheses. 
-/ def nontrivialityByAssumption (g : MVarId) : MetaM Unit := do g.inferInstance <|> do _ ← processSyntax {maxDepth := 6} false false [← `(nontrivial_of_ne), ← `(nontrivial_of_lt)] [] #[] [g] /-- Attempts to generate a `Nontrivial α` hypothesis. The tactic first checks to see that there is not already a `Nontrivial α` instance before trying to synthesize one using other techniques. If the goal is an (in)equality, the type `α` is inferred from the goal. Otherwise, the type needs to be specified in the tactic invocation, as `nontriviality α`. The `nontriviality` tactic will first look for strict inequalities amongst the hypotheses, and use these to derive the `Nontrivial` instance directly. Otherwise, it will perform a case split on `Subsingleton α ∨ Nontrivial α`, and attempt to discharge the `Subsingleton` goal using `simp [h₁, h₂, ..., hₙ, nontriviality]`, where `[h₁, h₂, ..., hₙ]` is a list of additional `simp` lemmas that can be passed to `nontriviality` using the syntax `nontriviality α using h₁, h₂, ..., hₙ`. ``` example {R : Type} [OrderedRing R] {a : R} (h : 0 < a) : 0 < a := by nontriviality -- There is now a `Nontrivial R` hypothesis available. assumption ``` ``` example {R : Type} [CommRing R] {r s : R} : r * s = s * r := by nontriviality -- There is now a `Nontrivial R` hypothesis available. apply mul_comm ``` ``` example {R : Type} [OrderedRing R] {a : R} (h : 0 < a) : (2 : ℕ) ∣ 4 := by nontriviality R -- there is now a `Nontrivial R` hypothesis available. dec_trivial ``` ``` def myeq {α : Type} (a b : α) : Prop := a = b example {α : Type} (a b : α) (h : a = b) : myeq a b := by success_if_fail nontriviality α -- Fails nontriviality α using myeq -- There is now a `Nontrivial α` hypothesis available assumption ``` -/ syntax (name := nontriviality) "nontriviality" (ppSpace colGt term)? (" using " Parser.Tactic.simpArg,+)? : tactic /-- Elaborator for the `nontriviality` tactic. 
-/ @[tactic nontriviality] def elabNontriviality : Tactic := fun stx => do let g ← getMainGoal let α ← match stx[1].getOptional? with | some e => Term.elabType e | none => (do let mut tgt ← withReducible g.getType' if let some tgt' := tgt.not? then tgt ← withReducible (whnf tgt') if let some (α, _) := tgt.eq? then return α if let some (α, _) := tgt.app4? ``LE.le then return α if let some (α, _) := tgt.app4? ``LT.lt then return α throwError "The goal is not an (in)equality, so you'll need to specify the desired \ `Nontrivial α` instance by invoking `nontriviality α`.") let .sort u ← whnf (← inferType α) | unreachable! let some v := u.dec | throwError "not a type{indentExpr α}" let α : Q(Type v) := α let tac := do let ty := q(Nontrivial $α) let m ← mkFreshExprMVar (some ty) nontrivialityByAssumption m.mvarId! g.assert `inst ty m let g ← liftM <| tac <|> nontrivialityByElim α g stx[2][1].getSepArgs replaceMainGoal [(← g.intro1).2]
Tactic\NormNum\Basic.lean
/- Copyright (c) 2021 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro, Thomas Murrills -/ import Mathlib.Tactic.NormNum.Core import Mathlib.Tactic.HaveI import Mathlib.Data.Nat.Cast.Commute import Mathlib.Algebra.Ring.Int import Mathlib.Algebra.GroupWithZero.Invertible import Mathlib.Tactic.ClearExclamation import Mathlib.Data.Nat.Cast.Basic /-! ## `norm_num` basic plugins This file adds `norm_num` plugins for * constructors and constants * `Nat.cast`, `Int.cast`, and `mkRat` * `+`, `-`, `*`, and `/` * `Nat.succ`, `Nat.sub`, `Nat.mod`, and `Nat.div`. See other files in this directory for many more plugins. -/ universe u namespace Mathlib open Lean hiding Rat mkRat open Meta namespace Meta.NormNum open Qq theorem IsInt.raw_refl (n : ℤ) : IsInt n n := ⟨rfl⟩ /-! # Constructors and constants -/ theorem isNat_zero (α) [AddMonoidWithOne α] : IsNat (Zero.zero : α) (nat_lit 0) := ⟨Nat.cast_zero.symm⟩ /-- The `norm_num` extension which identifies the expression `Zero.zero`, returning `0`. -/ @[norm_num Zero.zero] def evalZero : NormNumExt where eval {u α} e := do let sα ← inferAddMonoidWithOne α match e with | ~q(Zero.zero) => return .isNat sα (mkRawNatLit 0) q(isNat_zero $α) theorem isNat_one (α) [AddMonoidWithOne α] : IsNat (One.one : α) (nat_lit 1) := ⟨Nat.cast_one.symm⟩ /-- The `norm_num` extension which identifies the expression `One.one`, returning `1`. -/ @[norm_num One.one] def evalOne : NormNumExt where eval {u α} e := do let sα ← inferAddMonoidWithOne α match e with | ~q(One.one) => return .isNat sα (mkRawNatLit 1) q(isNat_one $α) theorem isNat_ofNat (α : Type u) [AddMonoidWithOne α] {a : α} {n : ℕ} (h : n = a) : IsNat a n := ⟨h.symm⟩ /-- The `norm_num` extension which identifies an expression `OfNat.ofNat n`, returning `n`. 
-/
@[norm_num OfNat.ofNat _] def evalOfNat : NormNumExt where eval {u α} e := do
  let sα ← inferAddMonoidWithOne α
  match e with
  | ~q(@OfNat.ofNat _ $n $oα) =>
    -- `n` must already be a raw natural-number literal for this extension to apply.
    let n : Q(ℕ) ← whnf n
    guard n.isRawNatLit
    let ⟨a, (pa : Q($n = $e))⟩ ← mkOfNat α sα n
    guard <|← isDefEq a e
    return .isNat sα n q(isNat_ofNat $α $pa)

theorem isNat_intOfNat : {n n' : ℕ} → IsNat n n' → IsNat (Int.ofNat n) n'
  | _, _, ⟨rfl⟩ => ⟨rfl⟩

/-- The `norm_num` extension which identifies the constructor application `Int.ofNat n` such that
`norm_num` successfully recognizes `n`, returning `n`. -/
@[norm_num Int.ofNat _] def evalIntOfNat : NormNumExt where eval {u α} e := do
  let .app (.const ``Int.ofNat _) (n : Q(ℕ)) ← whnfR e | failure
  -- The head constant forces `α` to be `ℤ` (at universe level 0).
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q Int := ⟨⟩
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let sℤ : Q(AddMonoidWithOne ℤ) := q(instAddMonoidWithOne)
  let ⟨n', p⟩ ← deriveNat n sℕ
  -- (was `haveI' x : …`: the binder name was unused; anonymous `haveI'` matches the
  -- convention used by the other extensions in this file.)
  haveI' : $e =Q Int.ofNat $n := ⟨⟩
  return .isNat sℤ n' q(isNat_intOfNat $p)

theorem isNat_natAbs_pos : {n : ℤ} → {a : ℕ} → IsNat n a → IsNat n.natAbs a
  | _, _, ⟨rfl⟩ => ⟨rfl⟩

theorem isNat_natAbs_neg : {n : ℤ} → {a : ℕ} → IsInt n (.negOfNat a) → IsNat n.natAbs a
  | _, _, ⟨rfl⟩ => ⟨by simp⟩

/-- The `norm_num` extension which identifies the expression `Int.natAbs n` such that
`norm_num` successfully recognizes `n`. -/
@[norm_num Int.natAbs (_ : ℤ)] def evalIntNatAbs : NormNumExt where eval {u α} e := do
  let .app (.const ``Int.natAbs _) (x : Q(ℤ)) ← whnfR e | failure
  -- `Int.natAbs` always lands in `ℕ` (at universe level 0).
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩
  haveI' : $e =Q Int.natAbs $x := ⟨⟩
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  -- Dispatch on the sign of the recognised integer; rationals are not in `Int.natAbs`'s domain.
  match ← derive (u := .zero) x with
  | .isNat _ a p => assumeInstancesCommute; return .isNat sℕ a q(isNat_natAbs_pos $p)
  | .isNegNat _ a p => assumeInstancesCommute; return .isNat sℕ a q(isNat_natAbs_neg $p)
  | _ => failure

/-!
# Casts -/ theorem isNat_natCast {R} [AddMonoidWithOne R] (n m : ℕ) : IsNat n m → IsNat (n : R) m := by rintro ⟨⟨⟩⟩; exact ⟨rfl⟩ @[deprecated (since := "2024-04-17")] alias isNat_cast := isNat_natCast /-- The `norm_num` extension which identifies an expression `Nat.cast n`, returning `n`. -/ @[norm_num Nat.cast _, NatCast.natCast _] def evalNatCast : NormNumExt where eval {u α} e := do let sα ← inferAddMonoidWithOne α let .app n (a : Q(ℕ)) ← whnfR e | failure guard <|← withNewMCtxDepth <| isDefEq n q(Nat.cast (R := $α)) let ⟨na, pa⟩ ← deriveNat a q(instAddMonoidWithOneNat) haveI' : $e =Q $a := ⟨⟩ return .isNat sα na q(isNat_natCast $a $na $pa) theorem isNat_intCast {R} [Ring R] (n : ℤ) (m : ℕ) : IsNat n m → IsNat (n : R) m := by rintro ⟨⟨⟩⟩; exact ⟨by simp⟩ @[deprecated (since := "2024-04-17")] alias isNat_int_cast := isNat_intCast theorem isintCast {R} [Ring R] (n m : ℤ) : IsInt n m → IsInt (n : R) m := by rintro ⟨⟨⟩⟩; exact ⟨rfl⟩ @[deprecated (since := "2024-04-17")] alias isInt_cast := isintCast /-- The `norm_num` extension which identifies an expression `Int.cast n`, returning `n`. -/ @[norm_num Int.cast _, IntCast.intCast _] def evalIntCast : NormNumExt where eval {u α} e := do let rα ← inferRing α let .app i (a : Q(ℤ)) ← whnfR e | failure guard <|← withNewMCtxDepth <| isDefEq i q(Int.cast (R := $α)) match ← derive (α := q(ℤ)) a with | .isNat _ na pa => assumeInstancesCommute haveI' : $e =Q Int.cast $a := ⟨⟩ return .isNat _ na q(isNat_intCast $a $na $pa) | .isNegNat _ na pa => assumeInstancesCommute haveI' : $e =Q Int.cast $a := ⟨⟩ return .isNegNat _ na q(isintCast $a (.negOfNat $na) $pa) | _ => failure /-! # Arithmetic -/ library_note "norm_num lemma function equality"/-- Note: Many of the lemmas in this file use a function equality hypothesis like `f = HAdd.hAdd` below. The reason for this is that when this is applied, to prove e.g. 
`100 + 200 = 300`, the `+` here is `HAdd.hAdd` with an instance that may not be syntactically equal to the one supplied by the `AddMonoidWithOne` instance, and rather than attempting to prove the instances equal lean will sometimes decide to evaluate `100 + 200` directly (into whatever `+` is defined to do in this ring), which is definitely not what we want; if the subterms are expensive to kernel-reduce then this could cause a `(kernel) deep recursion detected` error (see lean4#2171, mathlib4#4048). By using an equality for the unapplied `+` function and proving it by `rfl` we take away the opportunity for lean to unfold the numerals (and the instance defeq problem is usually comparatively easy). -/ -- see note [norm_num lemma function equality] theorem isNat_add {α} [AddMonoidWithOne α] : ∀ {f : α → α → α} {a b : α} {a' b' c : ℕ}, f = HAdd.hAdd → IsNat a a' → IsNat b b' → Nat.add a' b' = c → IsNat (f a b) c | _, _, _, _, _, _, rfl, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨(Nat.cast_add _ _).symm⟩ -- see note [norm_num lemma function equality] theorem isInt_add {α} [Ring α] : ∀ {f : α → α → α} {a b : α} {a' b' c : ℤ}, f = HAdd.hAdd → IsInt a a' → IsInt b b' → Int.add a' b' = c → IsInt (f a b) c | _, _, _, _, _, _, rfl, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨(Int.cast_add ..).symm⟩ /-- If `b` divides `a` and `a` is invertible, then `b` is invertible. -/ def invertibleOfMul {α} [Semiring α] (k : ℕ) (b : α) : ∀ (a : α) [Invertible a], a = k * b → Invertible b | _, ⟨c, hc1, hc2⟩, rfl => by rw [← mul_assoc] at hc1 rw [Nat.cast_commute k, mul_assoc, Nat.cast_commute k] at hc2 exact ⟨_, hc1, hc2⟩ /-- If `b` divides `a` and `a` is invertible, then `b` is invertible. -/ def invertibleOfMul' {α} [Semiring α] {a k b : ℕ} [Invertible (a : α)] (h : a = k * b) : Invertible (b : α) := invertibleOfMul k (b:α) ↑a (by simp [h]) -- TODO: clean up and move it somewhere in mathlib? 
It's a bit much for this file -- see note [norm_num lemma function equality] theorem isRat_add {α} [Ring α] {f : α → α → α} {a b : α} {na nb nc : ℤ} {da db dc k : ℕ} : f = HAdd.hAdd → IsRat a na da → IsRat b nb db → Int.add (Int.mul na db) (Int.mul nb da) = Int.mul k nc → Nat.mul da db = Nat.mul k dc → IsRat (f a b) nc dc := by rintro rfl ⟨_, rfl⟩ ⟨_, rfl⟩ (h₁ : na * db + nb * da = k * nc) (h₂ : da * db = k * dc) have : Invertible (↑(da * db) : α) := by simpa using invertibleMul (da:α) db have := invertibleOfMul' (α := α) h₂ use this have H := (Nat.cast_commute (α := α) da db).invOf_left.invOf_right.right_comm have h₁ := congr_arg (↑· * (⅟↑da * ⅟↑db : α)) h₁ simp only [Int.cast_add, Int.cast_mul, Int.cast_natCast, ← mul_assoc, add_mul, mul_mul_invOf_self_cancel] at h₁ have h₂ := congr_arg (↑nc * ↑· * (⅟↑da * ⅟↑db * ⅟↑dc : α)) h₂ simp only [H, mul_mul_invOf_self_cancel', Nat.cast_mul, ← mul_assoc] at h₁ h₂ rw [h₁, h₂, Nat.cast_commute] simp only [mul_mul_invOf_self_cancel, (Nat.cast_commute (α := α) da dc).invOf_left.invOf_right.right_comm, (Nat.cast_commute (α := α) db dc).invOf_left.invOf_right.right_comm] instance : MonadLift Option MetaM where monadLift | none => failure | some e => pure e /-- The `norm_num` extension which identifies expressions of the form `a + b`, such that `norm_num` successfully recognises both `a` and `b`. -/ @[norm_num _ + _] def evalAdd : NormNumExt where eval {u α} e := do let .app (.app (f : Q($α → $α → $α)) (a : Q($α))) (b : Q($α)) ← whnfR e | failure let ra ← derive a; let rb ← derive b match ra, rb with | .isBool .., _ | _, .isBool .. => failure | .isNat _ .., .isNat _ .. | .isNat _ .., .isNegNat _ .. | .isNat _ .., .isRat _ .. | .isNegNat _ .., .isNat _ .. | .isNegNat _ .., .isNegNat _ .. | .isNegNat _ .., .isRat _ .. | .isRat _ .., .isNat _ .. | .isRat _ .., .isNegNat _ .. | .isRat _ .., .isRat _ .. => guard <|← withNewMCtxDepth <| isDefEq f q(HAdd.hAdd (α := $α)) let rec /-- Main part of `evalAdd`. 
-/ core : Option (Result e) := do let rec intArm (rα : Q(Ring $α)) := do haveI' : $e =Q $a + $b := ⟨⟩ let ⟨za, na, pa⟩ ← ra.toInt _; let ⟨zb, nb, pb⟩ ← rb.toInt _ haveI' : $f =Q HAdd.hAdd := ⟨⟩ let zc := za + zb have c := mkRawIntLit zc haveI' : Int.add $na $nb =Q $c := ⟨⟩ return .isInt rα c zc q(isInt_add (f := $f) (.refl $f) $pa $pb (.refl $c)) let rec ratArm (dα : Q(DivisionRing $α)) : Option (Result _) := do haveI' : $e =Q $a + $b := ⟨⟩ haveI' : $f =Q HAdd.hAdd := ⟨⟩ let ⟨qa, na, da, pa⟩ ← ra.toRat' dα; let ⟨qb, nb, db, pb⟩ ← rb.toRat' dα let qc := qa + qb let dd := qa.den * qb.den let k := dd / qc.den have t1 : Q(ℤ) := mkRawIntLit (k * qc.num) have t2 : Q(ℕ) := mkRawNatLit dd have nc : Q(ℤ) := mkRawIntLit qc.num have dc : Q(ℕ) := mkRawNatLit qc.den have k : Q(ℕ) := mkRawNatLit k let r1 : Q(Int.add (Int.mul $na $db) (Int.mul $nb $da) = Int.mul $k $nc) := (q(Eq.refl $t1) : Expr) let r2 : Q(Nat.mul $da $db = Nat.mul $k $dc) := (q(Eq.refl $t2) : Expr) return .isRat' dα qc nc dc q(isRat_add (f := $f) (.refl $f) $pa $pb $r1 $r2) match ra, rb with | .isBool .., _ | _, .isBool .. => failure | .isRat dα .., _ | _, .isRat dα .. => ratArm dα | .isNegNat rα .., _ | _, .isNegNat rα .. => intArm rα | .isNat _ na pa, .isNat sα nb pb => haveI' : $e =Q $a + $b := ⟨⟩ haveI' : $f =Q HAdd.hAdd := ⟨⟩ assumeInstancesCommute have c : Q(ℕ) := mkRawNatLit (na.natLit! + nb.natLit!) 
        haveI' : Nat.add $na $nb =Q $c := ⟨⟩
        return .isNat sα c q(isNat_add (f := $f) (.refl $f) $pa $pb (.refl $c))
    core

-- see note [norm_num lemma function equality]
theorem isInt_neg {α} [Ring α] : ∀ {f : α → α} {a : α} {a' b : ℤ}, f = Neg.neg → IsInt a a' →
    Int.neg a' = b → IsInt (-a) b
  | _, _, _, _, rfl, ⟨rfl⟩, rfl => ⟨(Int.cast_neg ..).symm⟩

-- see note [norm_num lemma function equality]
theorem isRat_neg {α} [Ring α] : ∀ {f : α → α} {a : α} {n n' : ℤ} {d : ℕ}, f = Neg.neg →
    IsRat a n d → Int.neg n = n' → IsRat (-a) n' d
  | _, _, _, _, _, rfl, ⟨h, rfl⟩, rfl => ⟨h, by rw [← neg_mul, ← Int.cast_neg]; rfl⟩

/-- The `norm_num` extension which identifies expressions of the form `-a`,
such that `norm_num` successfully recognises `a`. -/
@[norm_num -_] def evalNeg : NormNumExt where eval {u α} e := do
  let .app (f : Q($α → $α)) (a : Q($α)) ← whnfR e | failure
  let ra ← derive a
  let rα ← inferRing α
  let ⟨(_f_eq : $f =Q Neg.neg)⟩ ← withNewMCtxDepth <| assertDefEqQ _ _
  haveI' _e_eq : $e =Q -$a := ⟨⟩
  let rec
  /-- Main part of `evalNeg`. -/
  core : Option (Result e) := do
    -- Integer arm: negate the integer value of `a`.
    let intArm (rα : Q(Ring $α)) := do
      assumeInstancesCommute
      let ⟨za, na, pa⟩ ← ra.toInt rα
      let zb := -za
      have b := mkRawIntLit zb
      haveI' : Int.neg $na =Q $b := ⟨⟩
      return .isInt rα b zb q(isInt_neg (f := $f) (.refl $f) $pa (.refl $b))
    -- Rational arm: negate the numerator; the denominator is unchanged.
    let ratArm (dα : Q(DivisionRing $α)) : Option (Result _) := do
      assumeInstancesCommute
      let ⟨qa, na, da, pa⟩ ← ra.toRat' dα
      let qb := -qa
      have nb := mkRawIntLit qb.num
      haveI' : Int.neg $na =Q $nb := ⟨⟩
      return .isRat' dα qb nb da q(isRat_neg (f := $f) (.refl $f) $pa (.refl $nb))
    match ra with
    | .isBool _ .. => failure
    | .isNat _ .. => intArm rα
    | .isNegNat rα .. => intArm rα
    | .isRat dα .. => ratArm dα
  core

-- see note [norm_num lemma function equality]
theorem isInt_sub {α} [Ring α] : ∀ {f : α → α → α} {a b : α} {a' b' c : ℤ},
    f = HSub.hSub → IsInt a a' → IsInt b b' → Int.sub a' b' = c → IsInt (f a b) c
  | _, _, _, _, _, _, rfl, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨(Int.cast_sub ..).symm⟩

-- see note [norm_num lemma function equality]
theorem isRat_sub {α} [Ring α] {f : α → α → α} {a b : α} {na nb nc : ℤ} {da db dc k : ℕ}
    (hf : f = HSub.hSub) (ra : IsRat a na da) (rb : IsRat b nb db)
    (h₁ : Int.sub (Int.mul na db) (Int.mul nb da) = Int.mul k nc)
    (h₂ : Nat.mul da db = Nat.mul k dc) :
    IsRat (f a b) nc dc := by
  rw [hf, sub_eq_add_neg]
  refine isRat_add rfl ra (isRat_neg (n' := -nb) rfl rb rfl) (k := k) (nc := nc) ?_ h₂
  rw [show Int.mul (-nb) _ = _ from neg_mul ..]; exact h₁

/-- The `norm_num` extension which identifies expressions of the form `a - b` in a ring,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num _ - _] def evalSub : NormNumExt where eval {u α} e := do
  let .app (.app (f : Q($α → $α → $α)) (a : Q($α))) (b : Q($α)) ← whnfR e | failure
  let rα ← inferRing α
  let ⟨(_f_eq : $f =Q HSub.hSub)⟩ ← withNewMCtxDepth <| assertDefEqQ _ _
  let ra ← derive a; let rb ← derive b
  haveI' _e_eq : $e =Q $a - $b := ⟨⟩
  let rec
  /-- Main part of `evalSub`. -/
  core : Option (Result e) := do
    let intArm (rα : Q(Ring $α)) := do
      assumeInstancesCommute
      let ⟨za, na, pa⟩ ← ra.toInt rα; let ⟨zb, nb, pb⟩ ← rb.toInt rα
      let zc := za - zb
      have c := mkRawIntLit zc
      haveI' : Int.sub $na $nb =Q $c := ⟨⟩
      return Result.isInt rα c zc q(isInt_sub (f := $f) (.refl $f) $pa $pb (.refl $c))
    let ratArm (dα : Q(DivisionRing $α)) : Option (Result _) := do
      assumeInstancesCommute
      let ⟨qa, na, da, pa⟩ ← ra.toRat' dα; let ⟨qb, nb, db, pb⟩ ← rb.toRat' dα
      let qc := qa - qb
      -- `dd = da * db` is the unreduced denominator; `k` is the factor cancelled
      -- when `qc` is written in lowest terms (compare `r1` and `r2` below).
      let dd := qa.den * qb.den
      let k := dd / qc.den
      have t1 : Q(ℤ) := mkRawIntLit (k * qc.num)
      have t2 : Q(ℕ) := mkRawNatLit dd
      have nc : Q(ℤ) := mkRawIntLit qc.num
      have dc : Q(ℕ) := mkRawNatLit qc.den
      have k : Q(ℕ) := mkRawNatLit k
      let r1 : Q(Int.sub (Int.mul $na $db) (Int.mul $nb $da) = Int.mul $k $nc) :=
        (q(Eq.refl $t1) : Expr)
      let r2 : Q(Nat.mul $da $db = Nat.mul $k $dc) := (q(Eq.refl $t2) : Expr)
      return .isRat' dα qc nc dc q(isRat_sub (f := $f) (.refl $f) $pa $pb $r1 $r2)
    match ra, rb with
    | .isBool .., _ | _, .isBool .. => failure
    | .isRat dα .., _ | _, .isRat dα .. => ratArm dα
    | .isNegNat rα .., _ | _, .isNegNat rα ..
    | .isNat _ .., .isNat _ .. => intArm rα
  core

-- see note [norm_num lemma function equality]
theorem isNat_mul {α} [Semiring α] : ∀ {f : α → α → α} {a b : α} {a' b' c : ℕ},
    f = HMul.hMul → IsNat a a' → IsNat b b' → Nat.mul a' b' = c → IsNat (a * b) c
  | _, _, _, _, _, _, rfl, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨(Nat.cast_mul ..).symm⟩

-- see note [norm_num lemma function equality]
theorem isInt_mul {α} [Ring α] : ∀ {f : α → α → α} {a b : α} {a' b' c : ℤ},
    f = HMul.hMul → IsInt a a' → IsInt b b' → Int.mul a' b' = c → IsInt (a * b) c
  | _, _, _, _, _, _, rfl, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨(Int.cast_mul ..).symm⟩

theorem isRat_mul {α} [Ring α] {f : α → α → α} {a b : α} {na nb nc : ℤ} {da db dc k : ℕ} :
    f = HMul.hMul → IsRat a na da → IsRat b nb db →
    Int.mul na nb = Int.mul k nc →
    Nat.mul da db = Nat.mul k dc → IsRat (f a b) nc dc := by
  rintro rfl ⟨_, rfl⟩ ⟨_, rfl⟩ (h₁ : na * nb = k * nc) (h₂ : da * db = k * dc)
  have : Invertible (↑(da * db) : α) := by simpa using invertibleMul (da:α) db
  have := invertibleOfMul' (α := α) h₂
  refine ⟨this, ?_⟩
  have H := (Nat.cast_commute (α := α) da db).invOf_left.invOf_right.right_comm
  have h₁ := congr_arg (Int.cast (R := α)) h₁
  simp only [Int.cast_mul, Int.cast_natCast] at h₁
  simp only [← mul_assoc, (Nat.cast_commute (α := α) da nb).invOf_left.right_comm, h₁]
  have h₂ := congr_arg (↑nc * ↑· * (⅟↑da * ⅟↑db * ⅟↑dc : α)) h₂
  simp only [Nat.cast_mul, ← mul_assoc] at h₂; rw [H] at h₂
  simp only [mul_mul_invOf_self_cancel'] at h₂; rw [h₂, Nat.cast_commute]
  simp only [mul_mul_invOf_self_cancel,
    (Nat.cast_commute (α := α) da dc).invOf_left.invOf_right.right_comm,
    (Nat.cast_commute (α := α) db dc).invOf_left.invOf_right.right_comm]

/-- The `norm_num` extension which identifies expressions of the form `a * b`,
such that `norm_num` successfully recognises both `a` and `b`.
-/
@[norm_num _ * _] def evalMul : NormNumExt where eval {u α} e := do
  let .app (.app (f : Q($α → $α → $α)) (a : Q($α))) (b : Q($α)) ← whnfR e | failure
  let sα ← inferSemiring α
  let ra ← derive a; let rb ← derive b
  guard <|← withNewMCtxDepth <| isDefEq f q(HMul.hMul (α := $α))
  haveI' : $f =Q HMul.hMul := ⟨⟩
  haveI' : $e =Q $a * $b := ⟨⟩
  let rec
  /-- Main part of `evalMul`. -/
  core : Option (Result e) := do
    let rec intArm (rα : Q(Ring $α)) := do
      assumeInstancesCommute
      let ⟨za, na, pa⟩ ← ra.toInt rα; let ⟨zb, nb, pb⟩ ← rb.toInt rα
      let zc := za * zb
      have c := mkRawIntLit zc
      haveI' : Int.mul $na $nb =Q $c := ⟨⟩
      return .isInt rα c zc q(isInt_mul (f := $f) (.refl $f) $pa $pb (.refl $c))
    let rec ratArm (dα : Q(DivisionRing $α)) : Option (Result _) := do
      assumeInstancesCommute
      let ⟨qa, na, da, pa⟩ ← ra.toRat' dα; let ⟨qb, nb, db, pb⟩ ← rb.toRat' dα
      let qc := qa * qb
      -- `k` is the factor cancelled when writing `qa * qb` in lowest terms
      -- (compare `r1` and `r2` below).
      let dd := qa.den * qb.den
      let k := dd / qc.den
      have nc : Q(ℤ) := mkRawIntLit qc.num
      have dc : Q(ℕ) := mkRawNatLit qc.den
      have k : Q(ℕ) := mkRawNatLit k
      let r1 : Q(Int.mul $na $nb = Int.mul $k $nc) :=
        (q(Eq.refl (Int.mul $na $nb)) : Expr)
      have t2 : Q(ℕ) := mkRawNatLit dd
      let r2 : Q(Nat.mul $da $db = Nat.mul $k $dc) := (q(Eq.refl $t2) : Expr)
      return .isRat' dα qc nc dc q(isRat_mul (f := $f) (.refl $f) $pa $pb $r1 $r2)
    match ra, rb with
    | .isBool .., _ | _, .isBool .. => failure
    | .isRat dα .., _ | _, .isRat dα .. => ratArm dα
    | .isNegNat rα .., _ | _, .isNegNat rα .. => intArm rα
    | .isNat mα' na pa, .isNat mα nb pb =>
      haveI' : $mα =Q by clear! $mα $mα'; apply AddCommMonoidWithOne.toAddMonoidWithOne := ⟨⟩
      assumeInstancesCommute
      have c : Q(ℕ) := mkRawNatLit (na.natLit! * nb.natLit!)
      haveI' : Nat.mul $na $nb =Q $c := ⟨⟩
      return .isNat mα c q(isNat_mul (f := $f) (.refl $f) $pa $pb (.refl $c))
  core

theorem isRat_div {α : Type u} [DivisionRing α] : {a b : α} → {cn : ℤ} → {cd : ℕ} →
    IsRat (a * b⁻¹) cn cd → IsRat (a / b) cn cd
  | _, _, _, _, h => by simpa [div_eq_mul_inv] using h

/-- Helper function to synthesize a typed `DivisionRing α` expression. -/
def inferDivisionRing {u : Level} (α : Q(Type u)) : MetaM Q(DivisionRing $α) :=
  return ← synthInstanceQ (q(DivisionRing $α) : Q(Type u)) <|> throwError "not a division ring"

/-- The `norm_num` extension which identifies expressions of the form `a / b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num _ / _] def evalDiv : NormNumExt where eval {u α} e := do
  let .app (.app f (a : Q($α))) (b : Q($α)) ← whnfR e | failure
  let dα ← inferDivisionRing α
  haveI' : $e =Q $a / $b := ⟨⟩
  guard <|← withNewMCtxDepth <| isDefEq f q(HDiv.hDiv (α := $α))
  -- Reduce division to multiplication by the inverse and recurse.
  let rab ← derive (q($a * $b⁻¹) : Q($α))
  let ⟨qa, na, da, pa⟩ ← rab.toRat' dα
  assumeInstancesCommute
  return .isRat' dα qa na da q(isRat_div $pa)

/-! # Logic -/

/-- The `norm_num` extension which identifies `True`. -/
@[norm_num True] def evalTrue : NormNumExt where eval {u α} e :=
  return (.isTrue q(True.intro) : Result q(True))

/-- The `norm_num` extension which identifies `False`. -/
@[norm_num False] def evalFalse : NormNumExt where eval {u α} e :=
  return (.isFalse q(not_false) : Result q(False))

/-- The `norm_num` extension which identifies expressions of the form `¬a`,
such that `norm_num` successfully recognises `a`. -/
@[norm_num ¬_] def evalNot : NormNumExt where eval {u α} e := do
  let .app (.const ``Not _) (a : Q(Prop)) ← whnfR e | failure
  guard <|← withNewMCtxDepth <| isDefEq α q(Prop)
  let ⟨b, p⟩ ← deriveBool q($a)
  match b with
  | true => return .isFalse q(not_not_intro $p)
  | false => return .isTrue q($p)

/-!
# (In)equalities -/

variable {α : Type u}

theorem isNat_eq_true [AddMonoidWithOne α] : {a b : α} → {c : ℕ} →
    IsNat a c → IsNat b c → a = b
  | _, _, _, ⟨rfl⟩, ⟨rfl⟩ => rfl

theorem ble_eq_false {x y : ℕ} : x.ble y = false ↔ y < x := by
  rw [← Nat.not_le, ← Bool.not_eq_true, Nat.ble_eq]

theorem isInt_eq_true [Ring α] : {a b : α} → {z : ℤ} → IsInt a z → IsInt b z → a = b
  | _, _, _, ⟨rfl⟩, ⟨rfl⟩ => rfl

theorem isRat_eq_true [Ring α] : {a b : α} → {n : ℤ} → {d : ℕ} →
    IsRat a n d → IsRat b n d → a = b
  | _, _, _, _, ⟨_, rfl⟩, ⟨_, rfl⟩ => by congr; apply Subsingleton.elim

theorem eq_of_true {a b : Prop} (ha : a) (hb : b) : a = b := propext (iff_of_true ha hb)
theorem ne_of_false_of_true {a b : Prop} (ha : ¬a) (hb : b) : a ≠ b := mt (· ▸ hb) ha
theorem ne_of_true_of_false {a b : Prop} (ha : a) (hb : ¬b) : a ≠ b := mt (· ▸ ha) hb
theorem eq_of_false {a b : Prop} (ha : ¬a) (hb : ¬b) : a = b := propext (iff_of_false ha hb)

/-! # Nat operations -/

theorem isNat_natSucc : {a : ℕ} → {a' c : ℕ} →
    IsNat a a' → Nat.succ a' = c → IsNat (a.succ) c
  | _, _, _, ⟨rfl⟩, rfl => ⟨by simp⟩

/-- The `norm_num` extension which identifies expressions of the form `Nat.succ a`,
such that `norm_num` successfully recognises `a`. -/
@[norm_num Nat.succ _] def evalNatSucc : NormNumExt where eval {u α} e := do
  let .app f (a : Q(ℕ)) ← whnfR e | failure
  guard <|← withNewMCtxDepth <| isDefEq f q(Nat.succ)
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩
  haveI' : $e =Q Nat.succ $a := ⟨⟩
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let ⟨na, pa⟩ ← deriveNat a sℕ
  have nc : Q(ℕ) := mkRawNatLit (na.natLit!.succ)
  haveI' : $nc =Q ($na).succ := ⟨⟩
  return .isNat sℕ nc q(isNat_natSucc $pa (.refl $nc))

theorem isNat_natSub : {a b : ℕ} → {a' b' c : ℕ} →
    IsNat a a' → IsNat b b' → Nat.sub a' b' = c → IsNat (a - b) c
  | _, _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨by simp⟩

/-- The `norm_num` extension which identifies expressions of the form `Nat.sub a b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℕ) - _] def evalNatSub : NormNumExt where eval {u α} e := do
  let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure
  -- We assert that the default instance for `HSub` is `Nat.sub` when the first parameter is `ℕ`.
  guard <|← withNewMCtxDepth <| isDefEq f q(HSub.hSub (α := ℕ))
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩
  haveI' : $e =Q $a - $b := ⟨⟩
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ
  have nc : Q(ℕ) := mkRawNatLit (na.natLit! - nb.natLit!)
  haveI' : Nat.sub $na $nb =Q $nc := ⟨⟩
  return .isNat sℕ nc q(isNat_natSub $pa $pb (.refl $nc))

theorem isNat_natMod : {a b : ℕ} → {a' b' c : ℕ} →
    IsNat a a' → IsNat b b' → Nat.mod a' b' = c → IsNat (a % b) c
  | _, _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨by aesop⟩

/-- The `norm_num` extension which identifies expressions of the form `Nat.mod a b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℕ) % _] def evalNatMod : NormNumExt where eval {u α} e := do
  let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩
  haveI' : $e =Q $a % $b := ⟨⟩
  -- We assert that the default instance for `HMod` is `Nat.mod` when the first parameter is `ℕ`.
  guard <|← withNewMCtxDepth <| isDefEq f q(HMod.hMod (α := ℕ))
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ
  have nc : Q(ℕ) := mkRawNatLit (na.natLit! % nb.natLit!)
  haveI' : Nat.mod $na $nb =Q $nc := ⟨⟩
  return .isNat sℕ nc q(isNat_natMod $pa $pb (.refl $nc))

theorem isNat_natDiv : {a b : ℕ} → {a' b' c : ℕ} →
    IsNat a a' → IsNat b b' → Nat.div a' b' = c → IsNat (a / b) c
  | _, _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨by aesop⟩

/-- The `norm_num` extension which identifies expressions of the form `Nat.div a b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℕ) / _] def evalNatDiv : NormNumExt where eval {u α} e := do
  let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure
  haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩
  haveI' : $e =Q $a / $b := ⟨⟩
  -- We assert that the default instance for `HDiv` is `Nat.div` when the first parameter is `ℕ`.
  guard <|← withNewMCtxDepth <| isDefEq f q(HDiv.hDiv (α := ℕ))
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ
  have nc : Q(ℕ) := mkRawNatLit (na.natLit! / nb.natLit!)
  haveI' : Nat.div $na $nb =Q $nc := ⟨⟩
  return .isNat sℕ nc q(isNat_natDiv $pa $pb (.refl $nc))

theorem isNat_dvd_true : {a b : ℕ} → {a' b' : ℕ} →
    IsNat a a' → IsNat b b' → Nat.mod b' a' = nat_lit 0 → a ∣ b
  | _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, e => Nat.dvd_of_mod_eq_zero e

theorem isNat_dvd_false : {a b : ℕ} → {a' b' c : ℕ} →
    IsNat a a' → IsNat b b' → Nat.mod b' a' = Nat.succ c → ¬a ∣ b
  | _, _, _, _, c, ⟨rfl⟩, ⟨rfl⟩, e => mt Nat.mod_eq_zero_of_dvd (e.symm ▸ Nat.succ_ne_zero c :)

/-- The `norm_num` extension which identifies expressions of the form `(a : ℕ) ∣ b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℕ) ∣ _] def evalNatDvd : NormNumExt where eval {u α} e := do
  let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure
  -- We assert that the default instance for `Dvd` is `Nat.dvd` when the first parameter is `ℕ`.
  guard <|← withNewMCtxDepth <| isDefEq f q(Dvd.dvd (α := ℕ))
  let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat)
  let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ
  -- Decide divisibility by computing `b % a` on the raw literals.
  match nb.natLit! % na.natLit! with
  | 0 =>
    have : Q(Nat.mod $nb $na = nat_lit 0) := (q(Eq.refl (nat_lit 0)) : Expr)
    return .isTrue q(isNat_dvd_true $pa $pb $this)
  | c+1 =>
    have nc : Q(ℕ) := mkRawNatLit c
    have : Q(Nat.mod $nb $na = Nat.succ $nc) := (q(Eq.refl (Nat.succ $nc)) : Expr)
    return .isFalse q(isNat_dvd_false $pa $pb $this)

end NormNum

end Meta

end Mathlib
Tactic\NormNum\BigOperators.lean
/-
Copyright (c) 2023 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen, Floris van Doorn
-/
import Mathlib.Tactic.NormNum.Basic
import Mathlib.Algebra.BigOperators.Group.Finset
import Mathlib.Data.List.FinRange

/-!
# `norm_num` plugin for big operators

This file adds `norm_num` plugins for `Finset.prod` and `Finset.sum`.

The driving part of this plugin is `Mathlib.Meta.NormNum.evalFinsetBigop`.
We repeatedly use `Finset.proveEmptyOrCons` to try to find a proof that the given set is empty,
or that it consists of one element inserted into a strict subset, and evaluate the big operator
on that subset until the set is completely exhausted.

## See also

* The `fin_cases` tactic has similar scope: splitting out a finite collection into its elements.

## Porting notes

This plugin is noticeably less powerful than the equivalent version in Mathlib 3: the design of
`norm_num` means plugins have to return numerals, rather than a generic expression.
In particular, we can't use the plugin on sums containing variables.
(See also the TODO note "To support variables".)

## TODO

* Support intervals: `Finset.Ico`, `Finset.Icc`, ...
* To support variables, like in Mathlib 3, turn this into a standalone tactic that unfolds
  the sum/prod, without computing its numeric value
  (using the `ring` tactic to do some normalization?)
-/

namespace Mathlib.Meta

open Lean hiding Rat mkRat
open Meta
open Qq

variable {u v : Level}

/-- This represents the result of trying to determine whether the given expression
`n : Q(ℕ)` is either `zero` or `succ`. -/
inductive Nat.UnifyZeroOrSuccResult (n : Q(ℕ))
  /-- `n` unifies with `0` -/
  | zero (pf : $n =Q 0)
  /-- `n` unifies with `succ n'` for this specific `n'` -/
  | succ (n' : Q(ℕ)) (pf : $n =Q Nat.succ $n')

/-- Determine whether the expression `n : Q(ℕ)` unifies with `0` or `Nat.succ n'`.

We do not use `norm_num` functionality because we want definitional equality,
not propositional equality, for use in dependent types.

Fails if neither of the options succeed.
-/
def Nat.unifyZeroOrSucc (n : Q(ℕ)) : MetaM (Nat.UnifyZeroOrSuccResult n) := do
  match ← isDefEqQ n q(0) with
  | .defEq pf => return .zero pf
  | .notDefEq => do
    -- Unify `n` against `Nat.succ ?n'` and read off the assignment of `?n'`.
    let n' : Q(ℕ) ← mkFreshExprMVar q(ℕ)
    let ⟨(_pf : $n =Q Nat.succ $n')⟩ ← assertDefEqQ n q(Nat.succ $n')
    let (.some (n'_val : Q(ℕ))) ← getExprMVarAssignment? n'.mvarId! |
      throwError "could not figure out value of `?n` from `{n} =?= Nat.succ ?n`"
    pure (.succ n'_val ⟨⟩)

/-- This represents the result of trying to determine whether the given expression
`s : Q(List $α)` is either empty or consists of an element inserted into a strict subset. -/
inductive List.ProveNilOrConsResult {α : Q(Type u)} (s : Q(List $α))
  /-- The set is Nil. -/
  | nil (pf : Q($s = []))
  /-- The set equals `a` inserted into the strict subset `s'`. -/
  | cons (a : Q($α)) (s' : Q(List $α)) (pf : Q($s = List.cons $a $s'))

/-- If `s` unifies with `t`, convert a result for `s` to a result for `t`.

If `s` does not unify with `t`, this results in a type-incorrect proof. -/
def List.ProveNilOrConsResult.uncheckedCast {α : Q(Type u)} {β : Q(Type v)}
    (s : Q(List $α)) (t : Q(List $β)) :
    List.ProveNilOrConsResult s → List.ProveNilOrConsResult t
  | .nil pf => .nil pf
  | .cons a s' pf => .cons a s' pf

/-- If `s = t` and we can get the result for `t`, then we can get the result for `s`.
-/
def List.ProveNilOrConsResult.eq_trans {α : Q(Type u)} {s t : Q(List $α)}
    (eq : Q($s = $t)) :
    List.ProveNilOrConsResult t → List.ProveNilOrConsResult s
  | .nil pf => .nil q(Eq.trans $eq $pf)
  | .cons a s' pf => .cons a s' q(Eq.trans $eq $pf)

lemma List.range_zero' {n : ℕ} (pn : NormNum.IsNat n 0) : List.range n = [] := by
  rw [pn.out, Nat.cast_zero, List.range_zero]

lemma List.range_succ_eq_map' {n nn n' : ℕ} (pn : NormNum.IsNat n nn) (pn' : nn = Nat.succ n') :
    List.range n = 0 :: List.map Nat.succ (List.range n') := by
  rw [pn.out, Nat.cast_id, pn', List.range_succ_eq_map]

set_option linter.unusedVariables false in
/-- Either show the expression `s : Q(List α)` is Nil, or remove one element from it.

Fails if we cannot determine which of the alternatives apply to the expression.
-/
partial def List.proveNilOrCons {u : Level} {α : Q(Type u)} (s : Q(List $α)) :
    MetaM (List.ProveNilOrConsResult s) :=
  s.withApp fun e a =>
  match (e, e.constName, a) with
  | (_, ``EmptyCollection.emptyCollection, _) => haveI : $s =Q {} := ⟨⟩; pure (.nil q(.refl []))
  | (_, ``List.nil, _) => haveI : $s =Q [] := ⟨⟩; pure (.nil q(rfl))
  | (_, ``List.cons, #[_, (a : Q($α)), (s' : Q(List $α))]) =>
    haveI : $s =Q $a :: $s' := ⟨⟩; pure (.cons a s' q(rfl))
  | (_, ``List.range, #[(n : Q(ℕ))]) =>
    -- Retype `s` at `Q(List ℕ)` to work concretely, then transport the result back.
    have s : Q(List ℕ) := s; .uncheckedCast _ _ <$> show MetaM (ProveNilOrConsResult s) from do
      let ⟨nn, pn⟩ ← NormNum.deriveNat n _
      haveI' : $s =Q .range $n := ⟨⟩
      let nnL := nn.natLit!
      if nnL = 0 then
        haveI' : $nn =Q 0 := ⟨⟩
        return .nil q(List.range_zero' $pn)
      else
        have n' : Q(ℕ) := mkRawNatLit (nnL - 1)
        have : $nn =Q .succ $n' := ⟨⟩
        return .cons _ _ q(List.range_succ_eq_map' $pn (.refl $nn))
  | (_, ``List.finRange, #[(n : Q(ℕ))]) =>
    have s : Q(List (Fin $n)) := s
    .uncheckedCast _ _ <$> show MetaM (ProveNilOrConsResult s) from do
      haveI' : $s =Q .finRange $n := ⟨⟩
      return match ← Nat.unifyZeroOrSucc n with -- We want definitional equality on `n`.
      | .zero _pf => .nil q(List.finRange_zero)
      | .succ n' _pf => .cons _ _ q(List.finRange_succ_eq_map $n')
  | (.const ``List.map [v, _], _, #[(β : Q(Type v)), _, (f : Q($β → $α)), (xxs : Q(List $β))]) => do
    haveI' : $s =Q ($xxs).map $f := ⟨⟩
    return match ← List.proveNilOrCons xxs with
    | .nil pf => .nil q(($pf ▸ List.map_nil : List.map _ _ = _))
    | .cons x xs pf =>
      .cons q($f $x) q(($xs).map $f)
        q(($pf ▸ List.map_cons $f $x $xs : List.map _ _ = _))
  | (_, fn, args) =>
    throwError "List.proveNilOrCons: unsupported List expression {s} ({fn}, {args})"

/-- This represents the result of trying to determine whether the given expression
`s : Q(Multiset $α)` is either empty or consists of an element inserted into a strict subset. -/
inductive Multiset.ProveZeroOrConsResult {α : Q(Type u)} (s : Q(Multiset $α))
  /-- The set is zero. -/
  | zero (pf : Q($s = 0))
  /-- The set equals `a` inserted into the strict subset `s'`. -/
  | cons (a : Q($α)) (s' : Q(Multiset $α)) (pf : Q($s = Multiset.cons $a $s'))

/-- If `s` unifies with `t`, convert a result for `s` to a result for `t`.

If `s` does not unify with `t`, this results in a type-incorrect proof. -/
def Multiset.ProveZeroOrConsResult.uncheckedCast {α : Q(Type u)} {β : Q(Type v)}
    (s : Q(Multiset $α)) (t : Q(Multiset $β)) :
    Multiset.ProveZeroOrConsResult s → Multiset.ProveZeroOrConsResult t
  | .zero pf => .zero pf
  | .cons a s' pf => .cons a s' pf

/-- If `s = t` and we can get the result for `t`, then we can get the result for `s`.
-/
def Multiset.ProveZeroOrConsResult.eq_trans {α : Q(Type u)} {s t : Q(Multiset $α)}
    (eq : Q($s = $t)) :
    Multiset.ProveZeroOrConsResult t → Multiset.ProveZeroOrConsResult s
  | .zero pf => .zero q(Eq.trans $eq $pf)
  | .cons a s' pf => .cons a s' q(Eq.trans $eq $pf)

lemma Multiset.insert_eq_cons {α : Type*} (a : α) (s : Multiset α) :
    insert a s = Multiset.cons a s := rfl

lemma Multiset.range_zero' {n : ℕ} (pn : NormNum.IsNat n 0) : Multiset.range n = 0 := by
  rw [pn.out, Nat.cast_zero, Multiset.range_zero]

lemma Multiset.range_succ' {n nn n' : ℕ} (pn : NormNum.IsNat n nn) (pn' : nn = Nat.succ n') :
    Multiset.range n = n' ::ₘ Multiset.range n' := by
  rw [pn.out, Nat.cast_id, pn', Multiset.range_succ]

/-- Either show the expression `s : Q(Multiset α)` is Zero, or remove one element from it.

Fails if we cannot determine which of the alternatives apply to the expression.
-/
partial def Multiset.proveZeroOrCons {α : Q(Type u)} (s : Q(Multiset $α)) :
    MetaM (Multiset.ProveZeroOrConsResult s) :=
  match s.getAppFnArgs with
  | (``EmptyCollection.emptyCollection, _) => haveI : $s =Q {} := ⟨⟩; pure (.zero q(rfl))
  | (``Zero.zero, _) => haveI : $s =Q 0 := ⟨⟩; pure (.zero q(rfl))
  | (``Multiset.cons, #[_, (a : Q($α)), (s' : Q(Multiset $α))]) =>
    haveI : $s =Q .cons $a $s' := ⟨⟩
    pure (.cons a s' q(rfl))
  | (``Multiset.ofList, #[_, (val : Q(List $α))]) => do
    -- Delegate to the `List` analogue and transport the proof along the coercion.
    haveI : $s =Q .ofList $val := ⟨⟩
    return match ← List.proveNilOrCons val with
    | .nil pf => .zero q($pf ▸ Multiset.coe_nil : Multiset.ofList _ = _)
    | .cons a s' pf =>
      .cons a q($s') q($pf ▸ Multiset.cons_coe $a $s' : Multiset.ofList _ = _)
  | (``Multiset.range, #[(n : Q(ℕ))]) => do
    -- Retype `s` at `Q(Multiset ℕ)` to work concretely, then transport the result back.
    have s : Q(Multiset ℕ) := s; .uncheckedCast _ _ <$>
      show MetaM (ProveZeroOrConsResult s) from do
        let ⟨nn, pn⟩ ← NormNum.deriveNat n _
        haveI' : $s =Q .range $n := ⟨⟩
        let nnL := nn.natLit!
        if nnL = 0 then
          haveI' : $nn =Q 0 := ⟨⟩
          return .zero q(Multiset.range_zero' $pn)
        else
          have n' : Q(ℕ) := mkRawNatLit (nnL - 1)
          haveI' : $nn =Q ($n').succ := ⟨⟩
          return .cons _ _ q(Multiset.range_succ' $pn rfl)
  | (fn, args) =>
    throwError "Multiset.proveZeroOrCons: unsupported multiset expression {s} ({fn}, {args})"

/-- This represents the result of trying to determine whether the given expression
`s : Q(Finset $α)` is either empty or consists of an element inserted into a strict subset. -/
inductive Finset.ProveEmptyOrConsResult {α : Q(Type u)} (s : Q(Finset $α))
  /-- The set is empty. -/
  | empty (pf : Q($s = ∅))
  /-- The set equals `a` inserted into the strict subset `s'`. -/
  | cons (a : Q($α)) (s' : Q(Finset $α)) (h : Q($a ∉ $s')) (pf : Q($s = Finset.cons $a $s' $h))

/-- If `s` unifies with `t`, convert a result for `s` to a result for `t`.

If `s` does not unify with `t`, this results in a type-incorrect proof. -/
def Finset.ProveEmptyOrConsResult.uncheckedCast {α : Q(Type u)} {β : Q(Type v)}
    (s : Q(Finset $α)) (t : Q(Finset $β)) :
    Finset.ProveEmptyOrConsResult s → Finset.ProveEmptyOrConsResult t
  | .empty pf => .empty pf
  | .cons a s' h pf => .cons a s' h pf

/-- If `s = t` and we can get the result for `t`, then we can get the result for `s`.
-/
def Finset.ProveEmptyOrConsResult.eq_trans {α : Q(Type u)} {s t : Q(Finset $α)}
    (eq : Q($s = $t)) :
    Finset.ProveEmptyOrConsResult t → Finset.ProveEmptyOrConsResult s
  | .empty pf => .empty q(Eq.trans $eq $pf)
  | .cons a s' h pf => .cons a s' h q(Eq.trans $eq $pf)

lemma Finset.insert_eq_cons {α : Type*} [DecidableEq α] (a : α) (s : Finset α) (h : a ∉ s) :
    insert a s = Finset.cons a s h := by
  ext; simp

lemma Finset.range_zero' {n : ℕ} (pn : NormNum.IsNat n 0) : Finset.range n = {} := by
  rw [pn.out, Nat.cast_zero, Finset.range_zero]

lemma Finset.range_succ' {n nn n' : ℕ} (pn : NormNum.IsNat n nn) (pn' : nn = Nat.succ n') :
    Finset.range n = Finset.cons n' (Finset.range n') Finset.not_mem_range_self := by
  rw [pn.out, Nat.cast_id, pn', Finset.range_succ, Finset.insert_eq_cons]

lemma Finset.univ_eq_elems {α : Type*} [Fintype α] (elems : Finset α)
    (complete : ∀ x : α, x ∈ elems) :
    Finset.univ = elems := by
  ext x; simpa using complete x

/-- Either show the expression `s : Q(Finset α)` is empty, or remove one element from it.

Fails if we cannot determine which of the alternatives apply to the expression.
-/
partial def Finset.proveEmptyOrCons {α : Q(Type u)} (s : Q(Finset $α)) :
    MetaM (ProveEmptyOrConsResult s) :=
  match s.getAppFnArgs with
  | (``EmptyCollection.emptyCollection, _) => haveI : $s =Q {} := ⟨⟩; pure (.empty q(rfl))
  | (``Finset.cons, #[_, (a : Q($α)), (s' : Q(Finset $α)), (h : Q(¬ $a ∈ $s'))]) =>
    haveI : $s =Q .cons $a $s' $h := ⟨⟩
    pure (.cons a s' h q(.refl $s))
  | (``Finset.mk, #[_, (val : Q(Multiset $α)), (nd : Q(Multiset.Nodup $val))]) => do
    -- Split the `Nodup` hypothesis into head-freshness (`h'`) and nodup of the tail (`nd'`).
    match ← Multiset.proveZeroOrCons val with
    | .zero pf => pure <| .empty (q($pf ▸ Finset.mk_zero) : Q(Finset.mk $val $nd = ∅))
    | .cons a s' pf => do
      let h : Q(Multiset.Nodup ($a ::ₘ $s')) := q($pf ▸ $nd)
      let nd' : Q(Multiset.Nodup $s') := q((Multiset.nodup_cons.mp $h).2)
      let h' : Q($a ∉ $s') := q((Multiset.nodup_cons.mp $h).1)
      return (.cons a q(Finset.mk $s' $nd') h'
        (q($pf ▸ Finset.mk_cons $h) : Q(Finset.mk $val $nd = Finset.cons $a ⟨$s', $nd'⟩ $h')))
  | (``Finset.range, #[(n : Q(ℕ))]) =>
    have s : Q(Finset ℕ) := s; .uncheckedCast _ _ <$>
      show MetaM (ProveEmptyOrConsResult s) from do
        let ⟨nn, pn⟩ ← NormNum.deriveNat n _
        haveI' : $s =Q .range $n := ⟨⟩
        let nnL := nn.natLit!
        if nnL = 0 then
          haveI : $nn =Q 0 := ⟨⟩
          return .empty q(Finset.range_zero' $pn)
        else
          have n' : Q(ℕ) := mkRawNatLit (nnL - 1)
          haveI' : $nn =Q ($n').succ := ⟨⟩
          return .cons n' _ _ q(Finset.range_succ' $pn (.refl $nn))
  | (``Finset.univ, #[_, (instFT : Q(Fintype $α))]) => do
    -- Unfold the `Fintype` instance to its explicit `elems` finset and recurse on that.
    haveI' : $s =Q .univ := ⟨⟩
    match (← whnfI instFT).getAppFnArgs with
    | (``Fintype.mk, #[_, (elems : Q(Finset $α)), (complete : Q(∀ x : $α, x ∈ $elems))]) => do
      let res ← Finset.proveEmptyOrCons elems
      pure <| res.eq_trans q(Finset.univ_eq_elems $elems $complete)
    | e =>
      throwError "Finset.proveEmptyOrCons: could not determine elements of Fintype instance {e}"
  | (fn, args) =>
    throwError "Finset.proveEmptyOrCons: unsupported finset expression {s} ({fn}, {args})"

namespace NormNum

/-- If `a = b` and we can evaluate `b`, then we can evaluate `a`.
-/
def Result.eq_trans {α : Q(Type u)} {a b : Q($α)} (eq : Q($a = $b)) : Result b → Result a
  | .isBool true proof =>
    -- In the boolean cases `α` is really `Prop`; retype the pieces accordingly.
    have a : Q(Prop) := a
    have b : Q(Prop) := b
    have eq : Q($a = $b) := eq
    have proof : Q($b) := proof
    Result.isTrue (x := a) q($eq ▸ $proof)
  | .isBool false proof =>
    have a : Q(Prop) := a
    have b : Q(Prop) := b
    have eq : Q($a = $b) := eq
    have proof : Q(¬ $b) := proof
    Result.isFalse (x := a) q($eq ▸ $proof)
  | .isNat inst lit proof => Result.isNat inst lit q($eq ▸ $proof)
  | .isNegNat inst lit proof => Result.isNegNat inst lit q($eq ▸ $proof)
  | .isRat inst q n d proof => Result.isRat inst q n d q($eq ▸ $proof)

protected lemma Finset.sum_empty {β α : Type*} [CommSemiring β] (f : α → β) :
    IsNat (Finset.sum ∅ f) 0 :=
  ⟨by simp⟩

protected lemma Finset.prod_empty {β α : Type*} [CommSemiring β] (f : α → β) :
    IsNat (Finset.prod ∅ f) 1 :=
  ⟨by simp⟩

/-- Evaluate a big operator applied to a finset by repeating `proveEmptyOrCons` until
we exhaust all elements of the set. -/
partial def evalFinsetBigop {α : Q(Type u)} {β : Q(Type v)}
    (op : Q(Finset $α → ($α → $β) → $β)) (f : Q($α → $β))
    (res_empty : Result q($op Finset.empty $f))
    (res_cons : {a : Q($α)} -> {s' : Q(Finset $α)} -> {h : Q($a ∉ $s')} ->
      Result (α := β) q($f $a) -> Result (α := β) q($op $s' $f) ->
      MetaM (Result (α := β) q($op (Finset.cons $a $s' $h) $f))) :
    (s : Q(Finset $α)) → MetaM (Result (α := β) q($op $s $f))
  | s => do
    match ← Finset.proveEmptyOrCons s with
    | .empty pf => pure <| res_empty.eq_trans q(congr_fun (congr_arg _ $pf) _)
    | .cons a s' h pf => do
      -- Evaluate `f a`, recurse on the strict subset `s'` (reusing `res_empty`
      -- as the base case), then combine via `res_cons`.
      let fa : Q($β) := Expr.betaRev f #[a]
      let res_fa ← derive fa
      let res_op_s' : Result q($op $s' $f) ← evalFinsetBigop op f res_empty @res_cons s'
      let res ← res_cons res_fa res_op_s'
      let eq : Q($op $s $f = $op (Finset.cons $a $s' $h) $f) :=
        q(congr_fun (congr_arg _ $pf) _)
      pure (res.eq_trans eq)

/-- `norm_num` plugin for evaluating products of finsets.

If your finset is not supported, you can add it to the match in `Finset.proveEmptyOrCons`.
-/
@[norm_num @Finset.prod _ _ _ _ _]
partial def evalFinsetProd : NormNumExt where
  eval {u β} e := do
    let .app (.app (.app (.app (.app (.const ``Finset.prod [v, _]) α) β') _) s) f ← whnfR e |
      failure
    guard <| ← withNewMCtxDepth <| isDefEq β β'
    have α : Q(Type v) := α
    have s : Q(Finset $α) := s
    have f : Q($α → $β) := f
    let instCS : Q(CommSemiring $β) ← synthInstanceQ q(CommSemiring $β) <|>
      throwError "not a commutative semiring: {β}"
    let instS : Q(Semiring $β) := q(CommSemiring.toSemiring)
    -- Have to construct this expression manually, `q(1)` doesn't parse correctly:
    let n : Q(ℕ) := .lit (.natVal 1)
    let pf : Q(IsNat (Finset.prod ∅ $f) $n) := q(@Finset.prod_empty $β $α $instCS $f)
    let res_empty := Result.isNat _ n pf
    evalFinsetBigop q(Finset.prod) f res_empty (fun {a s' h} res_fa res_prod_s' ↦ do
      let fa : Q($β) := Expr.app f a
      let res ← evalMul.core q($fa * Finset.prod $s' $f) q(HMul.hMul) _ _ instS
        res_fa res_prod_s'
      let eq : Q(Finset.prod (Finset.cons $a $s' $h) $f = $fa * Finset.prod $s' $f) :=
        q(Finset.prod_cons $h)
      pure <| res.eq_trans eq) s

/-- `norm_num` plugin for evaluating sums of finsets.

If your finset is not supported, you can add it to the match in `Finset.proveEmptyOrCons`.
-/
@[norm_num @Finset.sum _ _ _ _ _]
partial def evalFinsetSum : NormNumExt where
  eval {u β} e := do
    let .app (.app (.app (.app (.app (.const ``Finset.sum [v, _]) α) β') _) s) f ← whnfR e |
      failure
    guard <| ← withNewMCtxDepth <| isDefEq β β'
    have α : Q(Type v) := α
    have s : Q(Finset $α) := s
    have f : Q($α → $β) := f
    let instCS : Q(CommSemiring $β) ← synthInstanceQ q(CommSemiring $β) <|>
      throwError "not a commutative semiring: {β}"
    let n : Q(ℕ) := mkRawNatLit 0
    let pf : Q(IsNat (Finset.sum ∅ $f) $n) := q(@Finset.sum_empty $β $α $instCS $f)
    let res_empty := Result.isNat _ n pf
    evalFinsetBigop q(Finset.sum) f res_empty (fun {a s' h} res_fa res_sum_s' ↦ do
      let fa : Q($β) := Expr.app f a
      let res ← evalAdd.core q($fa + Finset.sum $s' $f) q(HAdd.hAdd) _ _ res_fa res_sum_s'
      let eq : Q(Finset.sum (Finset.cons $a $s' $h) $f = $fa + Finset.sum $s' $f) :=
        q(Finset.sum_cons $h)
      pure <| res.eq_trans eq) s

end NormNum

end Meta

end Mathlib
Tactic\NormNum\Core.lean
/-
Copyright (c) 2022 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Mathlib.Tactic.NormNum.Result
import Mathlib.Util.Qq
import Lean.Elab.Tactic.Location

/-!
## `norm_num` core functionality

This file sets up the `norm_num` tactic and the `@[norm_num]` attribute,
which allow for plugging in new normalization functionality around a simp-based driver.
The actual behavior is in `@[norm_num]`-tagged definitions in `Tactic.NormNum.Basic`
and elsewhere.
-/

open Lean hiding Rat mkRat
open Lean.Meta Qq Lean.Elab Term

/-- Attribute for identifying `norm_num` extensions. -/
syntax (name := norm_num) "norm_num " term,+ : attr

namespace Mathlib
namespace Meta.NormNum

initialize registerTraceClass `Tactic.norm_num

/-- An extension for `norm_num`. -/
structure NormNumExt where
  /-- The extension should be run in the `pre` phase when used as simp plugin. -/
  pre := true
  /-- The extension should be run in the `post` phase when used as simp plugin. -/
  post := true
  /-- Attempts to prove an expression is equal to some explicit number of the relevant type. -/
  eval {u : Level} {α : Q(Type u)} (e : Q($α)) : MetaM (Result e)
  /-- The name of the `norm_num` extension. -/
  name : Name := by exact decl_name%

variable {u : Level}

/-- Read a `norm_num` extension from a declaration of the right type.

This is evaluated at attribute-registration time, so the declaration `n` must
already be compiled; `evalConstCheck` verifies it actually has type `NormNumExt`. -/
def mkNormNumExt (n : Name) : ImportM NormNumExt := do
  let { env, opts, .. } ← read
  IO.ofExcept <| unsafe env.evalConstCheck NormNumExt opts ``NormNumExt n

/-- Each `norm_num` extension is labelled with a collection of patterns
which determine the expressions to which it should be applied. -/
abbrev Entry := Array (Array DiscrTree.Key) × Name

/-- The state of the `norm_num` extension environment -/
structure NormNums where
  /-- The tree of `norm_num` extensions. -/
  tree : DiscrTree NormNumExt := {}
  /-- Erased `norm_num`s. -/
  erased : PHashSet Name := {}
  deriving Inhabited

/-- Configuration for `DiscrTree`. -/
def discrTreeConfig : WhnfCoreConfig := {}

/-- Environment extensions for `norm_num` declarations -/
initialize normNumExt : ScopedEnvExtension Entry (Entry × NormNumExt) NormNums ←
  -- we only need this to deduplicate entries in the DiscrTree
  have : BEq NormNumExt := ⟨fun _ _ ↦ false⟩
  /- Insert `v : NormNumExt` into the tree `dt` on all key sequences given in `kss`. -/
  let insert kss v dt := kss.foldl (fun dt ks ↦ dt.insertCore ks v) dt
  registerScopedEnvExtension
    { mkInitial := pure {}
      ofOLeanEntry := fun _ e@(_, n) ↦ return (e, ← mkNormNumExt n)
      toOLeanEntry := (·.1)
      -- Re-adding a previously erased extension revives it (`erased.erase n`).
      addEntry := fun { tree, erased } ((kss, n), ext) ↦
        { tree := insert kss ext tree, erased := erased.erase n } }

/-- Run each registered `norm_num` extension on an expression,
returning a `NormNum.Result`.

Extensions matching `e` in the discrimination tree are tried in order; the first
one that succeeds wins. Fails with an error if none applies. -/
def derive {α : Q(Type u)} (e : Q($α)) (post := false) : MetaM (Result e) := do
  -- Fast path: a raw natural number literal is already in normal form.
  if e.isRawNatLit then
    let lit : Q(ℕ) := e
    return .isNat (q(instAddMonoidWithOneNat) : Q(AddMonoidWithOne ℕ))
      lit (q(IsNat.raw_refl $lit) : Expr)
  profileitM Exception "norm_num" (← getOptions) do
    let s ← saveState
    let normNums := normNumExt.getState (← getEnv)
    let arr ← normNums.tree.getMatch e discrTreeConfig
    for ext in arr do
      -- Respect the extension's pre/post flags and skip erased extensions.
      if (bif post then ext.post else ext.pre) && ! normNums.erased.contains ext.name then
        try
          let new ← withReducibleAndInstances <| ext.eval e
          trace[Tactic.norm_num] "{ext.name}:\n{e} ==> {new}"
          return new
        catch err =>
          trace[Tactic.norm_num] "{e} failed: {err.toMessageData}"
          -- Roll back any state changes made by the failed extension
          -- before trying the next one.
          s.restore
    throwError "{e}: no norm_nums apply"

/-- Run each registered `norm_num` extension on a typed expression `e : α`,
returning a typed expression `lit : ℕ`, and a proof of `isNat e lit`. -/
def deriveNat {α : Q(Type u)} (e : Q($α))
    (_inst : Q(AddMonoidWithOne $α) := by with_reducible assumption) :
    MetaM ((lit : Q(ℕ)) × Q(IsNat $e $lit)) := do
  let .isNat _ lit proof ← derive e | failure
  pure ⟨lit, proof⟩

/-- Run each registered `norm_num` extension on a typed expression `e : α`,
returning a typed expression `lit : ℤ`, and a proof of `IsInt e lit` in expression form. -/
def deriveInt {α : Q(Type u)} (e : Q($α))
    (_inst : Q(Ring $α) := by with_reducible assumption) :
    MetaM ((lit : Q(ℤ)) × Q(IsInt $e $lit)) := do
  let some ⟨_, lit, proof⟩ := (← derive e).toInt | failure
  pure ⟨lit, proof⟩

/-- Run each registered `norm_num` extension on a typed expression `e : α`,
returning a rational number, typed expressions `n : ℤ` and `d : ℕ` for the numerator and
denominator, and a proof of `IsRat e n d` in expression form. -/
def deriveRat {α : Q(Type u)} (e : Q($α))
    (_inst : Q(DivisionRing $α) := by with_reducible assumption) :
    MetaM (ℚ × (n : Q(ℤ)) × (d : Q(ℕ)) × Q(IsRat $e $n $d)) := do
  let some res := (← derive e).toRat' | failure
  pure res

/-- Run each registered `norm_num` extension on a typed expression `p : Prop`,
and returning the truth or falsity of `p' : Prop` from an equivalence `p ↔ p'`. -/
def deriveBool (p : Q(Prop)) : MetaM ((b : Bool) × BoolResult p b) := do
  let .isBool b prf ← derive (α := (q(Prop) : Q(Type))) p | failure
  pure ⟨b, prf⟩

/-- Run each registered `norm_num` extension on a typed expression `p : Prop`,
and returning the truth or falsity of `p' : Prop` from an equivalence `p ↔ p'`. -/
def deriveBoolOfIff (p p' : Q(Prop)) (hp : Q($p ↔ $p')) :
    MetaM ((b : Bool) × BoolResult p' b) := do
  let ⟨b, pb⟩ ← deriveBool p
  -- Transport the boolean result along the equivalence `hp : p ↔ p'`.
  match b with
  | true => return ⟨true, q(Iff.mp $hp $pb)⟩
  | false => return ⟨false, q((Iff.not $hp).mp $pb)⟩

/-- Run each registered `norm_num` extension on an expression,
returning a `Simp.Result`. -/
def eval (e : Expr) (post := false) : MetaM Simp.Result := do
  -- An explicit number is already normalized; return it unchanged (no proof needed).
  if e.isExplicitNumber then return { expr := e }
  let ⟨_, _, e⟩ ← inferTypeQ' e
  (← derive e post).toSimpResult

/-- Erases a name marked `norm_num` by adding it to the state's `erased` field and
removing it from the state's list of `Entry`s. -/
def NormNums.eraseCore (d : NormNums) (declName : Name) : NormNums :=
  { d with erased := d.erased.insert declName }

/--
Erase a name marked as a `norm_num` attribute.

Check that it does in fact have the `norm_num` attribute by making sure it names a
`NormNumExt` found somewhere in the state's tree, and is not erased.
-/
def NormNums.erase {m : Type → Type} [Monad m] [MonadError m] (d : NormNums) (declName : Name) :
    m NormNums := do
  unless d.tree.values.any (·.name == declName) && ! d.erased.contains declName do
    throwError "'{declName}' does not have [norm_num] attribute"
  return d.eraseCore declName

initialize registerBuiltinAttribute {
  name := `norm_num
  descr := "adds a norm_num extension"
  applicationTime := .afterCompilation
  add := fun declName stx kind ↦ match stx with
    | `(attr| norm_num $es,*) => do
      let env ← getEnv
      -- The attribute may only be attached to declarations of the current module.
      unless (env.getModuleIdxFor? declName).isNone do
        throwError "invalid attribute 'norm_num', declaration is in an imported module"
      if (IR.getSorryDep env declName).isSome then return -- ignore in progress definitions
      let ext ← mkNormNumExt declName
      -- Elaborate each pattern term into a discrimination-tree key path.
      let keys ← MetaM.run' <| es.getElems.mapM fun stx ↦ do
        let e ← TermElabM.run' <| withSaveInfoContext <| withAutoBoundImplicit <|
          withReader ({ · with ignoreTCFailures := true }) do
            let e ← elabTerm stx none
            let (_, _, e) ← lambdaMetaTelescope (← mkLambdaFVars (← getLCtx).getFVars e)
            return e
        DiscrTree.mkPath e discrTreeConfig
      normNumExt.add ((keys, declName), ext) kind
    | _ => throwUnsupportedSyntax
  erase := fun declName => do
    let s := normNumExt.getState (← getEnv)
    let s ← s.erase declName
    modifyEnv fun env => normNumExt.modifyState env fun _ => s
}

/-- A simp plugin which calls `NormNum.eval`.

Failures of `eval` are swallowed and reported as `.continue`,
so simp falls back to its other rules. -/
def tryNormNum (post := false) (e : Expr) : SimpM Simp.Step := do
  try return .done (← eval e post)
  catch _ => return .continue

variable (ctx : Simp.Context) (useSimp := true) in
mutual
  /-- A discharger which calls `norm_num`. -/
  partial def discharge (e : Expr) : SimpM (Option Expr) := do (← deriveSimp e).ofTrue

  /-- A `Methods` implementation which calls `norm_num`.

  With `useSimp := true` the default simp pre/post passes run before `tryNormNum`;
  otherwise only the `norm_num` steps themselves are used. -/
  partial def methods : Simp.Methods :=
    if useSimp then {
      pre := Simp.preDefault #[] >> tryNormNum
      post := Simp.postDefault #[] >> tryNormNum (post := true)
      discharge? := discharge
    } else {
      pre := tryNormNum
      post := tryNormNum (post := true)
      discharge? := discharge
    }

  /-- Traverses the given expression using simp and normalises any numbers it finds. -/
  partial def deriveSimp (e : Expr) : MetaM Simp.Result :=
    (·.1) <$> Simp.main e ctx (methods := methods)
end

-- FIXME: had to inline a bunch of stuff from `simpGoal` here
/--
The core of `norm_num` as a tactic in `MetaM`.

* `g`: The goal to simplify
* `ctx`: The simp context, constructed by `mkSimpContext` and
  containing any additional simp rules we want to use
* `fvarIdsToSimp`: The selected set of hypotheses used in the location argument
* `simplifyTarget`: true if the target is selected in the location argument
* `useSimp`: true if we used `norm_num` instead of `norm_num1`

Returns `none` if the goal was closed, otherwise the new hypotheses and the new goal.
-/
def normNumAt (g : MVarId) (ctx : Simp.Context) (fvarIdsToSimp : Array FVarId)
    (simplifyTarget := true) (useSimp := true) :
    MetaM (Option (Array FVarId × MVarId)) := g.withContext do
  g.checkNotAssigned `norm_num
  let mut g := g
  let mut toAssert := #[]
  let mut replaced := #[]
  for fvarId in fvarIdsToSimp do
    let localDecl ← fvarId.getDecl
    let type ← instantiateMVars localDecl.type
    -- Don't let a hypothesis rewrite itself: drop it from the simp set for this pass.
    let ctx := { ctx with simpTheorems := ctx.simpTheorems.eraseTheorem (.fvar localDecl.fvarId) }
    let r ← deriveSimp ctx useSimp type
    match r.proof? with
    | some _ =>
      -- The hypothesis changed with an explicit proof: queue a replacement hypothesis.
      let some (value, type) ← applySimpResultToProp g (mkFVar fvarId) type r
        | return none
      toAssert := toAssert.push { userName := localDecl.userName, type, value }
    | none =>
      -- A hypothesis simplified to `False` closes the goal outright.
      if r.expr.isConstOf ``False then
        g.assign (← mkFalseElim (← g.getType) (mkFVar fvarId))
        return none
      -- Definitional change: update the hypothesis in place.
      g ← g.replaceLocalDeclDefEq fvarId r.expr
      replaced := replaced.push fvarId
  if simplifyTarget then
    let res ← g.withContext do
      let target ← instantiateMVars (← g.getType)
      let r ← deriveSimp ctx useSimp target
      -- If the target simplified to `True`, close the goal; otherwise keep simplifying.
      let some proof ← r.ofTrue
        | some <$> applySimpResultToTarget g target r
      g.assign proof
      pure none
    let some gNew := res | return none
    g := gNew
  let (fvarIdsNew, gNew) ← g.assertHypotheses toAssert
  -- Clear the original hypotheses that were re-asserted (not updated in place).
  let toClear := fvarIdsToSimp.filter fun fvarId ↦ !replaced.contains fvarId
  let gNew ← gNew.tryClearMany toClear
  return some (fvarIdsNew, gNew)

open Tactic in
/-- Constructs a simp context from the simp argument syntax. -/
def getSimpContext (cfg args : Syntax) (simpOnly := false) :
    TacticM Simp.Context := do
  let config ← elabSimpConfigCore cfg
  let simpTheorems ←
    if simpOnly then simpOnlyBuiltins.foldlM (·.addConst ·) {} else getSimpTheorems
  let mut { ctx, simprocs := _, starArg } ← elabSimpArgs args[0] (eraseLocal := false)
    (kind := .simp) (simprocs := {}) { config, simpTheorems := #[simpTheorems],
      congrTheorems := ← getSimpCongrTheorems }
  unless starArg do return ctx
  -- `*` was passed: add every propositional hypothesis (not erased) as a simp lemma.
  let mut simpTheorems := ctx.simpTheorems
  for h in ← getPropHyps do
    unless simpTheorems.isErased (.fvar h) do
      simpTheorems ← simpTheorems.addTheorem (.fvar h) (← h.getDecl).toExpr
  pure { ctx with simpTheorems }

open Elab.Tactic in
/--
Elaborates a call to `norm_num only? [args]` or `norm_num1`.
* `args`: the `(simpArgs)?` syntax for simp arguments
* `loc`: the `(location)?` syntax for the optional location argument
* `simpOnly`: true if `only` was used in `norm_num`
* `useSimp`: false if `norm_num1` was used, in which case only the structural parts
  of `simp` will be used, not any of the post-processing that `simp only` does without lemmas
-/
-- FIXME: had to inline a bunch of stuff from `mkSimpContext` and `simpLocation` here
def elabNormNum (cfg args loc : Syntax) (simpOnly := false) (useSimp := true) : TacticM Unit := do
  let ctx ← getSimpContext cfg args (!useSimp || simpOnly)
  let g ← getMainGoal
  let res ← match expandOptLocation loc with
  | .targets hyps simplifyTarget => normNumAt g ctx (← getFVarIds hyps) simplifyTarget useSimp
  | .wildcard => normNumAt g ctx (← g.getNondepPropHyps) (simplifyTarget := true) useSimp
  match res with
  | none => replaceMainGoal []
  | some (_, g) => replaceMainGoal [g]

end Meta.NormNum

namespace Tactic

open Lean.Parser.Tactic Meta.NormNum

/--
Normalize numerical expressions. Supports the operations `+` `-` `*` `/` `⁻¹` `^` and `%`
over numerical types such as `ℕ`, `ℤ`, `ℚ`, `ℝ`, `ℂ` and some general algebraic types,
and can prove goals of the form `A = B`, `A ≠ B`, `A < B` and `A ≤ B`, where `A` and `B` are
numerical expressions. It also has a relatively simple primality prover.
-/
elab (name := normNum) "norm_num" cfg:(config ?) only:&" only"? args:(simpArgs ?)
    loc:(location ?) : tactic =>
  elabNormNum cfg args loc (simpOnly := only.isSome) (useSimp := true)

/-- Basic version of `norm_num` that does not call `simp`. -/
elab (name := normNum1) "norm_num1" loc:(location ?) : tactic =>
  elabNormNum mkNullNode mkNullNode loc (simpOnly := true) (useSimp := false)

open Lean Elab Tactic

@[inherit_doc normNum1] syntax (name := normNum1Conv) "norm_num1" : conv

/-- Elaborator for `norm_num1` conv tactic. -/
@[tactic normNum1Conv] def elabNormNum1Conv : Tactic := fun _ ↦ withMainContext do
  let ctx ← getSimpContext mkNullNode mkNullNode true
  Conv.applySimpResult (← deriveSimp ctx (← instantiateMVars (← Conv.getLhs)) (useSimp := false))

@[inherit_doc normNum] syntax (name := normNumConv)
    "norm_num" (config)? &" only"? (simpArgs)? : conv

/-- Elaborator for `norm_num` conv tactic. -/
@[tactic normNumConv] def elabNormNumConv : Tactic := fun stx ↦ withMainContext do
  let ctx ← getSimpContext stx[1] stx[3] !stx[2].isNone
  Conv.applySimpResult (← deriveSimp ctx (← instantiateMVars (← Conv.getLhs)) (useSimp := true))

/--
The basic usage is `#norm_num e`, where `e` is an expression,
which will print the `norm_num` form of `e`.

Syntax: `#norm_num` (`only`)? (`[` simp lemma list `]`)? `:`? expression

This accepts the same options as the `#simp` command.
You can specify additional simp lemmas as usual, for example using `#norm_num [f, g] : e`.
(The colon is optional but helpful for the parser.)
The `only` restricts `norm_num` to using only the provided lemmas, and so
`#norm_num only : e` behaves similarly to `norm_num1`.

Unlike `norm_num`, this command does not fail when no simplifications are made.

`#norm_num` understands local variables, so you can use them to introduce parameters.
-/
macro (name := normNumCmd) "#norm_num" cfg:(config)? o:(&" only")? args:(Parser.Tactic.simpArgs)?
    " :"? ppSpace e:term : command =>
  `(command| #conv norm_num $(cfg)? $[only%$o]? $(args)? => $e)
Tactic\NormNum\DivMod.lean
/-
Copyright (c) 2023 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen, Mario Carneiro
-/
import Mathlib.Tactic.NormNum.Basic
import Mathlib.Tactic.NormNum.Ineq

/-!
# `norm_num` extension for integer div/mod and divides

This file adds support for the `%`, `/`, and `∣` (divisibility) operators on `ℤ`
to the `norm_num` tactic.
-/

namespace Mathlib
open Lean hiding Rat mkRat
open Meta

namespace Meta.NormNum
open Qq

lemma isInt_ediv_zero : ∀ {a b r : ℤ}, IsInt a r → IsNat b (nat_lit 0) → IsNat (a / b) (nat_lit 0)
  | _, _, _, ⟨rfl⟩, ⟨rfl⟩ => ⟨by simp [Int.ediv_zero]⟩

-- `a = r + q * b'` with `0 ≤ r < b'` certifies `a / b = q` (Euclidean division).
lemma isInt_ediv {a b q m a' : ℤ} {b' r : ℕ} (ha : IsInt a a') (hb : IsNat b b')
    (hm : q * b' = m) (h : r + m = a') (h₂ : Nat.blt r b' = true) :
    IsInt (a / b) q := ⟨by
  obtain ⟨⟨rfl⟩, ⟨rfl⟩⟩ := ha, hb
  simp only [Nat.blt_eq] at h₂; simp only [← h, ← hm, Int.cast_id]
  rw [Int.add_mul_ediv_right _ _ (Int.ofNat_ne_zero.2 ((Nat.zero_le ..).trans_lt h₂).ne')]
  rw [Int.ediv_eq_zero_of_lt, zero_add] <;> [simp; simpa using h₂]⟩

-- Reduce division by a negative denominator to division by its negation.
lemma isInt_ediv_neg {a b q q' : ℤ} (h : IsInt (a / -b) q) (hq : -q = q') : IsInt (a / b) q' :=
  ⟨by rw [Int.cast_id, ← hq, ← @Int.cast_id q, ← h.out, ← Int.ediv_neg, Int.neg_neg]⟩

lemma isNat_neg_of_isNegNat {a : ℤ} {b : ℕ} (h : IsInt a (.negOfNat b)) : IsNat (-a) b :=
  ⟨by simp [h.out]⟩

/-- The `norm_num` extension which identifies expressions of the form `Int.ediv a b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℤ) / _, Int.ediv _ _] partial def evalIntDiv : NormNumExt where
  eval {u α} e := do
    let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure
    -- We assert that the default instance for `HDiv` is `Int.div` when the first parameter is `ℤ`.
    guard <|← withNewMCtxDepth <| isDefEq f q(HDiv.hDiv (α := ℤ))
    haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℤ := ⟨⟩
    haveI' : $e =Q ($a / $b) := ⟨⟩
    let rℤ : Q(Ring ℤ) := q(Int.instRing)
    let ⟨za, na, pa⟩ ← (← derive a).toInt rℤ
    match ← derive (u := .zero) b with
    | .isNat inst nb pb =>
      assumeInstancesCommute
      if nb.natLit! == 0 then
        -- Division by zero is defined to be zero (`Int.ediv_zero`).
        have _ : $nb =Q nat_lit 0 := ⟨⟩
        return .isNat q(instAddMonoidWithOne) q(nat_lit 0) q(isInt_ediv_zero $pa $pb)
      else
        let ⟨zq, q, p⟩ := core a na za pa b nb pb
        return .isInt rℤ q zq p
    | .isNegNat _ nb pb =>
      -- Negative denominator: evaluate `a / -b` and negate the quotient.
      assumeInstancesCommute
      let ⟨zq, q, p⟩ := core a na za pa q(-$b) nb q(isNat_neg_of_isNegNat $pb)
      have q' := mkRawIntLit (-zq)
      have : Q(-$q = $q') := (q(Eq.refl $q') :)
      return .isInt rℤ q' (-zq) q(isInt_ediv_neg $p $this)
    | _ => failure
where
  /-- Given results for evaluating `a` and `b` in `ℤ`, where `b > 0`, evaluate `a / b`. -/
  core (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) (b : Q(ℤ)) (nb : Q(ℕ))
      (pb : Q(IsNat $b $nb)) : ℤ × (q : Q(ℤ)) × Q(IsInt ($a / $b) $q) :=
    let b := nb.natLit!
    -- Compute quotient and remainder on raw literals; the three side conditions of
    -- `isInt_ediv` then all hold by `Eq.refl` on those literals.
    let q := za / b
    have nq := mkRawIntLit q
    let r := za.natMod b
    have nr : Q(ℕ) := mkRawNatLit r
    let m := q * b
    have nm := mkRawIntLit m
    have pf₁ : Q($nq * $nb = $nm) := (q(Eq.refl $nm) :)
    have pf₂ : Q($nr + $nm = $na) := (q(Eq.refl $na) :)
    have pf₃ : Q(Nat.blt $nr $nb = true) := (q(Eq.refl true) :)
    ⟨q, nq, q(isInt_ediv $pa $pb $pf₁ $pf₂ $pf₃)⟩

lemma isInt_emod_zero : ∀ {a b r : ℤ}, IsInt a r → IsNat b (nat_lit 0) → IsInt (a % b) r
  | _, _, _, e, ⟨rfl⟩ => by simp [e]

-- `a = r + q * b'` with `0 ≤ r < b'` certifies `a % b = r` (Euclidean remainder).
lemma isInt_emod {a b q m a' : ℤ} {b' r : ℕ} (ha : IsInt a a') (hb : IsNat b b')
    (hm : q * b' = m) (h : r + m = a') (h₂ : Nat.blt r b' = true) :
    IsNat (a % b) r := ⟨by
  obtain ⟨⟨rfl⟩, ⟨rfl⟩⟩ := ha, hb
  simp only [← h, ← hm, Int.add_mul_emod_self]
  rw [Int.emod_eq_of_lt] <;> [simp; simpa using h₂]⟩

-- The remainder is unchanged when the sign of the denominator is flipped.
lemma isInt_emod_neg {a b : ℤ} {r : ℕ} (h : IsNat (a % -b) r) : IsNat (a % b) r :=
  ⟨by rw [← Int.emod_neg, h.out]⟩

/-- The `norm_num` extension which identifies expressions of the form `Int.emod a b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℤ) % _, Int.emod _ _] partial def evalIntMod : NormNumExt where
  eval {u α} e := do
    let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure
    -- We assert that the default instance for `HMod` is `Int.mod` when the first parameter is `ℤ`.
    guard <|← withNewMCtxDepth <| isDefEq f q(HMod.hMod (α := ℤ))
    haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℤ := ⟨⟩
    haveI' : $e =Q ($a % $b) := ⟨⟩
    let rℤ : Q(Ring ℤ) := q(Int.instRing)
    let some ⟨za, na, pa⟩ := (← derive a).toInt rℤ | failure
    go a na za pa b (← derive (u := .zero) b)
where
  /-- Given results for evaluating `a` and `b` in `ℤ`, evaluate `a % b`. -/
  go (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) (b : Q(ℤ)) :
      Result b → Option (Result q($a % $b))
  | .isNat inst nb pb => do
    assumeInstancesCommute
    if nb.natLit! == 0 then
      -- `a % 0 = a`, so the result is just the numerator.
      have _ : $nb =Q nat_lit 0 := ⟨⟩
      return .isInt q(Int.instRing) na za q(isInt_emod_zero $pa $pb)
    else
      let ⟨r, p⟩ := core a na za pa b nb pb
      return .isNat q(instAddMonoidWithOne) r p
  | .isNegNat _ nb pb => do
    -- Negative denominator: evaluate `a % -b`; the remainder is the same.
    assumeInstancesCommute
    let ⟨r, p⟩ := core a na za pa q(-$b) nb q(isNat_neg_of_isNegNat $pb)
    return .isNat q(instAddMonoidWithOne) r q(isInt_emod_neg $p)
  | _ => none
  /-- Given results for evaluating `a` and `b` in `ℤ`, where `b > 0`, evaluate `a % b`. -/
  core (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) (b : Q(ℤ)) (nb : Q(ℕ))
      (pb : Q(IsNat $b $nb)) : (r : Q(ℕ)) × Q(IsNat ($a % $b) $r) :=
    let b := nb.natLit!
    -- Compute quotient and remainder on raw literals; the three side conditions of
    -- `isInt_emod` then all hold by `Eq.refl` on those literals.
    let q := za / b
    have nq := mkRawIntLit q
    let r := za.natMod b
    have nr : Q(ℕ) := mkRawNatLit r
    let m := q * b
    have nm := mkRawIntLit m
    have pf₁ : Q($nq * $nb = $nm) := (q(Eq.refl $nm) :)
    have pf₂ : Q($nr + $nm = $na) := (q(Eq.refl $na) :)
    have pf₃ : Q(Nat.blt $nr $nb = true) := (q(Eq.refl true) :)
    ⟨nr, q(isInt_emod $pa $pb $pf₁ $pf₂ $pf₃)⟩

-- An explicit cofactor `c` with `a' * c = b'` witnesses `a ∣ b`.
theorem isInt_dvd_true : {a b : ℤ} → {a' b' c : ℤ} →
    IsInt a a' → IsInt b b' → Int.mul a' c = b' → a ∣ b
  | _, _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨_, rfl⟩

-- A nonzero remainder `b' % a' ≠ 0` refutes `a ∣ b`.
theorem isInt_dvd_false : {a b : ℤ} → {a' b' : ℤ} →
    IsInt a a' → IsInt b b' → Int.mod b' a' != 0 → ¬a ∣ b
  | _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, e => mt Int.mod_eq_zero_of_dvd (by simpa using e)

/-- The `norm_num` extension which identifies expressions of the form `(a : ℤ) ∣ b`,
such that `norm_num` successfully recognises both `a` and `b`. -/
@[norm_num (_ : ℤ) ∣ _] def evalIntDvd : NormNumExt where
  eval {u α} e := do
    let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure
    haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q Prop := ⟨⟩
    haveI' : $e =Q ($a ∣ $b) := ⟨⟩
    -- We assert that the default instance for `Dvd` is `Int.dvd` when the first parameter is `ℤ`.
    guard <|← withNewMCtxDepth <| isDefEq f q(Dvd.dvd (α := ℤ))
    let rℤ : Q(Ring ℤ) := q(Int.instRing)
    let ⟨za, na, pa⟩ ← (← derive a).toInt rℤ
    let ⟨zb, nb, pb⟩ ← (← derive b).toInt rℤ
    if zb % za == 0 then
      -- Divisible: exhibit the cofactor `zc = zb / za` so `na * c = nb` holds by `rfl`.
      let zc := zb / za
      have c := mkRawIntLit zc
      haveI' : Int.mul $na $c =Q $nb := ⟨⟩
      return .isTrue q(isInt_dvd_true $pa $pb (.refl $nb))
    else
      -- Not divisible: `nb % na != 0` is decided by reflexivity on the raw literals.
      have : Q(Int.mod $nb $na != 0) := (q(Eq.refl true) : Expr)
      return .isFalse q(isInt_dvd_false $pa $pb $this)

end Mathlib.Meta.NormNum