source
stringlengths
17
118
lean4
stringlengths
0
335k
.lake/packages/aesop/Aesop/Forward/RuleInfo.lean
module public import Aesop.Forward.SlotIndex public import Aesop.RulePattern import Aesop.Index.DiscrTreeConfig import Aesop.Util.UnionFind import Lean.Util.CollectLevelParams import Batteries.Data.Array.Basic public section set_option linter.missingDocs true open Lean Lean.Meta namespace Aesop /-- A slot represents a maximal premise of a forward rule, i.e. a premise with no forward dependencies. The goal of forward reasoning is to assign a hypothesis to each slot in such a way that the assignments agree on all variables shared between them. Exceptionally, a slot can also represent the rule pattern substitution. Rules with a rule pattern have exactly one such slot, which is assigned an arbitrary premise index. -/ structure Slot where /-- Discrimination tree keys for the type of this slot. If the slot is for the rule pattern, it is not associated with a premise, so doesn't have discrimination tree keys. -/ typeDiscrTreeKeys? : Option (Array DiscrTree.Key) /-- Index of the slot. Slots are always part of a list of slots, and `index` is the 0-based index of this slot in that list. -/ index : SlotIndex /-- 0-based index of the premise represented by this slot in the rule type. Note that the slots array may use a different ordering than the original order of premises, so we *don't* always have `index ≤ premiseIndex`. Rule pattern slots are assigned an arbitrary premise index. -/ premiseIndex : PremiseIndex /-- The previous premises that the premise of this slot depends on. -/ deps : Std.HashSet PremiseIndex /-- Common variables shared between this slot and the previous slots. -/ common : Std.HashSet PremiseIndex /-- The forward dependencies of this slot. These are all the premises that appear in slots *after* this one. -/ forwardDeps : Array PremiseIndex deriving Inhabited local instance : BEq Slot := ⟨λ s₁ s₂ => s₁.premiseIndex == s₂.premiseIndex⟩ local instance : Hashable Slot := ⟨(hash ·.premiseIndex)⟩ /-- Information about the decomposed type of a forward rule. 
-/ structure ForwardRuleInfo where /-- The rule's number of premises. -/ numPremises : Nat /-- The number of distinct level parameters and level metavariables occurring in the rule's type. We expect that these turn into level metavariables when the rule is elaborated. -/ numLevelParams : Nat /-- Slots representing the maximal premises of the forward rule, partitioned into metavariable clusters. -/ slotClusters : Array (Array Slot) /-- The premises that appear in the rule's conclusion. -/ conclusionDeps : Array PremiseIndex /-- The rule's rule pattern and the premise index that was assigned to it. -/ rulePatternInfo? : Option (RulePattern × PremiseIndex) deriving Inhabited namespace ForwardRuleInfo /-- Is this rule a constant rule (i.e., does it have neither premises nor a rule pattern)? -/ def isConstant (r : ForwardRuleInfo) : Bool := r.numPremises == 0 && r.rulePatternInfo?.isNone /-- Construct a `ForwardRuleInfo` for the theorem `thm`. -/ def ofExpr (thm : Expr) (rulePattern? : Option RulePattern) (immediate : UnorderedArraySet PremiseIndex) : MetaM ForwardRuleInfo := withNewMCtxDepth do let e ← instantiateMVars (← inferType thm) let numLevelParams := (collectLevelParams {} e).params.size + (collectLevelMVars {} e).result.size let (premises, _, conclusion) ← withReducible do forallMetaTelescope e let premises := premises.map (·.mvarId!) let mut premiseToIdx : Std.HashMap MVarId PremiseIndex := ∅ for h : i in [:premises.size] do premiseToIdx := premiseToIdx.insert premises[i] ⟨i⟩ let mut slots : Array Slot := Array.mkEmpty premises.size let mut allDeps : Std.HashSet PremiseIndex := ∅ for h : i in [:premises.size] do let mvarId := premises[i] let typeDiscrTreeKeys ← mkDiscrTreePath (← mvarId.getType) let mut deps : Std.HashSet PremiseIndex := ∅ for dep in ← mvarId.getMVarDependencies do if let some idx := premiseToIdx[dep]? then deps := deps.insert idx -- We update the `default` fields with correct info later. slots := slots.push { typeDiscrTreeKeys? 
:= typeDiscrTreeKeys index := default premiseIndex := ⟨i⟩ common := default forwardDeps := default deps } allDeps := allDeps.insertMany deps -- Slots are created only for premises which are maximal, i.e. which do not -- appear in any other premises, and which are not bound by the rule pattern. let patBoundPremises : Std.HashSet PremiseIndex := rulePattern?.map (.ofArray $ ·.boundPremises.map (⟨·⟩)) |>.getD ∅ slots := slots.filter λ s => let idx := s.premiseIndex ! allDeps.contains idx && ! patBoundPremises.contains idx && immediate.contains idx -- If the rule has a pattern, an additional slot is created for the rule -- pattern substitution. Again, we update the `default` fields with correct -- info later. if rulePattern?.isSome then slots := slots.push { typeDiscrTreeKeys? := none index := default premiseIndex := ⟨premises.size⟩ common := default deps := patBoundPremises forwardDeps := default } -- Slots are clustered into metavariable clusters and sorted as indicated -- below. let slotClusters := cluster (·.deps.toArray) slots |>.map sortSlots -- The sorting ensures that for each slot in a cluster (except the first), the -- slot has some variables in common with the previous slots. assert! ! slotClusters.any λ cluster => cluster.any λ slot => slot.index.toNat > 0 && slot.common.isEmpty let conclusionDeps := (← getMVars conclusion).filterMap (premiseToIdx[·]?) let rulePatternInfo? := rulePattern?.map (·, ⟨premises.size⟩) return { numPremises := premises.size slotClusters, rulePatternInfo?, conclusionDeps, numLevelParams } where /-- Sort slots such that each slot has at least one variable in common with the previous slots. -/ sortSlots (slots : Array Slot) : Array Slot := Id.run do if slots.isEmpty then panic! "empty slot cluster" -- Slots with more dependencies are preferred. In case of a tie, later slots -- are preferred since they tend to be more specific. 
have : Ord Slot := ⟨λ x y => compareOn (·.deps.size) x y |>.then (compareOn (·.premiseIndex) x y) ⟩ let firstSlot := slots.maxI let mut unseen := slots |>.erase firstSlot let firstSlotForwardDeps : Std.HashSet PremiseIndex := unseen.foldl (init := ∅) λ deps s => deps.insertMany s.deps let firstSlot := { firstSlot with index := ⟨0⟩ common := ∅ forwardDeps := firstSlotForwardDeps.toArray } let mut newSlots := Array.mkEmpty slots.size |>.push firstSlot let mut previousDeps := firstSlot.deps let mut i := 1 while newSlots.size != slots.size do let candidates := unseen.filter (·.deps.any (previousDeps.contains ·)) let some slot := candidates.max? | panic! "no suitable slot found" unseen := unseen.erase slot let common := previousDeps.filter (slot.deps.contains ·) let forwardDeps : Std.HashSet PremiseIndex := unseen.foldl (init := ∅) λ deps s => deps.insertMany s.deps let forwardDeps := forwardDeps.toArray newSlots := newSlots.push { slot with index := ⟨i⟩, common, forwardDeps } previousDeps := previousDeps.insertMany slot.deps i := i + 1 return newSlots end ForwardRuleInfo
.lake/packages/aesop/Aesop/Forward/PremiseIndex.lean
module

public section

namespace Aesop

/-- 0-based index identifying a premise of a forward rule. -/
structure PremiseIndex where
  /-- The underlying natural number of this premise index. -/
  toNat : Nat
  deriving Inhabited, BEq, Hashable, DecidableEq, Ord

-- Premise indices are compared via their underlying naturals.
instance : LT PremiseIndex where
  lt i j := i.toNat < j.toNat

instance : DecidableRel (α := PremiseIndex) (· < ·) :=
  fun i j => inferInstanceAs <| Decidable (i.toNat < j.toNat)

instance : LE PremiseIndex where
  le i j := i.toNat ≤ j.toNat

instance : DecidableRel (α := PremiseIndex) (· ≤ ·) :=
  fun i j => inferInstanceAs <| Decidable (i.toNat ≤ j.toNat)

instance : ToString PremiseIndex where
  toString i := toString i.toNat

end Aesop
.lake/packages/aesop/Aesop/Forward/CompleteMatchQueue.lean
import Batteries.Data.BinomialHeap.Basic
import Aesop.Forward.Match

set_option linter.missingDocs true

namespace Aesop

open Batteries (BinomialHeap)

/-- A complete match queue. -/
abbrev CompleteMatchQueue :=
  BinomialHeap ForwardRuleMatch ForwardRuleMatch.le

namespace CompleteMatchQueue

/-- Drop elements satisfying `f` from the front of `queue` until we reach an
element that does not satisfy `f` (or until the queue is empty). -/
partial def dropInitial (queue : CompleteMatchQueue)
    (f : ForwardRuleMatch → Bool) : CompleteMatchQueue :=
  if let some (m, rest) := queue.deleteMin then
    -- Keep dropping while the front element satisfies `f`; otherwise the
    -- original queue (with `m` still at the front) is returned unchanged.
    if f m then dropInitial rest f else queue
  else
    queue

end Aesop.CompleteMatchQueue
.lake/packages/aesop/Aesop/Forward/Match/Types.lean
module public import Aesop.Rule.Forward public section set_option linter.missingDocs true namespace Aesop open Lean /-- A match associates hypotheses to (a prefix of) the slots of a slot cluster. -/ structure Match where /-- The substitution induced by the hyps or pattern substitutions added to the slots. -/ subst : Substitution /-- The pattern substitutions that have been added to the match. -/ patInstSubsts : Array Substitution /-- The match's level is the index of the maximal slot for which a hyp or pattern substitution has been added to the match. -/ level : SlotIndex /-- Premises that appear in slots which are as yet unassigned in this match (i.e., in slots with index greater than `level`). This is a property of the rule, but we include it here because it's used to check whether two matches are equivalent. -/ forwardDeps : Array PremiseIndex /-- Premises that appear in the rule's conclusion. This is a property of the rule, but we include it here because it's used to check whether two matches are equivalent. -/ conclusionDeps : Array PremiseIndex deriving Inhabited namespace Match /-- Two matches are equivalent if (a) they have the same level; (b) for each premise that appears in a slot greater than the matches' level, their substitution assigns the same value; (c) for each premise that appears in the rule's conclusion, their substitution assigns the same value. If we already have a match `m₁` and we obtain an equivalent match `m₂`, then `m₂` is redundant. This is because if the matches are partial, then `m₂` can be completed by exactly the hypotheses that complete `m₁`, since they agree on the premise instantiations that are relevant for the possible completions. And if the matches are complete, then they assign the same instantiations to the variables that appear in the rule's conclusion, and these are the only ones that ultimately matter. 
-/ -- TODO I believe we could be even more aggressive when forwardDeps and -- conclusionDeps contain proofs of propositions and consider any such proofs -- equal. protected def equiv (m₁ m₂ : Match) : Bool := m₁.level == m₂.level && m₁.forwardDeps.all (λ p => m₁.subst.find? p == m₂.subst.find? p) && m₁.conclusionDeps.all (λ p => m₁.subst.find? p == m₂.subst.find? p) instance : BEq Match := ⟨Match.equiv⟩ instance : Hashable Match where hash m := let h := hash m.level let h := m.forwardDeps.foldl (init := h) λ h p => mixHash h $ hash (m.subst.find? p) m.conclusionDeps.foldl (init := h) λ h p => mixHash h $ hash (m.subst.find? p) instance : Ord Match where compare m₁ m₂ := compare m₁.level m₂.level |>.then $ if m₁ == m₂ then .eq else compare m₁.subst m₂.subst instance : ToMessageData Match where toMessageData m := m!"{m.subst}" end Match set_option linter.missingDocs false in /-- A complete match contains complete matches for each slot cluster. This means there is one match for each slot cluster and each such match contains a hypothesis for each of the slots. -/ structure CompleteMatch where clusterMatches : Array Match deriving Inhabited, BEq, Hashable -- TODO hash as a computed field instance : EmptyCollection CompleteMatch := ⟨{ clusterMatches := ∅ }⟩ instance : Ord CompleteMatch where compare m₁ m₂ := compareArraySizeThenLex compare m₁.clusterMatches m₂.clusterMatches /-- An entry in the forward state queues. Represents a complete match. -/ structure ForwardRuleMatch where /-- The rule to which this match belongs. -/ rule : ForwardRule /-- The match. -/ «match» : CompleteMatch deriving Inhabited, BEq, Hashable namespace ForwardRuleMatch /-- Compare two queue entries by rule priority, rule name and the expressions contained in the match. Higher-priority rules are considered less (since the queues are min-queues). The ordering on expressions is arbitrary. 
-/ protected instance ord : Ord ForwardRuleMatch where compare m₁ m₂ := compare m₁.rule m₂.rule |>.then $ compare m₁.match m₂.match @[inherit_doc ForwardRuleMatch.ord] protected def le (m₁ m₂ : ForwardRuleMatch) : Bool := compare m₁ m₂ |>.isLE end Aesop.ForwardRuleMatch
.lake/packages/aesop/Aesop/Forward/State/ApplyGoalDiff.lean
module public import Aesop.Forward.State public import Aesop.RuleSet import Aesop.RPINF public section namespace Aesop open Lean Lean.Meta /-- Apply a goal diff to the state, adding and removing hypotheses as indicated by the diff. -/ def ForwardState.applyGoalDiff (rs : LocalRuleSet) (diff : GoalDiff) (fs : ForwardState) : BaseM (ForwardState × Array ForwardRuleMatch) := do if ! aesop.dev.statefulForward.get (← getOptions) then -- We still update the hyp types since these are also used by stateless -- forward reasoning. return ({ fs with hypTypes := ← updateHypTypes fs.hypTypes } , #[]) let fs ← diff.oldGoal.withContext do diff.removedFVars.foldM (init := fs) λ fs h => do eraseHyp h fs diff.newGoal.withContext do let (fs, ruleMatches) ← diff.addedFVars.foldM (init := (fs, ∅)) λ (fs, ruleMatches) h => do addHyp h fs ruleMatches if ← diff.targetChanged' then updateTarget fs ruleMatches else return (fs, ruleMatches) where eraseHyp (h : FVarId) (fs : ForwardState) : BaseM ForwardState := withConstAesopTraceNode .forward (return m!"erase hyp {Expr.fvar h} ({h.name})") do return fs.eraseHyp h (← rpinf (← h.getType)) addHyp (h : FVarId) (fs : ForwardState) (ruleMatches : Array ForwardRuleMatch) : BaseM (ForwardState × Array ForwardRuleMatch) := do let rules ← rs.applicableForwardRules (← h.getType) let patInsts ← rs.forwardRulePatternSubstsInLocalDecl (← h.getDecl) fs.addHypWithPatSubstsCore ruleMatches diff.newGoal h rules patInsts updateTarget (fs : ForwardState) (ruleMatches : Array ForwardRuleMatch) : BaseM (ForwardState × Array ForwardRuleMatch) := do let patInsts ← rs.forwardRulePatternSubstsInExpr (← diff.newGoal.getType) fs.updateTargetPatSubstsCore ruleMatches diff.newGoal patInsts updateHypTypes (hypTypes : PHashSet RPINF) : BaseM (PHashSet RPINF) := do let mut hypTypes := hypTypes for fvarId in diff.removedFVars do let type ← diff.oldGoal.withContext do rpinf (← fvarId.getType) hypTypes := hypTypes.erase type for fvarId in diff.addedFVars do let type ← 
diff.newGoal.withContext do rpinf (← fvarId.getType) hypTypes := hypTypes.insert type return hypTypes end Aesop
.lake/packages/aesop/Aesop/Forward/State/Initial.lean
module public import Aesop.Forward.State public import Aesop.RuleSet import Aesop.RPINF public section open Lean Lean.Meta namespace Aesop.LocalRuleSet def mkInitialForwardState (goal : MVarId) (rs : LocalRuleSet) : BaseM (ForwardState × Array ForwardRuleMatch) := goal.withContext do if ! aesop.dev.statefulForward.get (← getOptions) then -- We still initialise the hyp types since these are also used by -- stateless forward reasoning. let mut hypTypes := ∅ for ldecl in ← getLCtx do if ! ldecl.isImplementationDetail then hypTypes := hypTypes.insert (← rpinf ldecl.type) return ({ (∅ : ForwardState) with hypTypes }, #[]) let mut fs : ForwardState := ∅ let mut ruleMatches := rs.constForwardRuleMatches aesop_trace[forward] do for m in ruleMatches do aesop_trace![forward] "match for constant rule {m.rule.name}" for ldecl in ← show MetaM _ from getLCtx do if ldecl.isImplementationDetail then continue let rules ← rs.applicableForwardRules ldecl.type let patInsts ← rs.forwardRulePatternSubstsInLocalDecl ldecl let (fs', ruleMatches') ← fs.addHypWithPatSubstsCore ruleMatches goal ldecl.fvarId rules patInsts fs := fs' ruleMatches := ruleMatches' let patInsts ← rs.forwardRulePatternSubstsInExpr (← goal.getType) fs.addPatSubstsCore ruleMatches goal patInsts end Aesop.LocalRuleSet
.lake/packages/importGraph/ImportGraphTest.lean
import ImportGraphTest.AnotherFileWithTransitiveImports import ImportGraphTest.Dot import ImportGraphTest.FileWithTransitiveImports import ImportGraphTest.Imports import ImportGraphTest.Unused import ImportGraphTest.Used
.lake/packages/importGraph/README.md
# importGraph A simple tool to create import graphs of lake packages. ## Requirements For creating different output formats than `.dot` (for example to create a `.pdf` file), you should have [`graphviz`](https://graphviz.org/) installed. ## Usage If you are using mathlib, the tool will already be available. If not, see installation notes below. Once available in your project, you can create import graphs with ```bash lake exe graph ``` A typical command is ```bash lake exe graph --to MyModule my_graph.pdf ``` where `MyModule` follows the same module naming you would use to `import` it in lean. See `lake exe graph --help` for more options. You can specify multiple sources and targets e.g. as ```bash lake exe graph --from MyModule1,MyModule2 --to MyModule3,MyModule4 my_graph.pdf ``` ### Troubleshooting * make sure to `lake build` your project (or the specified `--to` module) before using `lake exe graph`! ### Json To create a Json file, you can use `.xdot_json` as output type: ```bash lake exe graph my_graph.xdot_json ``` ### HTML ``` lake exe graph my_graph.html ``` creates a stand-alone HTML file visualising the import structure. ## Commands There are a few commands implemented, which help you analyse the imports of a file. These are accessible by adding `import ImportGraph.Imports` to your lean file. * `#redundant_imports`: lists any transitively redundant imports in the current module. * `#min_imports`: attempts to construct a minimal set of imports for the declarations in the current file. (Must be run at the end of the file. Tactics and macros may result in incorrect output.) * `#find_home decl`: suggests files higher up the import hierarchy to which `decl` could be moved. ## Other executables `lake exe unused_transitive_imports m1 m2 ...` For each specified module `m`, prints those `n` from the argument list which are imported, but transitively unused by `m`. 
## Installation The installation works exactly like for any [Lake package](https://reservoir.lean-lang.org/), see [Lake docs](https://github.com/leanprover/lean4/tree/master/src/lake#supported-sources). *This is only relevant if your project does not already require `importGraph` through another lake package (e.g. mathlib). If it does, do not follow these instructions; instead just use the tool with `lake exe graph`!* You can import this in any Lean project by adding the following line to your `lakefile.lean`: ```lean require "leanprover-community" / "importGraph" @ git "main" ``` or, if you have a `lakefile.toml`, it would be ```toml [[require]] name = "importGraph" source = "leanprover-community" rev = "main" ``` Then, you might need to call `lake update -R importGraph` in your project. ## Contribution Please open PRs/Issues if you have trouble or would like to contribute new features! ## Credits The main tool has been extracted from [mathlib](https://github.com/leanprover-community/mathlib4), originally written by Kim Morrison and other mathlib contributors. The HTML visualisation has been incorporated from [a project by Eric Wieser](https://github.com/eric-wieser/mathlib-import-graph). ### Maintainers Primarily maintained by [Jon Eugster](https://leanprover.zulipchat.com/#narrow/dm/385895-Jon-Eugster), Kim Morrison, and the wider leanprover community.
.lake/packages/importGraph/Main.lean
import ImportGraph.Cli /-! # `lake exe graph` This is a replacement for Lean 3's `leanproject import-graph` tool. -/ open Cli /-- Setting up command line options and help text for `lake exe graph`. -/ def graph : Cmd := `[Cli| graph VIA importGraphCLI; ["0.0.3"] "Generate representations of a Lean import graph. \ By default generates the import graph up to `Mathlib`. \ If you are working in a downstream project, use `lake exe graph --to MyProject`." FLAGS: "show-transitive"; "Show transitively redundant edges." "to" : Array ModuleName; "Only show the upstream imports of the specified modules." "from" : Array ModuleName; "Only show the downstream dependencies of the specified modules." "exclude-meta"; "Exclude any files starting with `Mathlib.[Tactic|Lean|Util|Mathport]`." "include-direct"; "Include directly imported files from other libraries" "include-deps"; "Include used files from other libraries (not including Lean itself and `std`)" "include-std"; "Include used files from the Lean standard library (implies `--include-deps`)" "include-lean"; "Include used files from Lean itself (implies `--include-deps` and `--include-std`)" "mark-package"; "Visually highlight the package containing the first `--to` target (used in combination with some `--include-XXX`)." ARGS: ...outputs : String; "Filename(s) for the output. \ If none are specified, generates `import_graph.dot`. \ Automatically chooses the format based on the file extension. \ Currently supported formats are `.dot`, `.gexf`, `.html`, \ and if you have `graphviz` installed then any supported output format is allowed." ] /-- `lake exe graph` -/ def main (args : List String) : IO UInt32 := graph.validate args
.lake/packages/importGraph/ImportGraph.lean
module public import ImportGraph.Cli public import ImportGraph.Imports public import ImportGraph.Meta public import ImportGraph.CurrentModule public import ImportGraph.Lean.Name public import ImportGraph.RequiredModules
.lake/packages/importGraph/html-template/README.md
# Visualised import graph ## Instructions To test this, place a file `imports.gexf` inside this directory. You can create such a file with ``` lake exe graph html-template/imports.gexf ``` Then open `index.html` in any browser and you should see the graph. ## Development Currently `lake exe graph output.html` will use the files here to create a stand-alone HTML file. It does so by search-replacing the JS-scripts, the `fetch('imports.gexf')` statement, and the `<h1>` header. Therefore any modifications to these lines need to be reflected in `ImportGraph/Cli.lean`! # Credits This tool has been adapted from its [Lean 3 version](https://github.com/eric-wieser/mathlib-import-graph) written by Eric Wieser, which was published under the [MIT License](./LICENSE_source) included here. Adaptation by Jon Eugster.
.lake/packages/importGraph/ImportGraphTest/Unused.lean
/-! This is a dummy file to test that unused files are shaded in the import graph. -/
.lake/packages/importGraph/ImportGraphTest/AnotherFileWithTransitiveImports.lean
import ImportGraph.Meta import ImportGraphTest.Unused import ImportGraphTest.FileWithTransitiveImports /-- info: The following are already imported (possibly transitively): ImportGraphTest.FileWithTransitiveImports --- info: Found 2 additional imports: ImportGraphTest.FileWithTransitiveImports ImportGraphTest.Used -/ #guard_msgs in #import_diff ImportGraphTest.FileWithTransitiveImports /-- info: The following are already imported (possibly transitively): ImportGraphTest.FileWithTransitiveImports ImportGraphTest.Used --- info: Found 2 additional imports: ImportGraphTest.FileWithTransitiveImports ImportGraphTest.Used -/ #guard_msgs in #import_diff ImportGraphTest.FileWithTransitiveImports ImportGraphTest.Used /-- error: File SomeBogusFilename cannot be found. -/ #guard_msgs in #import_diff SomeBogusFilename
.lake/packages/importGraph/ImportGraphTest/Imports.lean
import ImportGraph.RequiredModules import ImportGraphTest.Used import ImportGraph.Meta open Lean def importTest : CoreM Unit := do let x ← redundantImports logInfo s!"{x.toArray}" /-- info: Found the following transitively redundant imports: ImportGraph.RequiredModules -/ #guard_msgs in #redundant_imports /-- info: import ImportGraph.Imports -/ #guard_msgs in #min_imports /-- info: [ImportGraph.Imports] -/ #guard_msgs in #find_home importTest open Elab Command /-- Reports unused transitive imports amongst the specified modules. -/ elab "#unused_transitive_imports" names:ident* : command => do let imports := (names.map Syntax.getId).toList let unused ← Elab.Command.liftCoreM (unusedTransitiveImports imports) for (n, u) in unused do if !u.isEmpty then logInfo <| s!"Transitively unused imports of {n}:\n{"\n".intercalate (u.map (fun i => s!" {i}"))}" /-- info: Transitively unused imports of Init.System.IO: Init.Control.StateRef Init.Control.Reader -/ #guard_msgs in #unused_transitive_imports Init.Control.StateRef Init.System.IO Init.Control.Reader Init.Control.Basic /-- info: Transitively unused imports of ImportGraphTest.Used: ImportGraphTest.Unused -/ #guard_msgs in #unused_transitive_imports ImportGraphTest.Used ImportGraphTest.Unused Init.Control.Reader elab "#transitivelyRequiredModules_test" : command => do let env ← getEnv let unused ← liftCoreM <| env.transitivelyRequiredModules `ImportGraph.RequiredModules logInfo s!"{unused.contains `Init.Data.Option.Lemmas}" /-- info: true -/ #guard_msgs in #transitivelyRequiredModules_test elab "#my_test" : command => do -- functionality of `#redundant_imports` let expected := #[`ImportGraph.RequiredModules] let ri ← liftCoreM redundantImports if (ri.toArray != expected) then logError s!"Failed: `redundantImports` returned {ri.toArray} instead of {expected}" -- functionality of `#find_home` let expected := #[`ImportGraph.Imports] let mi ← liftCoreM <| Lean.Name.findHome `importTest none if (mi.toArray != expected) then 
logError s!"Failed: `findHome` returned {mi.toArray} instead of {expected}" -- functionality of `#find_home!` let expected := #[`ImportGraph.Imports] let mi! ← liftCoreM <| Lean.Name.findHome `importTest (← getEnv) if (mi!.toArray != expected) then logError s!"Failed: `findHome (!)` returned {mi!.toArray} instead of {expected}" logInfo s!"{mi.toArray}" pure () /-- info: #[ImportGraph.Imports] -/ #guard_msgs in #my_test
.lake/packages/importGraph/ImportGraphTest/FileWithTransitiveImports.lean
import ImportGraph.Meta import ImportGraphTest.Used /-- info: The following are already imported (possibly transitively): ImportGraphTest.Used --- info: Found 2 additional imports: ImportGraphTest.Unused ImportGraphTest.Used -/ #guard_msgs in #import_diff ImportGraphTest.Used
.lake/packages/importGraph/ImportGraphTest/Dot.lean
import ImportGraph.Cli

/-- Read the file at `path` into a string. -/
def readFile (path : System.FilePath) : IO String :=
  IO.FS.readFile path

/-- Run `lake exe graph` to write `ImportGraphTest/produced.dot`,
discarding the process output. -/
def runGraphCommand : IO Unit := do
  let _ ← IO.Process.output {
    cmd := "lake"
    args := #["exe", "graph", "--to", "ImportGraphTest.Used",
      "ImportGraphTest/produced.dot"]
  }

/-- Compare two outputs line by line, ignoring blank lines and
leading/trailing whitespace on each line. -/
def compareOutputs (expected : String) (actual : String) : IO Bool := do
  -- Normalise a blob into its non-empty, whitespace-trimmed lines.
  let normalize (s : String) : List String :=
    s.splitOn "\n" |>.map String.trim |>.filter (·.length > 0)
  pure (normalize expected == normalize actual)

/-- info: Test passed: The graph command output matches the expected.dot file. -/
#guard_msgs in
#eval show IO Unit from do
  runGraphCommand
  let expectedOutput ← readFile "ImportGraphTest/expected.dot"
  let actualOutput ← readFile "ImportGraphTest/produced.dot"
  let isEqual ← compareOutputs expectedOutput actualOutput
  if isEqual then
    IO.println "Test passed: The graph command output matches the expected.dot file."
  else
    IO.println "Test failed: The graph command output does not match the expected.dot file."
    IO.println s!"Expected:\n{expectedOutput}"
    IO.println s!"Actual:\n{actualOutput}"
.lake/packages/importGraph/ImportGraphTest/Used.lean
import ImportGraphTest.Unused
.lake/packages/importGraph/ImportGraph/RequiredModules.lean
module public import Lean.CoreM public import Lean.Data.NameMap public import Lean.Environment public import Lean.Util.FoldConsts meta import Lean.MonadEnv meta import Lean.CoreM public section namespace Lean /-- Return the name of the module in which a declaration was defined. -/ def Environment.getModuleFor? (env : Environment) (declName : Name) : Option Name := match env.getModuleIdxFor? declName with | none => if env.constants.map₂.contains declName then env.header.mainModule else none | some idx => env.header.moduleNames[idx.toNat]! open Lean /-- Return the names of the modules in which constants used in the specified declaration were defined. Note that this will *not* account for tactics and syntax used in the declaration, so the results may not suffice as imports. -/ def Name.requiredModules (n : Name) : CoreM NameSet := do let env ← getEnv let mut requiredModules : NameSet := {} let ci ← getConstInfo n for n in ci.getUsedConstantsAsSet do match env.getModuleFor? n with | some m => if ¬ (`Init).isPrefixOf m then requiredModules := requiredModules.insert m | none => pure () return requiredModules /-- Return the names of the constants used in the specified declarations, and the constants they use transitively. -/ def NameSet.transitivelyUsedConstants (s : NameSet) : CoreM NameSet := do let mut usedConstants : NameSet := {} let mut toProcess : NameSet := s while !toProcess.isEmpty do let current := toProcess.min! toProcess := toProcess.erase current usedConstants := usedConstants.insert current for m in (← getConstInfo current).getUsedConstantsAsSet do if !usedConstants.contains m then toProcess := toProcess.insert m return usedConstants /-- Return the names of the constants used in the specified declaration, and the constants they use transitively. 
-/ def Name.transitivelyUsedConstants (n : Name) : CoreM NameSet := NameSet.transitivelyUsedConstants {n} /-- Return the names of the modules in which constants used transitively in the specified declarations were defined. Note that this will *not* account for tactics and syntax used in the declaration, so the results may not suffice as imports. -/ def NameSet.transitivelyRequiredModules (s : NameSet) (env : Environment) : CoreM NameSet := do let mut requiredModules : NameSet := {} for m in (← s.transitivelyUsedConstants) do if let some module := env.getModuleFor? m then requiredModules := requiredModules.insert module return requiredModules /-- Return the names of the modules in which constants used transitively in the specified declaration were defined. Note that this will *not* account for tactics and syntax used in the declaration, so the results may not suffice as imports. -/ def Name.transitivelyRequiredModules (n : Name) (env : Environment) : CoreM NameSet := NameSet.transitivelyRequiredModules {n} env /-- Finds all constants defined in the specified module, and identifies all modules containing constants which are transitively required by those constants. -/ def Environment.transitivelyRequiredModules (env : Environment) (module : Name) : CoreM NameSet := do let constants := env.constants.map₁.values.map (·.name) |>.filter (! ·.isInternal) |>.filter (env.getModuleFor? · = some module) (NameSet.ofList constants).transitivelyRequiredModules env /-- Computes all the modules transitively required by the specified modules. Should be equivalent to calling `transitivelyRequiredModules` on each module, but shares more of the work. 
-/ partial def Environment.transitivelyRequiredModules' (env : Environment) (modules : List Name) (verbose : Bool := false) : CoreM (NameMap NameSet) := do let N := env.header.moduleNames.size let mut c2m : NameMap (BitVec N) := {} let mut pushed : NameSet := {} let mut result : NameMap NameSet := {} for m in modules do if verbose then IO.println s!"Processing module {m}" let mut r : BitVec N := 0 for n in env.header.moduleData[(env.header.moduleNames.idxOf? m).getD 0]!.constNames do if ! n.isInternal then -- This is messy: Mathlib is big enough that writing a recursive function causes a stack overflow. -- So we use an explicit stack instead. We visit each constant twice: -- once to record the constants transitively used by it, -- and again to record the modules which defined those constants. let mut stack : List (Name × Option NameSet) := [⟨n, none⟩] pushed := pushed.insert n while !stack.isEmpty do match stack with | [] => panic! "Stack is empty" | (c, used?) :: tail => stack := tail match used? with | none => if !c2m.contains c then let used := (← getConstInfo c).getUsedConstantsAsSet stack := ⟨c, some used⟩ :: stack for u in used do if !pushed.contains u then stack := ⟨u, none⟩ :: stack pushed := pushed.insert u | some used => let usedModules : NameSet := used.foldl (init := {}) (fun s u => if let some m := env.getModuleFor? u then s.insert m else s) let transitivelyUsed : BitVec N := used.foldl (init := toBitVec usedModules) (fun s u => s ||| ((c2m.find? u).getD 0)) c2m := c2m.insert c transitivelyUsed r := r ||| ((c2m.find? n).getD 0) result := result.insert m (toNameSet r) return result where toBitVec {N : Nat} (s : NameSet) : BitVec N := s.foldl (init := 0) (fun b n => b ||| BitVec.twoPow _ ((env.header.moduleNames.idxOf? 
n).getD 0)) toNameSet {N : Nat} (b : BitVec N) : NameSet := env.header.moduleNames.zipIdx.foldl (init := {}) (fun s (n, i) => if b.getLsbD i then s.insert n else s) /-- Return the names of the modules in which constants used in the current file were defined. Note that this will *not* account for tactics and syntax used in the file, so the results may not suffice as imports. -/ def Environment.requiredModules (env : Environment) : NameSet := Id.run do let localConstantInfos := env.constants.map₂ let mut requiredModules : NameSet := {} for (_, ci) in localConstantInfos do for n in ci.getUsedConstantsAsSet do match env.getModuleFor? n with | some m => if ¬ (`Init).isPrefixOf m then requiredModules := requiredModules.insert m | none => pure () return requiredModules end Lean
.lake/packages/importGraph/ImportGraph/Imports.lean
module

public import Lean.Elab.Command
public import Lean.Server.GoTo
public import Lean.Widget.UserWidget
public import ImportGraph.RequiredModules

public section

open Lean

namespace Lean.Environment

/-- Find the imports of a given module. -/
def importsOf (env : Environment) (n : Name) : Array Name :=
  if n = env.header.mainModule then
    -- The main module's imports are in the header, not in `moduleData`.
    env.header.imports.map Import.module
  else match env.getModuleIdx? n with
    -- `Init` is implicit everywhere, so it is stripped from the result.
    | .some idx => env.header.moduleData[idx.toNat]!.imports.map Import.module |>.erase `Init
    | .none => #[]

/-- Construct the import graph of the current file. -/
partial def importGraph (env : Environment) : NameMap (Array Name) :=
  let main := env.header.mainModule
  let imports := env.header.imports.map Import.module
  imports.foldl (fun m i => process env i m) (({} : NameMap _).insert main imports)
where
  -- Depth-first insertion of module `i` and (recursively) its imports into the map `m`.
  process (env) (i) (m) : NameMap (Array Name) :=
    if m.contains i then
      m
    else
      let imports := env.importsOf i
      imports.foldr (fun i m => process env i m) (m.insert i imports)

/--
Return the redundant imports (i.e. those transitively implied by another import)
amongst a candidate list of imports.
-/
partial def findRedundantImports (env : Environment) (imports : Array Name) : NameSet :=
  let run := visit env.importGraph imports
  let (_, seen) := imports.foldl (fun ⟨v, s⟩ n => run v s n) ({}, {})
  seen
where
  -- DFS from `n` through the graph `Γ`; any member of `targets` reached as a *proper*
  -- descendant of some import is recorded in `seen` (i.e. it is implied by another import).
  visit (Γ) (targets) (visited) (seen) (n) : NameSet × NameSet :=
    if visited.contains n then
      (visited, seen)
    else
      let imports := (Γ.find? n).getD #[]
      let (visited', seen') := imports.foldl (fun ⟨v, s⟩ t => visit Γ targets v s t) (visited, seen)
      (visited'.insert n,
        imports.foldl (fun s t => if targets.contains t then s.insert t else s) seen')

end Lean.Environment

namespace Lean.NameMap

/--
Compute the transitive closure of an import graph.
-/
public partial def transitiveClosure (m : NameMap (Array Name)) : NameMap NameSet :=
  m.foldl (fun r n i => process r n i) {}
where
  -- Memoized: once `n` has an entry in `r` it is final, so recursion terminates
  -- after each node is processed once.
  process (r : NameMap NameSet) (n : Name) (i : Array Name) : NameMap NameSet :=
    if r.contains n then
      r
    else
      let r' := i.foldr (fun i r => process r i ((m.find? i).getD #[])) r
      -- Closure of `n` = its direct imports plus the closure of each direct import.
      let t := i.foldr (fun j s => ((r'.find? j).getD {}).foldl NameSet.insert s) (.ofList i.toList)
      r'.insert n t

/--
Compute the transitive reduction of an import graph.
Typical usage is `transitiveReduction (← importGraph)`.
-/
def transitiveReduction (m : NameMap (Array Name)) : NameMap (Array Name) :=
  let c := transitiveClosure m
  -- Drop every edge `j → n` that is already implied via some other import `i` of `n`.
  m.foldl (fun r n a =>
    r.insert n (a.foldr (fun i b => b.filter (fun j => ! ((c.find? i).getD {}).contains j)) a)) {}

/-- Restrict an import graph to only the downstream dependencies of some set of modules. -/
def downstreamOf (m : NameMap (Array Name)) (targets : NameSet) : NameMap (Array Name) :=
  let tc := transitiveClosure m
  -- Keep `n` iff it is a target or transitively imports a target.
  let P (n : Name) := targets.contains n || ((tc.find? n).getD {}).any fun j => targets.contains j
  m.foldl (init := {}) fun r n i =>
    if P n then
      r.insert n (i.filter P)
    else
      r

/-- Restrict an import graph to only the transitive imports of some set of modules. -/
def upstreamOf (m : NameMap (Array Name)) (targets : NameSet) : NameMap (Array Name) :=
  let tc := transitiveClosure m
  -- Keep `n` iff it is a target or is transitively imported by a target.
  let P (n : Name) := targets.contains n || targets.any fun t => ((tc.find? t).getD {}).contains n
  m.foldl (init := {}) fun r n i =>
    if P n then
      r.insert n (i.filter P)
    else
      r

/--
Filter the list of edges `… → node` inside `graph` by the function `filter`.

Any such upstream node `source` where `filter source` returns true will be replaced
by all its upstream nodes. This results in a list of all unfiltered nodes in the
`graph` that either had an edge to `node` or had an indirect edge to `node` going
through filtered nodes.

Will panic if the `node` is not in the `graph`.
-/
partial def transitiveFilteredUpstream (node : Name) (graph : NameMap (Array Name))
    (filter : Name → Bool) (replacement : Option Name := none) : List Name :=
  (graph.get! node).toList.flatMap fun source =>
    ( if filter source then
        -- Add the transitive edges going through the filtered node `source`.
        -- If there is a replacement node, add an additional edge `repl → node`.
        match replacement with
        | none => transitiveFilteredUpstream source graph filter
        | some repl => .cons repl <| transitiveFilteredUpstream source graph filter
      -- If the node is not filtered, we leave the edge `source → node` intact.
      else [source]).eraseDups

/--
Filters the `graph` removing all nodes where `filter n` returns false.

Additionally, replace edges from removed nodes by all the transitive edges.
This means there is a path between two nodes in the filtered graph iff
there exists such a path in the original graph.

If the optional `(replacement : Name)` is provided, a corresponding node will be
added together with edges to all nodes which had an incoming edge
from any filtered node.
-/
def filterGraph (graph : NameMap (Array Name)) (filter : Name → Bool)
    (replacement : Option Name := none) : NameMap (Array Name) :=
  -- Create a list of all files imported by any of the filtered files
  -- and remove all imports starting with `Mathlib` to avoid loops.
  let replImports := graph.toList.flatMap
      (fun ⟨n, i⟩ => if filter n then i.toList else [])
    |>.eraseDups |>.filter (¬ Name.isPrefixOf `Mathlib ·) |>.toArray
  let graph := graph.filterMap (fun node edges =>
    if filter node then none
    else some <|
      -- If the node `node` is not filtered, modify the `edges` going into `node`.
      edges.toList.flatMap (fun source =>
        if filter source then
          transitiveFilteredUpstream source graph filter (replacement := replacement)
        else [source]) |>.eraseDups.toArray)
  -- Add a replacement node if provided.
  match replacement with
  | none => graph
  | some repl => graph.insert repl replImports

end Lean.NameMap

/--
Returns a `List (Name × List Name)` with a key for each module `n` in `amongst`,
whose corresponding value is the list of modules `m` in `amongst`
which are transitively imported by `n`,
but no declaration in `n` makes use of a declaration in `m`.
-/
def unusedTransitiveImports (amongst : List Name) (verbose : Bool := false) :
    CoreM (List (Name × List Name)) := do
  let env ← getEnv
  let transitiveImports := env.importGraph.transitiveClosure
  let transitivelyRequired ← env.transitivelyRequiredModules' amongst verbose
  amongst.mapM fun n => do return (n,
    -- Imported-but-not-required = transitive imports minus transitively used modules.
    let unused := (transitiveImports.find? n).getD {} \ (transitivelyRequired.find? n |>.getD {})
    amongst.filter (fun m => unused.contains m))

/--
Run the `CoreM` action `f` in a fresh environment obtained by importing `modules`.
Initializes the search path from the detected sysroot before importing.
-/
def Core.withImportModules (modules : Array Name) {α} (f : CoreM α) : IO α := do
  initSearchPath (← findSysroot)
  unsafe Lean.withImportModules (modules.map (fun m => {module := m})) {} (trustLevel := 1024)
    fun env =>
      Prod.fst <$> Core.CoreM.toIO
        (ctx := { fileName := "<CoreM>", fileMap := default }) (s := { env := env }) do f

/--
Return the redundant imports (i.e. those transitively implied by another import)
of a specified module (or the current module if `none` is specified).
-/
def redundantImports (n? : Option Name := none) : CoreM NameSet := do
  let env ← getEnv
  let imports := env.importsOf (n?.getD (env.header.mainModule))
  return env.findRedundantImports imports
.lake/packages/importGraph/ImportGraph/UnusedTransitiveImports.lean
import ImportGraph.Imports

open Lean

/--
`lake exe unused_transitive_imports m1 m2 ...`

For each specified module `m`, prints those `n` from the argument list
which are imported, but transitively unused by `m`.

Flags: `-v` / `--verbose` enables progress output; all other arguments
are interpreted as module names.
-/
def main (args : List String) : IO UInt32 := do
  -- Anything starting with `-` is treated as a flag; the rest are module names.
  let (flags, args) := args.partition (fun s => s.startsWith "-")
  -- Fix: `modules` was declared `mut` but never reassigned; plain `let` suffices.
  let modules := args.map (·.toName)
  Core.withImportModules modules.toArray do
    let r ← unusedTransitiveImports modules
      (verbose := flags.contains "-v" || flags.contains "--verbose")
    for (n, u) in r do
      IO.println s!"{n}: {u}"
  return 0
.lake/packages/importGraph/ImportGraph/Meta.lean
module

public meta import ImportGraph.Imports

public meta section

/-!
# Tools for analyzing imports.

Provides the commands

* `#redundant_imports` which lists any transitively redundant imports in the current module.
* `#min_imports` which attempts to construct a minimal set of imports for the declarations
  in the current file.
  (Must be run at the end of the file. Tactics and macros may result in incorrect output.)
* `#find_home decl` suggests files higher up the import hierarchy to which `decl` could be moved.
-/

open Lean

/--
List the imports in this file which can be removed
because they are transitively implied by another import.
-/
elab "#redundant_imports" : command => do
  let redundant := (← Elab.Command.liftCoreM do redundantImports)
  if redundant.isEmpty then
    logInfo "No transitively redundant imports found."
  else
    logInfo <| "Found the following transitively redundant imports:\n" ++
      m!"{Format.joinSep redundant.toList "\n"}"

/--
Return the names of the modules in which constants used in the current file were defined,
with modules already transitively imported removed.

Note that this will *not* account for tactics and syntax used in the file,
so the results may not suffice as imports.
-/
def Lean.Environment.minimalRequiredModules (env : Environment) : Array Name :=
  let required := env.requiredModules.toArray.erase env.header.mainModule
  -- Keep only imports not already implied by another required import.
  let redundant := findRedundantImports env required
  required.filter fun n => ¬ redundant.contains n

/--
Try to compute a minimal set of imports for this file,
by analyzing the declarations.

This must be run at the end of the file,
and is not aware of syntax and tactics,
so the results will likely need to be adjusted by hand.
-/
elab "#min_imports" : command => do
  let imports := (← getEnv).minimalRequiredModules.qsort (·.toString < ·.toString)
    |>.toList.map (fun n => "import " ++ n.toString)
  logInfo <| Format.joinSep imports "\n"

-- deprecated since 2024-07-06
elab "#minimize_imports" : command => do
  logWarning m!"'#minimize_imports' is deprecated: please use '#min_imports'"
  Elab.Command.elabCommand (← `(command| #min_imports))

/--
Find locations as high as possible in the import hierarchy
where the named declaration could live.
-/
def Lean.Name.findHome (n : Name) (env : Option Environment) : CoreM NameSet := do
  let current? := match env with | some env => env.header.mainModule | _ => default
  let required := (← n.requiredModules).toArray.erase current?
  let imports := (← getEnv).importGraph.transitiveClosure
  let mut candidates : NameSet := {}
  -- A module is a candidate home if it (transitively) imports every required module.
  for (n, i) in imports do
    if required.all fun r => n == r || i.contains r then
      candidates := candidates.insert n
  -- Keep only minimal candidates: drop any candidate that imports another candidate.
  for c in candidates do
    for i in candidates do
      if imports.find? i |>.getD {} |>.contains c then
        candidates := candidates.erase i
  return candidates

open Server in
/--
Tries to resolve the module `modName` to a source file URI.
This has to be done in the Lean server
since the `Environment` does not keep track of source URIs.
-/
@[server_rpc_method]
def getModuleUri (modName : Name) : RequestM (RequestTask Lsp.DocumentUri) :=
  RequestM.asTask do
    let some uri ← documentUriFromModule? modName
      | throw $ RequestError.invalidParams s!"couldn't find URI for module '{modName}'"
    return uri

-- Props passed from the Lean side to the `GoToModuleLink` widget.
structure GoToModuleLinkProps where
  -- Module whose source the link should jump to.
  modName : Name
  deriving Server.RpcEncodable

/--
When clicked, this widget component jumps to the source of the module `modName`,
assuming a source URI can be found for the module.
-/
@[widget_module]
def GoToModuleLink : Widget.Module where
  javascript := "
    import * as React from 'react'
    import { EditorContext, useRpcSession } from '@leanprover/infoview'

    export default function(props) {
      const ec = React.useContext(EditorContext)
      const rs = useRpcSession()
      return React.createElement('a',
        {
          className: 'link pointer dim',
          onClick: async () => {
            try {
              const uri = await rs.call('getModuleUri', props.modName)
              ec.revealPosition({ uri, line: 0, character: 0 })
            } catch {}
          }
        },
        props.modName)
    }
  "

open Elab Command in
/--
Find locations as high as possible in the import hierarchy
where the named declaration could live.
Using `#find_home!` will forcefully remove the current file.
Note that this works best if used in a file with `import Mathlib`.
The current file could still be the only suggestion, even using `#find_home! lemma`.
The reason is that `#find_home!` scans the import graph below the current file,
selects all the files containing declarations appearing in `lemma`, excluding the current file
itself and looks for all least upper bounds of such files.

For a simple example, if `lemma` is in a file importing only `A.lean` and `B.lean` and
uses one lemma from each, then `#find_home! lemma` returns the current file.
-/
elab "#find_home" bang:"!"? n:ident : command => do
  let stx ← getRef
  let mut homes : Array MessageData := #[]
  let n ← liftCoreM <| realizeGlobalConstNoOverloadWithInfo n
  -- With `!`, pass the current environment so the current module is excluded.
  let env? ← bang.mapM fun _ => getEnv
  for modName in (← Elab.Command.liftCoreM do n.findHome env?) do
    let p : GoToModuleLinkProps := { modName }
    -- Render each candidate module as a clickable `GoToModuleLink` widget.
    homes := homes.push $ .ofWidget
      (← liftCoreM $ Widget.WidgetInstance.ofHash
        GoToModuleLink.javascriptHash $
        Server.RpcEncodable.rpcEncode p)
      (toString modName)
  logInfoAt stx[0] m!"{homes}"

/--
`#import_diff foo bar ...` computes the new transitive imports that are added to a given file
when modules `foo, bar, ...` are added to the set of imports of the file.

More precisely, it computes the import diff between when `foo, bar, ...` are added to the imports
and when `foo, bar, ...` are removed from the imports.

Note: the command also works when some of the modules passed as arguments are already present in
the file's imports.
-/
elab "#import_diff" n:ident* : command => do
  let name_arr : Array Name := n.map (·.getId)
  let sp ← searchPathRef.get
  -- First, make sure the files exist.
  for name in name_arr do
    if (← sp.findWithExt "olean" name).isSome then continue
    throwError m!"File {name} cannot be found."
  let env ← getEnv
  -- Next, check for redundancies:
  let current_all_imports := env.allImportedModuleNames
  let redundancies := name_arr.filter current_all_imports.contains
  unless redundancies.isEmpty do
    let out := "\n".intercalate <| redundancies.map Name.toString |>.qsort (· < ·) |>.toList
    Lean.logInfo <| m!"The following are already imported (possibly transitively):\n{out}"
  -- Now compute the import diffs.
  let current_imports := env.imports
  let reduced_imports := env.imports.filter (!name_arr.contains ·.module)
  let extended_imports := current_imports ++ (name_arr.map ({ module := · }))
  let reduced_all_imports := (← Lean.importModules reduced_imports {}).allImportedModuleNames
  let extended_all_imports := (← Lean.importModules extended_imports {}).allImportedModuleNames
  let import_diff := extended_all_imports.filter (· ∉ reduced_all_imports)
  let out := "\n".intercalate <| import_diff.map Name.toString |>.qsort (· < ·) |>.toList
  Lean.logInfo s!"Found {import_diff.size} additional imports:\n{out}"
.lake/packages/importGraph/ImportGraph/Gexf.lean
module

public import Lean.Data.Name
public import Lean.AuxRecursor
public import Lean.MonadEnv
public import Lean.Meta.Match.MatcherInfo

@[expose] public section

open Lean

namespace ImportGraph

open Elab Meta in
/-- Filter Lean internal declarations -/
def isBlackListed (env : Environment) (declName : Name) : Bool :=
  declName == ``sorryAx
  || declName matches .str _ "inj"
  || declName matches .str _ "noConfusionType"
  || declName.isInternalDetail
  || isAuxRecursor env declName
  || isNoConfusion env declName
  || isRecCore env declName
  || isMatcherCore env declName

/-- Get number of non-blacklisted declarations per file. -/
def getNumberOfDeclsPerFile (env: Environment) : NameMap Nat :=
  env.const2ModIdx.fold (fun acc n (idx : ModuleIdx) =>
    let mod := env.allImportedModuleNames[idx]!
    if isBlackListed env n then acc else acc.insert mod ((acc.getD mod 0) + 1)
  ) {}

-- The trailing `\n ` in both templates provides line breaks/indentation when the
-- snippets are concatenated; the final result is `.trim`med in `Graph.toGexf`.

/-- Gexf template for a node in the graph. -/
def Gexf.nodeTemplate (n module : Name) (size : Nat) := s!"<node id=\"{n}\" label=\"{n}\"><attvalues><attvalue for=\"0\" value=\"{size}\" /><attvalue for=\"1\" value=\"{module.isPrefixOf n}\" /></attvalues></node>\n "

/-- Gexf template for an edge in the graph -/
def Gexf.edgeTemplate (source target : Name) := s!"<edge source=\"{source}\" target=\"{target}\" id=\"{source}--{target}\" />\n "

open Gexf in
/--
Creates a `.gexf` file of the graph, see https://gexf.net/

Metadata can be stored in forms of attributes, currently we record the following:

* `decl_count` (Nat): number of declarations in the file
* `in_module` (Bool): whether the file belongs to the main module
  (used to strip the first part of the name when displaying).
-/
def Graph.toGexf (graph : NameMap (Array Name)) (module : Name) (env : Environment) : String :=
  let sizes : NameMap Nat := getNumberOfDeclsPerFile env
  let nodes : String := graph.foldl (fun acc n _ => acc ++ nodeTemplate n module (sizes.getD n 0)) ""
  -- Edge direction: an entry `n ↦ i` in the graph yields edges `j → n` for each `j ∈ i`.
  let edges : String := graph.foldl (fun acc n i => acc ++ (i.foldl (fun b j => b ++ edgeTemplate j n) "")) ""
  s!"<?xml version='1.0' encoding='utf-8'?>
<gexf xmlns=\"http://www.gexf.net/1.2draft\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd\" version=\"1.2\">
  <meta>
    <creator>Lean ImportGraph</creator>
  </meta>
  <graph defaultedgetype=\"directed\" mode=\"static\" name=\"\">
    <attributes mode=\"static\" class=\"node\">
      <attribute id=\"0\" title=\"decl_count\" type=\"long\" />
      <attribute id=\"1\" title=\"in_module\" type=\"boolean\" />
    </attributes>
    <nodes>
      {nodes.trim}
    </nodes>
    <edges>
      {edges.trim}
    </edges>
  </graph>
</gexf>
"
.lake/packages/importGraph/ImportGraph/CurrentModule.lean
module

public import Lake.Load.Manifest

@[expose] public section

open Lean (Name)

namespace ImportGraph

/-- Read the name of the main module from the `lake-manifest`. -/
def getCurrentModule : IO Name := do
  let manifest? ← Lake.Manifest.load? ⟨"lake-manifest.json"⟩
  match manifest? with
  | none =>
    -- TODO: should this be caught?
    pure .anonymous
  | some manifest =>
    -- TODO: This assumes that the `package` and the default `lean_lib`
    -- have the same name up to capitalisation.
    -- Would be better to read the `.defaultTargets` from the
    -- `← getRootPackage` from `Lake`, but I can't make that work with the monads involved.
    return manifest.name.capitalize

/--
Helper which only returns `true` if the `module` is
provided and the name `n` lies inside it.
-/
def isInModule (module : Option Name) (n : Name) :=
  -- `Option.any` is `false` on `none`, matching the explicit `match` it replaces.
  module.any fun m => m.isPrefixOf n
.lake/packages/importGraph/ImportGraph/Cli.lean
module

public import Cli.Basic
public import ImportGraph.CurrentModule
public import ImportGraph.Imports
public import ImportGraph.Lean.Name
public import ImportGraph.Gexf

@[expose] public section

open Cli

open Lean

open ImportGraph

/--
Write an import graph, represented as a `NameMap (Array Name)` to the ".dot" graph format.

* Nodes in the `unused` set will be shaded light gray.
* If `markedPackage` is provided:
  * Nodes which start with the `markedPackage` will be highlighted in green
    and drawn closer together.
  * Edges from `directDeps` into the module are highlighted in green
  * Nodes in `directDeps` are marked with a green border and green text.
-/
def asDotGraph
    (graph : NameMap (Array Name))
    (unused : NameSet := {})
    (header := "import_graph")
    (markedPackage : Option Name := none)
    (directDeps : NameSet := {})
    (from_ to : NameSet := {}) : String := Id.run do
  let mut lines := #[s!"digraph \"{header}\" " ++ "{"]
  for (n, is) in graph do
    -- `invhouse`/`house` shapes mark the endpoints of a `--from`/`--to` query.
    let shape := if from_.contains n then "invhouse" else if to.contains n then "house" else "ellipse"
    if markedPackage.isSome ∧ directDeps.contains n then
      -- note: `fillcolor` defaults to `color` if not specified
      let fill := if unused.contains n then "#e0e0e0" else "white"
      lines := lines.push s!" \"{n}\" [style=filled, fontcolor=\"#4b762d\", color=\"#71b144\", fillcolor=\"{fill}\", penwidth=2, shape={shape}];"
    else if unused.contains n then
      lines := lines.push s!" \"{n}\" [style=filled, fillcolor=\"#e0e0e0\", shape={shape}];"
    else if isInModule markedPackage n then
      -- mark node
      lines := lines.push s!" \"{n}\" [style=filled, fillcolor=\"#96ec5b\", shape={shape}];"
    else
      lines := lines.push s!" \"{n}\" [shape={shape}];"
    -- Then add edges
    for i in is do
      if isInModule markedPackage n then
        if isInModule markedPackage i then
          -- draw the main project close together
          lines := lines.push s!" \"{i}\" -> \"{n}\" [weight=100];"
        else
          -- mark edges into the main project
          lines := lines.push s!" \"{i}\" -> \"{n}\" [penwidth=2, color=\"#71b144\"];"
      else
        lines := lines.push s!" \"{i}\" -> \"{n}\";"
  lines := lines.push "}"
  return "\n".intercalate lines.toList

open Lean Core System

open IO.FS IO.Process Name in
/-- Implementation of the import graph command line program. -/
def importGraphCLI (args : Cli.Parsed) : IO UInt32 := do
  -- file extensions that should be created
  let extensions : Std.HashSet String := match args.variableArgsAs! String with
    | #[] => {"dot"}
    | outputs => outputs.foldl (fun acc (o : String) =>
        match FilePath.extension o with
        | none => acc.insert "dot"
        | some "gexf" => acc.insert "gexf"
        -- `.html` output embeds the gexf data, so it needs the gexf file generated too.
        | some "html" => acc.insert "gexf"
        -- currently all other formats are handled by passing the `.dot` file to
        -- graphviz
        | some _ => acc.insert "dot" ) {}
  let to ← match args.flag? "to" with
    | some to => pure <| to.as! (Array ModuleName)
    | none => pure #[← getCurrentModule]
  let from? : Option (Array Name) := match args.flag? "from" with
    | some fr => some <| fr.as! (Array ModuleName)
    | none => none
  initSearchPath (← findSysroot)
  unsafe Lean.enableInitializersExecution
  let outFiles ← try
    unsafe withImportModules (to.map ({module := ·})) {} (trustLevel := 1024) fun env => do
      let toModule := ImportGraph.getModule to[0]!
      let mut graph := env.importGraph
      -- With `--to`, compute the modules that are imported but never actually used.
      let unused ← match args.flag? "to" with
        | some _ =>
          let ctx := { options := {}, fileName := "<input>", fileMap := default }
          let state := { env }
          let used ← Prod.fst <$> (CoreM.toIO (env.transitivelyRequiredModules' to.toList) ctx state)
          let used := used.foldl (init := NameSet.empty) (fun s _ t => s ∪ t)
          pure <| graph.foldl (fun acc n _ => if used.contains n then acc else acc.insert n) NameSet.empty
        | none => pure NameSet.empty
      if let Option.some f := from? then
        graph := graph.downstreamOf (NameSet.ofArray f)
      let includeLean := args.hasFlag "include-lean"
      let includeStd := args.hasFlag "include-std" || includeLean
      let includeDeps := args.hasFlag "include-deps" || includeStd
      -- Note: `includeDirect` does not imply `includeDeps`!
      -- e.g. if the package contains `import Lean`, the node `Lean` will be included with
      -- `--include-direct`, but not included with `--include-deps`.
      let includeDirect := args.hasFlag "include-direct"
      -- `directDeps` contains files which are not in the package
      -- but directly imported by a file in the package
      let directDeps : NameSet := graph.foldl (init := .empty) (fun acc n deps =>
        if toModule.isPrefixOf n then
          deps.filter (!toModule.isPrefixOf ·) |>.foldl (init := acc) NameSet.insert
        else acc)
      let filter (n : Name) : Bool :=
        toModule.isPrefixOf n ||
        bif isPrefixOf `Std n then includeStd
        else bif isPrefixOf `Lean n || isPrefixOf `Init n then includeLean
        else includeDeps
      let filterDirect (n : Name) : Bool := includeDirect ∧ directDeps.contains n
      graph := graph.filterMap (fun n i =>
        if filter n then
          -- include node regularly
          (i.filter (fun m => filterDirect m || filter m))
        else if filterDirect n then
          -- include node as direct dependency; drop any further deps.
          some #[]
        else
          -- not included
          none)
      if args.hasFlag "exclude-meta" then
        -- Mathlib-specific exclusion of tactics
        let filterMathlibMeta : Name → Bool := fun n => (
          isPrefixOf `Mathlib.Tactic n ∨
          isPrefixOf `Mathlib.Lean n ∨
          isPrefixOf `Mathlib.Mathport n ∨
          isPrefixOf `Mathlib.Util n)
        graph := graph.filterGraph filterMathlibMeta (replacement := `«Mathlib.Tactics»)
      if !args.hasFlag "show-transitive" then
        graph := graph.transitiveReduction
      let markedPackage : Option Name := if args.hasFlag "mark-package" then toModule else none
      -- Create all output files that are requested
      let mut outFiles : Std.HashMap String String := {}
      if extensions.contains "dot" then
        let dotFile := asDotGraph graph (unused := unused) (markedPackage := markedPackage)
          (directDeps := directDeps) (to := NameSet.ofArray to)
          (from_ := NameSet.ofArray (from?.getD #[]))
        outFiles := outFiles.insert "dot" dotFile
      if extensions.contains "gexf" then
        -- filter out the top node as it makes the graph less pretty
        let graph₂ := match args.flag? "to" with
          | none => graph.filter (fun n _ =>
              ! if to.contains `Mathlib then #[`Mathlib, `Mathlib.Tactic].contains n else to.contains n)
          | some _ => graph
        let gexfFile := Graph.toGexf graph₂ toModule env
        outFiles := outFiles.insert "gexf" gexfFile
      return outFiles
  catch err =>
    -- TODO: try to build `to` first, so this doesn't happen
    throw <| IO.userError <|
      s!"{err}\nIf the error above says `object file ... does not exist`, " ++
      s!"try if `lake build {" ".intercalate (to.toList.map Name.toString)}` fixes the issue"
    throw err
  match args.variableArgsAs! String with
  | #[] => writeFile "import_graph.dot" (outFiles["dot"]!)
  | outputs => for o in outputs do
      let fp : FilePath := o
      match fp.extension with
      | none
      | "dot" => writeFile fp (outFiles["dot"]!)
      | "gexf" => IO.FS.writeFile fp (outFiles["gexf"]!)
      | "html" =>
        let gexfFile := (outFiles["gexf"]!)
        -- use `html-template/index.html` and insert any dependencies to make it
        -- a stand-alone HTML file.
        -- note: changes in `index.html` might need to be reflected here!
        let exeDir := (FilePath.parent (← IO.appPath) |>.get!) / ".." / ".." / ".."
        let mut html ← IO.FS.readFile <| ← IO.FS.realPath (
          exeDir / "html-template" / "index.html")
        for dep in (#[
            "vendor" / "sigma.min.js",
            "vendor" / "graphology.min.js",
            "vendor" / "graphology-library.min.js" ] : Array FilePath) do
          let depContent ← IO.FS.readFile <| ← IO.FS.realPath (exeDir / "html-template" / dep)
          html := html.replace s!"<script src=\"{dep}\"></script>" s!"<script>{depContent}</script>"
        -- inline the graph data
        -- note: changes in `index.html` might need to be reflected here!
        let escapedFile := gexfFile.replace "\n" "" |>.replace "\"" "\\\""
        let toFormatted : String := ", ".intercalate <| (to.map toString).toList
        html := html
          |>.replace "fetch(\"imports.gexf\").then((res) => res.text()).then(render_gexf)"
            s!"render_gexf(\"{escapedFile}\")"
          |>.replace "<h1>Import Graph</h1>" s!"<h1>Import Graph for {toFormatted}</h1>"
          |>.replace "<title>import graph</title>" s!"<title>import graph for {toFormatted}</title>"
        IO.FS.writeFile fp html
      | some ext => try
          -- Delegate every other extension to graphviz's `dot`.
          _ ← IO.Process.output { cmd := "dot", args := #["-T" ++ ext, "-o", o] } outFiles["dot"]!
        catch ex =>
          IO.eprintln s!"Error occurred while writing out {fp}."
          IO.eprintln s!"Make sure you have `graphviz` installed and the file is writable."
          throw ex
  return 0
.lake/packages/importGraph/ImportGraph/Lean/Name.lean
module

public import Lean.Data.Name
public import Lean.CoreM
public import Lean.Meta.Match.MatcherInfo
public import Std.Data.HashMap

public section

/-!
TODO: Some declarations in this file are duplicated from mathlib,
but especially `isBlacklisted` is deemed too specific for upstreaming to Batteries.
-/

namespace Lean.Name

open Lean Meta Elab

namespace ImportGraph

/-- Note: copied from `Mathlib.Lean.Name` -/
private def isBlackListed (declName : Name) : CoreM Bool := do
  -- Anything from the `Lean` namespace itself is excluded outright.
  if declName.toString.startsWith "Lean" then return true
  let env ← getEnv
  -- Pure checks are combined with `||`; the monadic `isRec`/`isMatcher` checks
  -- are chained with `<||>` so they only run when the earlier checks are false.
  pure $ declName.isInternalDetail
   || isAuxRecursor env declName
   || isNoConfusion env declName
  <||> isRec declName <||> isMatcher declName

/--
Retrieve all names in the environment satisfying a predicate.

Note: copied from `Mathlib.Lean.Name`
-/
def allNames (p : Name → Bool) : CoreM (Array Name) := do
  (← getEnv).constants.foldM (init := #[]) fun names n _ => do
    if p n && !(← isBlackListed n) then
      return names.push n
    else
      return names

/--
Retrieve all names in the environment satisfying a predicate,
gathered together into a `HashMap` according to the module they are defined in.

Note: copied from `Mathlib.Lean.Name`
-/
def allNamesByModule (p : Name → Bool) : CoreM (Std.HashMap Name (Array Name)) := do
  (← getEnv).constants.foldM (init := ∅) fun names n _ => do
    if p n && !(← isBlackListed n) then
      -- Names with no associated module are skipped entirely.
      let some m ← findModuleOf? n | return names
      -- TODO use `modify`/`alter` when available
      match names[m]? with
      | some others => return names.insert m (others.push n)
      | none => return names.insert m #[n]
    else
      return names

/-- Returns the very first part of a name: for `ImportGraph.Lean.NameMap` it returns `ImportGraph`. -/
def getModule (name : Name) (s := "") : Name :=
  -- Recurse down the prefixes, remembering the last string component seen;
  -- at the root, that component is the first part of the original name.
  match name with
  | .anonymous => .mkSimple s
  | .num _ _ => panic s!"panic in `getModule`: did not expect numerical name: {name}."
  | .str pre s => getModule pre s
.lake/packages/LeanSearchClient/README.md
# LeanSearchClient LeanSearchClient provides syntax for search using the [leansearch API](https://leansearch.net/) and the [LeanStateSearch](https://premise-search.com) API from within Lean. It allows you to search for Lean tactics and theorems using natural language. It also allows searches on [Loogle](https://loogle.lean-lang.org/json) from within Lean. We provide syntax to make a query and generate `TryThis` options to click or use a code action to use the results. The queries are of four forms: * `Command` syntax: `#search "search query"` as a command. * `Term` syntax: `#search "search query"` as a term. * `Tactic` syntax: `#search "search query"` as a tactic. * `Tactic` syntax based on state: `#search`. In all cases results are displayed in the Lean Infoview and clicking these replaces the query text. In the cases of a query for tactics only valid tactics are displayed. Which backend is used is determined by the `leansearchclient.backend` option. ## Examples The following are examples of using the leansearch API. The search is triggered when the sentence ends with a full stop (period) or a question mark. ### Query Command The common command for all backends: ```lean #search "If a natural number n is less than m, then the successor of n is less than the successor of m." ``` We also have commands for specific backend: ```lean #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." ``` ### Query Term The general command: ```lean example := #search "If a natural number n is less than m, then the successor of n is less than the successor of m." ``` For `leansearch`: ```lean example := #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." ``` ### Query Tactic Note that only valid tactics are displayed. The general command has two variants. 
With a string, calling LeanSearch: ```lean example : 3 ≤ 5 := by #search "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry ``` Without a string, calling LeanStateSearch ```lean example : 3 ≤ 5 := by #search sorry ``` There are also specific commands for the different backends. For `leansearch`: ```lean example : 3 ≤ 5 := by #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry ``` For LeanStateSearch: ```lean example : 3 ≤ 5 := by #statesearch sorry ``` ## Loogle Search The `#loogle` command can also be used in all three modes. The syntax in this case is `#loogle <search query>` as in the following examples. ```lean #loogle List ?a → ?a example := #loogle List ?a → ?a example : 3 ≤ 5 := by #loogle Nat.succ_le_succ sorry ```
.lake/packages/LeanSearchClient/LeanSearchClientTest.lean
module public meta import LeanSearchClientTest.Examples
.lake/packages/LeanSearchClient/LeanSearchClient.lean
-- This module serves as the root of the `LeanSearchClient` library. -- Import modules here that should be built as part of the library. module public meta import LeanSearchClient.Basic public meta import LeanSearchClient.Syntax public meta import LeanSearchClient.LoogleSyntax
.lake/packages/LeanSearchClient/LeanSearchClientTest/LoogleExamples.lean
module public meta import LeanSearchClient.LoogleSyntax public meta section /-! # Loogle Examples Examples of using the Loogle API. The search is triggered by the word at the end of the query. -/ -- #loogle List ?a → ?a -- example := #loogle List ?a → ?a -- set_option loogle.queries 1 -- example : 3 ≤ 5 := by -- #loogle Nat.succ_le_succ -- sorry example : 3 ≤ 5 := by #loogle decide #loogle Dist.dist, edist -- example : 3 ≤ 5 := by -- #loogle -- decide -- /-! -- More examples to test comments do not interfere with the search or caching. -- -/ -- #loogle ?a * _ < ?a * _ ↔ _ -- #loogle ?a * _ < ?a * _ ↔ _ /- foo -/ -- #loogle ?a * _ < ?a * _ ↔ _ -- comment #loogle ?a * _ < ?a * _ ↔ _ /-- info: Loogle Search Results • #check Option.get! -- {α : Type u} [Inhabited α] : Option α → α Extracts the value from an `Option`, panicking on `none`. -/ #guard_msgs in #loogle Option ?a → ?a, "get!" /- hello -/
.lake/packages/LeanSearchClient/LeanSearchClientTest/StateSearchExamples.lean
module public meta import LeanSearchClient.Syntax public meta section /-! # LeanStateSearch Examples Examples of using LeanStateSearch API. The search is triggered by the tactic `#statesearch`. -/ set_option statesearch.queries 1 -- set the number of results to 6 set_option statesearch.revision "v4.16.0" -- set the revision to v4.16.0 /-- info: Try these: • #check Int.one_pos --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 0 < 1 := by #statesearch sorry set_option statesearch.queries 6 /-- info: From: Nat.zero_lt_one (type: 0 < 1) • apply Nat.zero_lt_one • have : 0 < 1 := Nat.zero_lt_one --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 0 < 1 := by #statesearch sorry set_option statesearch.revision "v4.15.0" /-- error: error: "Invalid parameter value" description: "Lean State Search does not support the specified revision" -/ #guard_msgs in example : 0 ≤ 1 := by #statesearch /-! Tests using `search` with `statesearch` as the backend. -/ set_option statesearch.queries 1 -- set the number of results to 6 set_option statesearch.revision "v4.16.0" -- set the revision to v4.16.0 /-- info: Try these: • #check Int.one_pos --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 0 < 1 := by #search sorry set_option statesearch.queries 6 /-- info: From: Nat.zero_lt_one (type: 0 < 1) • apply Nat.zero_lt_one • have : 0 < 1 := Nat.zero_lt_one --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 0 < 1 := by #search sorry set_option statesearch.revision "v4.15.0" /-- error: error: "Invalid parameter value" description: "Lean State Search does not support the specified revision" -/ #guard_msgs in example : 0 ≤ 1 := by #search
.lake/packages/LeanSearchClient/LeanSearchClientTest/Examples.lean
module public meta import LeanSearchClient.Syntax public meta section /-! # Lean Search Examples Examples of using the leansearch API. The search is triggered when the sentence ends with a full stop (period) or a question mark. -/ /-- warning: #leansearch query should be a string that ends with a `.` or `?`. Note this command sends your query to an external service at https://leansearch.net/. -/ #guard_msgs in #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m" /-- warning: #leansearch query should be a string that ends with a `.` or `?`. Note this command sends your query to an external service at https://leansearch.net/. -/ #guard_msgs in example := #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m" set_option leansearch.queries 1 /-- info: From: Nat.le_of_succ_le_succ (type: ∀ {n m : Nat}, LE.le n.succ m.succ → LE.le n m) [apply] apply Nat.le_of_succ_le_succ [apply] have : ∀ {n m : Nat}, LE.le n.succ m.succ → LE.le n m := Nat.le_of_succ_le_succ --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 3 ≤ 5 := by #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry /-- warning: #leansearch query should be a string that ends with a `.` or `?`. Note this command sends your query to an external service at https://leansearch.net/. -/ #guard_msgs in example : 3 ≤ 5 := by #leansearch decide /-! # Lean Search Examples using `#search` -/ set_option leansearchclient.backend "leansearch" /-- warning: #leansearch query should be a string that ends with a `.` or `?`. Note this command sends your query to an external service at https://leansearch.net/. -/ #guard_msgs in #search "If a natural number n is less than m, then the successor of n is less than the successor of m" /-- warning: #leansearch query should be a string that ends with a `.` or `?`. 
Note this command sends your query to an external service at https://leansearch.net/. -/ #guard_msgs in example := #search "If a natural number n is less than m, then the successor of n is less than the successor of m" set_option leansearch.queries 1 /-- info: From: Nat.le_of_succ_le_succ (type: ∀ {n m : Nat}, LE.le n.succ m.succ → LE.le n m) [apply] apply Nat.le_of_succ_le_succ [apply] have : ∀ {n m : Nat}, LE.le n.succ m.succ → LE.le n m := Nat.le_of_succ_le_succ --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 3 ≤ 5 := by #search "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry /-- warning: #leansearch query should be a string that ends with a `.` or `?`. Note this command sends your query to an external service at https://leansearch.net/. --- warning: declaration uses 'sorry' -/ #guard_msgs in example : 3 ≤ 5 := #search set_option leansearchclient.backend "magic" /-- error: Invalid backend magic, must be leansearch -/ #guard_msgs in #search "Every slice knot is ribbon."
.lake/packages/LeanSearchClient/LeanSearchClient/LoogleSyntax.lean
module public meta import Lean.Elab.Tactic.Meta public meta import Lean.Parser.Basic public meta import Lean.Meta.Tactic.TryThis public meta import LeanSearchClient.Basic public meta import LeanSearchClient.Syntax public meta section /-! # LeanSearchClient In this file, we provide syntax for search using the [leansearch API](https://leansearch.net/). from within Lean. It allows you to search for Lean tactics and theorems using natural language. We provide syntax to make a query and generate `TryThis` options to click or use a code action to use the results. The queries are of three forms. For leansearch these are: * `Command` syntax: `#leansearch "search query"` as a command. * `Term` syntax: `#leansearch "search query"` as a term. * `Tactic` syntax: `#leansearch "search query"` as a tactic. In all cases results are displayed in the Lean Infoview and clicking these replaces the query text. In the cases of a query for tactics only valid tactics are displayed. -/ namespace LeanSearchClient open Lean Meta Elab Tactic Parser Term structure LoogleMatch where name : String type : String doc? : Option String deriving Inhabited, Repr inductive LoogleResult where | empty : LoogleResult | success : Array SearchResult → LoogleResult | failure (error : String) (suggestions: Option <| List String) : LoogleResult deriving Inhabited, Repr initialize loogleCache : IO.Ref (Std.HashMap (String × Nat) LoogleResult) ← IO.mkRef {} def getLoogleQueryJson (s : String) (num_results : Nat := 6) : CoreM <| LoogleResult:= do let s := s.splitOn "/-" |>.getD 0 s |>.trim let s := s.replace "\n" " " let cache ← loogleCache.get match cache.get? 
(s, num_results) with | some r => return r | none => do let apiUrl := (← IO.getEnv "LEANSEARCHCLIENT_LOOGLE_API_URL").getD "https://loogle.lean-lang.org/json" let s' := System.Uri.escapeUri s if s.trim == "" then return LoogleResult.empty let q := apiUrl ++ s!"?q={s'}" let out ← IO.Process.output {cmd := "curl", args := #["-X", "GET", "--user-agent", ← useragent, q]} match Json.parse out.stdout with | Except.error _ => IO.throwServerError s!"Could not contact Loogle server" | Except.ok js => let result? := js.getObjValAs? Json "hits" |>.toOption let result? := result?.filter fun js => js != Json.null match result? with | some result => do match result.getArr? with | Except.ok arr => let arr := arr[0:num_results] |>.toArray let xs : Array SearchResult ← arr.mapM fun js => do let doc? := js.getObjValAs? String "doc" |>.toOption let name? := js.getObjValAs? String "name" let type? := js.getObjValAs? String "type" match name?, type? with | Except.ok name, Except.ok type => pure <| {name := name, type? := some type, docString? := doc?, doc_url? := none, kind? := none} | _, _ => IO.throwServerError s!"Could not obtain name and type from {js}" loogleCache.modify fun m => m.insert (s, num_results) (LoogleResult.success xs) return LoogleResult.success xs | Except.error e => IO.throwServerError s!"Could not obtain array from {js}; error: {e}, query :{s'}, hits: {result}" | _ => let error? := js.getObjValAs? String "error" match error? with | Except.ok error => let suggestions? := js.getObjValAs? (List String) "suggestions" |>.toOption loogleCache.modify fun m => m.insert (s, num_results) (LoogleResult.failure error suggestions?) pure <| LoogleResult.failure error suggestions? | _ => IO.throwServerError s!"Could not obtain hits or error from {js}" -- #eval getLoogleQueryJson "List" def loogleUsage : String := "Loogle Usage Loogle finds definitions and lemmas in various ways: By constant: 🔍 Real.sin finds all lemmas whose statement somehow mentions the sine function. 
By lemma name substring: 🔍 \"differ\" finds all lemmas that have \"differ\" somewhere in their lemma name. By subexpression: 🔍 _ * (_ ^ _) finds all lemmas whose statements somewhere include a product where the second argument is raised to some power. The pattern can also be non-linear, as in 🔍 Real.sqrt ?a * Real.sqrt ?a If the pattern has parameters, they are matched in any order. Both of these will find List.map: 🔍 (?a -> ?b) -> List ?a -> List ?b 🔍 List ?a -> (?a -> ?b) -> List ?b By main conclusion: 🔍 |- tsum _ = _ * tsum _ finds all lemmas where the conclusion (the subexpression to the right of all → and ∀) has the given shape. As before, if the pattern has parameters, they are matched against the hypotheses of the lemma in any order; for example, 🔍 |- _ < _ → tsum _ < tsum _ will find tsum_lt_tsum even though the hypothesis f i < g i is not the last. If you pass more than one such search filter, separated by commas Loogle will return lemmas which match all of them. The search 🔍 Real.sin, \"two\", tsum, _ * _, _ ^ _, |- _ < _ → _ woould find all lemmas which mention the constants Real.sin and tsum, have \"two\" as a substring of the lemma name, include a product and a power somewhere in the type, and have a hypothesis of the form _ < _ (if there were any such lemmas). Metavariables (?a) are assigned independently in each filter." open Lean.Parser def unicode_turnstile := nonReservedSymbol "⊢ " def ascii_turnstile := nonReservedSymbol "|- " /-- The turnstyle uesd bin `#find`, unicode or ascii allowed -/ syntax turnstyle := patternIgnore(unicode_turnstile <|> ascii_turnstile) /-- a single `#find` filter. The `term` can also be an ident or a strlit, these are distinguished in `parseFindFilters` -/ syntax loogle_filter := (turnstyle term) <|> term /-- The argument to `#find`, a list of filters -/ syntax loogle_filters := loogle_filter,* open Command /-- Search [Loogle](https://loogle.lean-lang.org/json) from within Lean. 
This can be used as a command, term or tactic as in the following examples. In the case of a tactic, only valid tactics are displayed. ```lean #loogle List ?a → ?a example := #loogle List ?a → ?a example : 3 ≤ 5 := by #loogle Nat.succ_le_succ sorry ``` ## Loogle Usage Loogle finds definitions and lemmas in various ways: By constant: 🔍 Real.sin finds all lemmas whose statement somehow mentions the sine function. By lemma name substring: 🔍 \"differ\" finds all lemmas that have \"differ\" somewhere in their lemma name. By subexpression: 🔍 _ * (_ ^ _) finds all lemmas whose statements somewhere include a product where the second argument is raised to some power. The pattern can also be non-linear, as in 🔍 Real.sqrt ?a * Real.sqrt ?a If the pattern has parameters, they are matched in any order. Both of these will find List.map: 🔍 (?a -> ?b) -> List ?a -> List ?b 🔍 List ?a -> (?a -> ?b) -> List ?b By main conclusion: 🔍 |- tsum _ = _ * tsum _ finds all lemmas where the conclusion (the subexpression to the right of all → and ∀) has the given shape. As before, if the pattern has parameters, they are matched against the hypotheses of the lemma in any order; for example, 🔍 |- _ < _ → tsum _ < tsum _ will find tsum_lt_tsum even though the hypothesis f i < g i is not the last. If you pass more than one such search filter, separated by commas Loogle will return lemmas which match all of them. The search 🔍 Real.sin, \"two\", tsum, _ * _, _ ^ _, |- _ < _ → _ woould find all lemmas which mention the constants Real.sin and tsum, have \"two\" as a substring of the lemma name, include a product and a power somewhere in the type, and have a hypothesis of the form _ < _ (if there were any such lemmas). Metavariables (?a) are assigned independently in each filter. You can modify the Loogle server URL by setting the `LEANSEARCHCLIENT_LOOGLE_API_URL` environment variable. 
-/ syntax (name := loogle_cmd) "#loogle" loogle_filters : command @[command_elab loogle_cmd] def loogleCmdImpl : CommandElab := fun stx => Command.liftTermElabM do match stx with | `(command| #loogle $args:loogle_filters) => let s := (← PrettyPrinter.ppCategory ``loogle_filters args).pretty let result ← getLoogleQueryJson s match result with | LoogleResult.empty => logInfo loogleUsage | LoogleResult.success xs => let suggestions := xs.map SearchResult.toCommandSuggestion if suggestions.isEmpty then logWarning "Loogle search returned no results" logInfo loogleUsage else TryThis.addSuggestions stx suggestions (header := s!"Loogle Search Results") | LoogleResult.failure error suggestions? => logWarning s!"Loogle search failed with error: {error}" logInfo loogleUsage match suggestions? with | some suggestions => let suggestions : List TryThis.Suggestion := suggestions.map fun s => {suggestion := .string s!"#loogle {s}"} unless suggestions.isEmpty do TryThis.addSuggestions stx suggestions.toArray (header := s!"Did you maybe mean") | none => pure () | _ => throwUnsupportedSyntax @[inherit_doc loogle_cmd] syntax (name := just_loogle_cmd)(priority := low) "#loogle" loogle_filters : command @[command_elab just_loogle_cmd] def justLoogleCmdImpl : CommandElab := fun _ => return @[inherit_doc loogle_cmd] syntax (name := loogle_term) "#loogle" loogle_filters : term @[term_elab loogle_term] def loogleTermImpl : TermElab := fun stx expectedType? => do match stx with | `(#loogle $args) => let s := (← PrettyPrinter.ppCategory ``loogle_filters args).pretty let result ← getLoogleQueryJson s match result with | LoogleResult.empty => logInfo loogleUsage | LoogleResult.success xs => let suggestions := xs.map SearchResult.toTermSuggestion if suggestions.isEmpty then logWarning "Loogle search returned no results" logInfo loogleUsage else TryThis.addSuggestions stx suggestions (header := s!"Loogle Search Results") | LoogleResult.failure error suggestions? 
=> logWarning s!"Loogle search failed with error: {error}" logInfo loogleUsage match suggestions? with | some suggestions => let suggestions : List TryThis.Suggestion := suggestions.map fun s => let s := s.replace "\"" "\\\"" {suggestion := .string s!"#loogle \"{s}\""} unless suggestions.isEmpty do TryThis.addSuggestions stx suggestions.toArray (header := s!"Did you maybe mean") | none => pure () defaultTerm expectedType? | _ => throwUnsupportedSyntax @[inherit_doc loogle_cmd] syntax (name := loogle_tactic) withPosition("#loogle" (ppSpace colGt (loogle_filters))) : tactic @[tactic loogle_tactic] def loogleTacticImpl : Tactic := fun stx => do match stx with | `(tactic|#loogle $args) => let s := (← PrettyPrinter.ppCategory ``loogle_filters args).pretty let result ← getLoogleQueryJson s match result with | LoogleResult.empty => logInfo loogleUsage | LoogleResult.success xs => do let suggestionGroups := xs.map fun sr => (sr.name, sr.toTacticSuggestions) for (name, sg) in suggestionGroups do let sg ← sg.filterM fun s => let sugTxt := s.suggestion match sugTxt with | .string s => do let stx? := runParserCategory (← getEnv) `tactic s match stx? with | Except.ok stx => let n? ← checkTactic (← getMainTarget) stx return n?.isSome | Except.error _ => pure false | _ => pure false unless sg.isEmpty do TryThis.addSuggestions stx sg (header := s!"From: {name}") | LoogleResult.failure error suggestions? => logWarning s!"Loogle search failed with error: {error}" logInfo loogleUsage match suggestions? 
with | some suggestions => let suggestions : List TryThis.Suggestion := suggestions.map fun s => {suggestion := .string s!"#loogle \"{s}\""} unless suggestions.isEmpty do TryThis.addSuggestions stx suggestions.toArray (header := s!"Did you maybe mean") | none => pure () | _ => throwUnsupportedSyntax syntax (name := just_loogle_tactic)(priority := low) "#loogle" : tactic @[tactic just_loogle_tactic] def justLoogleTacticImpl : Tactic := fun _ => do logWarning loogleUsage example : 3 ≤ 5 := by -- #loogle Nat.succ_le_succ decide -- example := #loogle List ?a → ?b end LeanSearchClient -- #loogle "sin", Real → Real, |- Real
.lake/packages/LeanSearchClient/LeanSearchClient/Syntax.lean
module public meta import Lean.Elab.Tactic.Meta public meta import Lean.Meta.Tactic.TryThis public meta import LeanSearchClient.Basic public meta import Lean.Server.Utils public meta import Lean.Elab.Command public meta section /-! # LeanSearchClient In this file, we provide syntax for search using the [leansearch API](https://leansearch.net/). from within Lean. It allows you to search for Lean tactics and theorems using natural language. We provide syntax to make a query and generate `TryThis` options to click or use a code action to use the results. The queries are of three forms. For leansearch these are: * `Command` syntax: `#leansearch "search query"` as a command. * `Term` syntax: `#leansearch "search query"` as a term. * `Tactic` syntax: `#leansearch "search query"` as a tactic. In all cases results are displayed in the Lean Infoview and clicking these replaces the query text. In the cases of a query for tactics only valid tactics are displayed. -/ namespace LeanSearchClient open Lean Meta Elab Tactic Parser Term def useragent : CoreM String := return leansearchclient.useragent.get (← getOptions) initialize leanSearchCache : IO.Ref (Std.HashMap (String × Nat) (Array Json)) ← IO.mkRef {} initialize stateSearchCache : IO.Ref (Std.HashMap (String × Nat × String) (Array Json)) ← IO.mkRef {} def getLeanSearchQueryJson (s : String) (num_results : Nat := 6) : CoreM <| Array Json := do let cache ← leanSearchCache.get match cache.get? 
(s, num_results) with | some jsArr => return jsArr | none => do let apiUrl := (← IO.getEnv "LEANSEARCHCLIENT_LEANSEARCH_API_URL").getD "https://leansearch.net/search" -- let q := apiUrl ++ s!"?query={s'}&num_results={num_results}" let js := Json.mkObj [("query", Json.arr #[toJson s]), ("num_results", num_results)] let out ← IO.Process.output {cmd := "curl", args := #["-X", "POST", apiUrl, "--user-agent", ← useragent, "-H", "accept: application/json", "-H", "Content-Type: application/json", "--data", js.pretty]} let js ← match Json.parse out.stdout with | Except.ok js => pure js | Except.error e => IO.throwServerError s!"Could not parse response from LeanSearch server, error: {e}" match js.getArr? with | Except.ok jsArr => do match jsArr[0]!.getArr? with | Except.ok jsArr => leanSearchCache.modify fun m => m.insert (s, num_results) jsArr return jsArr | Except.error e => IO.throwServerError s!"Could not obtain inner array from {js}; error: {e}" | Except.error e => IO.throwServerError s!"Could not obtain outer array from {js}; error: {e}" def getStateSearchQueryJson (s : String) (num_results : Nat := 6) (rev : String) : CoreM <| Array Json := do let cache ← stateSearchCache.get match cache.get? (s, num_results, rev) with | .some jsArr => return jsArr | none => do let apiUrl := (← IO.getEnv "LEANSEARCHCLIENT_LEANSTATESEARCH_API_URL").getD "https://premise-search.com/api/search" let s' := System.Uri.escapeUri s let q := apiUrl ++ s!"?query={s'}&results={num_results}&rev={rev}" let out ← IO.Process.output {cmd := "curl", args := #["-X", "GET", "--user-agent", ← useragent, q]} let js ← match Json.parse out.stdout |>.toOption with | some js => pure js | none => IO.throwServerError s!"Could not contact LeanStateSearch server" match js.getArr? with | Except.ok jsArr => do stateSearchCache.modify fun m => m.insert (s, num_results, rev) jsArr return jsArr | Except.error e => let .ok err := js.getObjVal? "error" | IO.throwServerError s!"{e}" let .ok schema := js.getObjVal? 
"schema" | IO.throwServerError s!"{e}" let .ok desc := schema.getObjVal? "description" | IO.throwServerError s!"{e}" IO.throwServerError s!"error: {err}\ndescription: {desc}" structure SearchResult where name : String type? : Option String docString? : Option String doc_url? : Option String kind? : Option String deriving Repr namespace SearchResult def ofLeanSearchJson? (js : Json) : Option SearchResult := match js.getObjVal? "result" with | Except.ok js => match js.getObjValAs? (List String) "name" with | Except.ok nameList => let name := nameList.foldl (init := "") fun acc s => if acc == "" then s else acc ++ "." ++ s let type? := js.getObjValAs? String "type" |>.toOption let doc? := js.getObjValAs? String "docstring" |>.toOption let doc? := doc?.filter fun s => s != "" let docurl? := js.getObjValAs? String "doc_url" |>.toOption let kind? := js.getObjValAs? String "kind" |>.toOption some {name := name, type? := type?, docString? := doc?, doc_url? := docurl?, kind? := kind?} | _ => none | _ => none def ofLoogleJson? (js : Json) : Option SearchResult := match js.getObjValAs? String "name" with | Except.ok name => let type? := js.getObjValAs? String "type" |>.toOption let doc? := js.getObjValAs? String "doc" |>.toOption let doc? := doc?.filter fun s => s != "" some {name := name, type? := type?, docString? := doc?, doc_url? := none, kind? := none} | _ => none def ofStateSearchJson? (js : Json) : Option SearchResult := match js.getObjValAs? String "name" with | Except.ok name => let type? := js.getObjValAs? String "formal_type" |>.toOption let doc? := js.getObjValAs? String "doc" |>.toOption let doc? := doc?.filter fun s => s != "" let kind? := js.getObjValAs? String "kind" |>.toOption some {name := name, type? := type?, docString? := doc?, doc_url? := none, kind? := kind?} | _ => none def toCommandSuggestion (sr : SearchResult) : TryThis.Suggestion := let data := match sr.docString? 
with | some doc => s!"{doc}\n" | none => "" {suggestion := s!"#check {sr.name}", postInfo? := sr.type?.map fun s => s!" -- {s}" ++ s!"\n{data}"} def toTermSuggestion (sr : SearchResult) : TryThis.Suggestion := match sr.type? with | some type => {suggestion := sr.name, postInfo? := some s!" (type: {type})"} | none => {suggestion := sr.name} def toTacticSuggestions (sr : SearchResult) : Array TryThis.Suggestion := match sr.type? with | some type => #[{suggestion := s!"apply {sr.name}"}, {suggestion := s!"have : {type} := {sr.name}"}, {suggestion := s!"rw [{sr.name}]"}, {suggestion := s!"rw [← {sr.name}]" }] | none => #[] end SearchResult def queryLeanSearch (s : String) (num_results : Nat) : MetaM <| Array SearchResult := do let jsArr ← getLeanSearchQueryJson s num_results return jsArr.filterMap SearchResult.ofLeanSearchJson? def queryStateSearch (s : String) (num_results : Nat) (rev : String): MetaM <| Array SearchResult := do let jsArr ← getStateSearchQueryJson s num_results rev return jsArr.filterMap SearchResult.ofStateSearchJson? def defaultTerm (expectedType? : Option Expr) : MetaM Expr := do match expectedType? with | some type => if !type.hasExprMVar then mkAppM ``sorryAx #[type, mkConst ``false] else return mkConst ``True.intro | none => return mkConst ``True.intro def checkTactic (target : Expr) (tac : Syntax) : TermElabM (Option Nat) := withoutModifyingState do try let goal ← mkFreshExprMVar target let (goals, _) ← withoutErrToSorry do Elab.runTactic goal.mvarId! 
tac (← read) (← get) return some goals.length catch _ => return none structure SearchServer where name : String url : String cmd: String query : String → Nat → MetaM (Array SearchResult) queryNum : CoreM Nat def leanSearchServer : SearchServer := {name := "LeanSearch", cmd := "#leansearch", url := "https://leansearch.net/", query := queryLeanSearch, queryNum := return leansearch.queries.get (← getOptions)} instance : Inhabited SearchServer := ⟨leanSearchServer⟩ namespace SearchServer def getCommandSuggestions (ss : SearchServer) (s : String) (num_results : Nat) : MetaM (Array TryThis.Suggestion) := do let suggestions ← ss.query s num_results return suggestions.map SearchResult.toCommandSuggestion def getTermSuggestions (ss : SearchServer) (s : String) (num_results : Nat) : MetaM (Array TryThis.Suggestion) := do let suggestions ← ss.query s num_results return suggestions.map SearchResult.toTermSuggestion def getTacticSuggestionGroups (ss : SearchServer) (s : String) (num_results : Nat) : MetaM (Array (String × Array TryThis.Suggestion)) := do let suggestions ← ss.query s num_results return suggestions.map fun sr => let fullName := match sr.type? with | some type => s!"{sr.name} (type: {type})" | none => sr.name (fullName, sr.toTacticSuggestions) def incompleteSearchQuery (ss : SearchServer) : String := s!"{ss.cmd} query should be a string that ends with a `.` or `?`.\n\ Note this command sends your query to an external service at {ss.url}." open Command def searchCommandSuggestions (ss: SearchServer) (stx: Syntax) (s: TSyntax `str) : CommandElabM Unit := Command.liftTermElabM do let s := s.getString if s.endsWith "." || s.endsWith "?" then let suggestions ← ss.getCommandSuggestions s (← ss.queryNum) TryThis.addSuggestions stx suggestions (header := s!"{ss.name} Search Results") else logWarning <| ss.incompleteSearchQuery def searchTermSuggestions (ss: SearchServer) (stx: Syntax) (s: TSyntax `str) : TermElabM Unit := do let s := s.getString if s.endsWith "." 
|| s.endsWith "?" then let suggestions ← ss.getTermSuggestions s (← ss.queryNum) TryThis.addSuggestions stx suggestions (header := s!"{ss.name} Search Results") else logWarning <| ss.incompleteSearchQuery def searchTacticSuggestions (ss: SearchServer) (stx: Syntax) (s: TSyntax `str) : TacticM Unit := do let s := s.getString if s.endsWith "." || s.endsWith "?" then let target ← getMainTarget let suggestionGroups ← ss.getTacticSuggestionGroups s (← ss.queryNum) for (name, sg) in suggestionGroups do let sg ← sg.filterM fun s => let sugTxt := s.suggestion match sugTxt with | .string s => do let stx? := runParserCategory (← getEnv) `tactic s match stx? with | Except.ok stx => let n? ← checkTactic target stx return n?.isSome | Except.error _ => pure false | _ => pure false unless sg.isEmpty do TryThis.addSuggestions stx sg (header := s!"From: {name}") else logWarning <| ss.incompleteSearchQuery end SearchServer open Command /-- Search [LeanSearch](https://leansearch.net/) from within Lean. Queries should be a string that ends with a `.` or `?`. This works as a command, as a term and as a tactic as in the following examples. In tactic mode, only valid tactics are displayed. ```lean #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." example := #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." example : 3 ≤ 5 := by #leansearch "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry ``` You can modify the LeanSearch URL by setting the `LEANSEARCHCLIENT_LEANSEARCH_API_URL` environment variable. -/ syntax (name := leansearch_search_cmd) "#leansearch" (str)? 
: command @[command_elab leansearch_search_cmd] def leanSearchCommandImpl : CommandElab := fun stx => match stx with | `(command| #leansearch $s) => do leanSearchServer.searchCommandSuggestions stx s | `(command| #leansearch) => do logWarning leanSearchServer.incompleteSearchQuery | _ => throwUnsupportedSyntax /-- Search from within Lean, depending on the option `leansearchclient.backend` (currently only leansearch). Queries should be a string that ends with a `.` or `?`. This works as a command, as a term and as a tactic as in the following examples. In tactic mode, only valid tactics are displayed. ```lean #search "If a natural number n is less than m, then the successor of n is less than the successor of m." example := #search "If a natural number n is less than m, then the successor of n is less than the successor of m." example : 3 ≤ 5 := by #search "If a natural number n is less than m, then the successor of n is less than the successor of m." sorry In tactic mode, if the query string is not supplied, then [LeanStateSearch](https://premise-search.com) is queried based on the goal state. ``` -/ syntax (name := search_cmd) "#search" (str)? : command @[command_elab search_cmd] def searchCommandImpl : CommandElab := fun stx => do let server ← match leansearchclient.backend.get (← getOptions) with | "leansearch" => pure leanSearchServer | s => throwError s!"Invalid backend {s}, must be leansearch" match stx with | `(command| #search $s) => do server.searchCommandSuggestions stx s | `(command| #search) => do logWarning server.incompleteSearchQuery | _ => throwUnsupportedSyntax @[inherit_doc leansearch_search_cmd] syntax (name := leansearch_search_term) "#leansearch" (str)? : term @[term_elab leansearch_search_term] def leanSearchTermImpl : TermElab := fun stx expectedType? => do match stx with | `(#leansearch $s) => leanSearchServer.searchTermSuggestions stx s defaultTerm expectedType? 
| `(#leansearch) => do logWarning leanSearchServer.incompleteSearchQuery defaultTerm expectedType? | _ => throwUnsupportedSyntax @[inherit_doc search_cmd] syntax (name := search_term) "#search" (str)? : term @[term_elab search_term] def searchTermImpl : TermElab := fun stx expectedType? => do let server ← match leansearchclient.backend.get (← getOptions) with | "leansearch" => pure leanSearchServer | s => throwError s!"Invalid backend {s}, should be leansearch" match stx with | `(#search $s) => server.searchTermSuggestions stx s defaultTerm expectedType? | `(#search) => do logWarning server.incompleteSearchQuery defaultTerm expectedType? | _ => throwUnsupportedSyntax @[inherit_doc leansearch_search_cmd] syntax (name := leansearch_search_tactic) withPosition("#leansearch" (colGt str)?) : tactic @[tactic leansearch_search_tactic] def leanSearchTacticImpl : Tactic := fun stx => withMainContext do match stx with | `(tactic|#leansearch $s) => leanSearchServer.searchTacticSuggestions stx s | `(tactic|#leansearch) => do logWarning leanSearchServer.incompleteSearchQuery | _ => throwUnsupportedSyntax /-- Search [LeanStateSearch](https://premise-search.com) from within Lean. Your current main goal is sent as query. The revision to search can be set using the `statesearch.revision` option. The number of results can be set using the `statesearch.queries` option. Hint: If you want to modify the query, you need to use the web interface. ```lean set_option statesearch.queries 1 set_option statesearch.revision "v4.16.0" example : 0 ≤ 1 := by #statesearch sorry ``` You can modify the LeanStateSearch URL by setting the `LEANSEARCHCLIENT_LEANSTATESEARCH_API_URL` environment variable. 
-/ syntax (name := statesearch_search_tactic) withPosition("#statesearch") : tactic @[tactic statesearch_search_tactic] def stateSearchTacticImpl : Tactic := fun stx => withMainContext do let goal ← getMainGoal let target ← getMainTarget let state := (← Meta.ppGoal goal).pretty let num_results := (statesearch.queries.get (← getOptions)) let rev := (statesearch.revision.get (← getOptions)) match stx with | `(tactic|#statesearch) => let results ← queryStateSearch state num_results rev let suggestionGroups := results.map fun sr => let fullName := match sr.type? with | some type => s!"{sr.name} (type: {type})" | none => sr.name (fullName, sr.toTacticSuggestions) let mut foundValidTactic := false for (name, sg) in suggestionGroups do let sg ← sg.filterM fun s => let sugTxt := s.suggestion match sugTxt with | .string s => do let stx? := runParserCategory (← getEnv) `tactic s match stx? with | Except.ok stx => let n? ← checkTactic target stx return n?.isSome | Except.error _ => pure false | _ => pure false unless sg.isEmpty do foundValidTactic := true TryThis.addSuggestions stx sg (header := s!"From: {name}") unless foundValidTactic do TryThis.addSuggestions stx (results.map SearchResult.toCommandSuggestion) | _ => throwUnsupportedSyntax @[inherit_doc search_cmd] syntax (name := search_tactic) "#search" (str)? : tactic @[tactic search_tactic] def searchTacticImpl : Tactic := fun stx => withMainContext do match stx with | `(tactic|#search $s) => let server ← match leansearchclient.backend.get (← getOptions) with | "leansearch" => pure leanSearchServer | s => throwError s!"Invalid backend {s}, should be leansearch" server.searchTacticSuggestions stx s | `(tactic|#search) => do evalTactic (← `(tactic|#statesearch)) | _ => throwUnsupportedSyntax end LeanSearchClient
.lake/packages/LeanSearchClient/LeanSearchClient/Basic.lean
module public meta import Lean.Data.Options public meta section register_option leansearch.queries : Nat := { defValue := 6 group := "leansearch" descr := "Number of results requested from leansearch (default 6)" } register_option loogle.queries : Nat := { defValue := 6 group := "loogle" descr := "Number of results requested from loogle (default 6)" } register_option statesearch.queries : Nat := { defValue := 6 group := "statesearch" descr := "Number of results requested from statesearch (default 6)" } register_option statesearch.revision : String := { defValue := s!"v{Lean.versionString}" group := "statesearch" descr := "Revision of LeanStateSearch to use" } register_option leansearchclient.useragent : String := { defValue := "LeanSearchClient" group := "leansearchclient" descr := "Username for leansearchclient" } register_option leansearchclient.backend : String := { defValue := "leansearch" group := "leansearchclient" descr := "The backend to use by default, currently only leansearch" }
.lake/packages/Qq/Qq.lean
module public import Qq.Macro public import Qq.Delab public import Qq.MetaM public import Qq.Simp public import Qq.Match public import Qq.AssertInstancesCommute public import Qq.Commands
.lake/packages/Qq/README.md
# Expression quotations for Lean 4 This package implements type-safe expression quotations, which are a particularly convenient way of constructing object-level expressions (`Expr`) in meta-level code. It combines the intuitiveness of modal sequent calculus with the power and speed of Lean 4's metaprogramming facilities. ## Show me some code! ```lean import Qq open Qq Lean -- Construct an expression def a : Expr := q([42 + 1]) -- Construct a typed expression def b : Q(List Nat) := q([42 + 1]) -- Antiquotations def c (n : Q(Nat)) := q([42 + $n]) -- Dependently-typed antiquotations def d (u : Level) (n : Q(Nat)) (x : Q(Type u × Fin ($n + 1))) : Q(Fin ($n + 3)) := q(⟨$x.2, Nat.lt_of_lt_of_le $x.2.2 (Nat.le_add_right _ 2)⟩) -- Use `q()` to generate a large term in a regular definition def add_self_37 {α : Type u} [Add α] (a : α) : α := by_elabq return (List.range 36).foldr (init := q($a)) fun _ acc => q($acc + $a) ``` ## Typing rules The `Q(·)` modality quotes types: `Q(α)` denotes an expression of type `α`. The type former comes with the following natural introduction rule: ``` $a₁ : α₁, …, $aₙ : αₙ ⊢ t : Type --------------------------------------------- a₁ : Q(α₁), …, aₙ : Q(αₙ) ⊢ Q(t) : Type ``` The lower-case `q(·)` macro serves as the modal inference rule, allowing us to construct values in `Q(·)`: ``` $a₁ : α₁, …, $aₙ : αₙ ⊢ t : β --------------------------------------------- a₁ : Q(α₁), …, aₙ : Q(αₙ) ⊢ q(t) : Q(β) ``` ## Example Let us write a type-safe version of `mkApp`: ```lean import Qq open Qq set_option trace.compiler.ir.result true in -- Note: `betterApp` actually has two additional parameters -- `{u v : Lean.Level}` auto-generated due to option -- `autoBoundImplicitLocal`. def betterApp {α : Q(Sort u)} {β : Q($α → Sort v)} (f : Q((a : α) → $β a)) (a : Q($α)) : Q($β $a) := q($f $a) #eval betterApp q(Int.toNat) q(42) ``` There are many things going on here: 1. The `betterApp` function compiles to a single `betaRev` call. 1. 
It does not require the `MetaM` monad (in contrast to `AppBuilder.lean` in the Lean 4 code). 1. `Q(…)` is definitionally equal to `Expr`, so each variable in the example is just an `Expr`. 1. Nevertheless, implicit arguments of the definition (such as `α` or `u`) get filled in by type inference, which reduces the potential for errors even in the absence of strong type safety at the meta level. 1. All quoted expressions, i.e. all code inside `Q(·)` and `q(·)`, are type-safe (under the assumption that the values of `α`, `f`, etc. really have their declared types). 1. The second argument in the `#eval` example, `q(42)`, correctly constructs an expression of type `Int`, as determined by the first argument. Because `betterApp` takes `α` and `u` (and `β` and `v`) as arguments, it can also perform more interesting tasks compared to the untyped function `mkApp`: for example, we can change `q($f $a)` into `q(id $f $a)` without changing the interface (even though the resulting expression now contains both the type and the universe level). The arguments do not need to refer to concrete types like `Int` either: `List ((u : Level) × (α : Q(Sort u)) × List Q(Option $α))` does what you think it does! In fact it is a crucial feature that we can write metaprograms transforming terms of nonconcrete types in inconsistent contexts: ```lean def tryProve (n : Q(Nat)) (i : Q(Fin $n)) : Option Q($i > 0) := ... ``` If the `i > 0` in the return type were a concrete type in the metalanguage, then we could not call `tryProve` with `n := 0` (because we would need to provide a value for `i : Fin 0`). Furthermore, if `n` were a concrete value, then we could not call `tryProve` on the subterm `t` of `fun n : Nat => t`. ## Implementation The type family on which this package is built is called `QQ`: ```lean def QQ (α : Expr) := Expr ``` The intended meaning of `e : QQ t` is that `e` is an expression of type `t`. Or if you will, `isDefEq (← inferType e) t`. 
(This invariant is not enforced though, but it can be checked with `QQ.check`.) The `QQ` type is not meant to be used manually. You should only interact with it using the `Q(·)` and `q(·)` macros. ## Comparison Template Haskell provides a similar mechanism for type-safe quotations, writing `Q Int` for an expression of type `Int`. This is subtly different to the `QQ` type family considered here: in Lean notation, TH's family has the type `Q : Type u → Type`, while ours has the type `QQ : Expr → Type`. In Lean, `Q` is not sufficiently expressive due to universe polymorphism: we might only know at runtime which universe the type is in, but `Q` version fixes the universe at compile time. Another lack of expressivity concerns dependent types: a telescope such as `{α : Q Type} (a : Q α)` is not well-typed with TH's `Q` constructor, because `α` is not a type. ## To do - This has almost certainly been done before somewhere else, by somebody else. - `ql(imax u (v+1))` - Automatically create free variables for recursion. Maybe something like this: ```lean def turnExistsIntoForall : Q(Prop) → MetaM Q(Prop) | ~q(∃ x, $p x) => do q(∀ x, $(x => turnExistsIntoForall q($p $x))) | e => e ``` - Matching should provide control over type-class diamonds, such as ```lean ~q((a + b : α) where Semiring α commutes ∀ n, OfNat α n a + a defEq 0) ``` - Matching on types should be possible, that is, `match (e : Expr) with | ~q($p ∧ $q) => ...`. - Other bug fixes, documentation, and assorted polishing.
.lake/packages/Qq/QqTest/betterApp.lean
import Qq open Qq -- set_option trace.compiler.ir.result true in -- Note: `betterApp` actually has two additional parameters -- `{u v : Lean.Level}` auto-generated due to option -- `autoBoundImplicitLocal`. def betterApp {α : Q(Sort $u)} {β : Q($α → Sort $v)} (f : Q((a : $α) → $β a)) (a : Q($α)) : Q($β $a) := q($f $a) /-- info: (Lean.Expr.const `Int.toNat []).app ((((Lean.Expr.const `OfNat.ofNat [Lean.Level.zero]).app (Lean.Expr.const `Int [])).app (Lean.Expr.lit (Lean.Literal.natVal 42))).app ((Lean.Expr.const `instOfNat []).app (Lean.Expr.lit (Lean.Literal.natVal 42)))) -/ #guard_msgs in #eval betterApp q(Int.toNat) q(42) /-- info: betterApp q(fun a => ⟨a, ⋯⟩) q(42) : Q(Fin (42 + 1)) -/ #guard_msgs in #check betterApp (β := q(fun n : Nat => Fin (n+1))) q(fun a => ⟨a, Nat.lt_succ_self _⟩) q(42)
.lake/packages/Qq/QqTest/hoMatching.lean
import Qq open Qq Lean -- TODO: this linter crashes on the `def` below set_option linter.constructorNameAsVariable false in def turnExistsIntoForall : Q(Prop) → MetaM Q(Prop) | ~q(∃ x y, $p x y) => return q(∀ x y, $p x y) | e => return e /-- info: ∀ (x : String) (y : Nat), x.length ≤ y + 42 -/ #guard_msgs in run_cmd Elab.Command.liftTermElabM do Lean.logInfo <| ← turnExistsIntoForall q(∃ a b, String.length a ≤ b + 42)
.lake/packages/Qq/QqTest/typeclass.lean
import Qq open Qq set_option linter.unusedVariables false in def typeClassArgument (α : Q(Sort u)) (inst : Q(Inhabited $α)) : Q($α) := q(Inhabited.default) example : Q(Nat) := typeClassArgument q(Nat) q(inferInstance) /-- info: ((Expr.const `Inhabited.default [Level.zero.succ]).app (Expr.const `Nat [])).app (((Expr.const `inferInstance [Level.zero.succ]).app ((Expr.const `Inhabited [Level.zero.succ]).app (Expr.const `Nat []))).app (Expr.const `instInhabitedNat [])) -/ #guard_msgs in open Lean in #eval show MetaM Q(Nat) from do let _ ← synthInstanceQ q(Inhabited Nat) return typeClassArgument (u := levelOne) q(Nat) q(inferInstance)
.lake/packages/Qq/QqTest/matching.lean
import Qq open Qq Lean partial def summands {α : Q(Type $u)} (inst : Q(Add $α)) : Q($α) → MetaM (List Q($α)) | ~q($x + $y) => return (← summands inst x) ++ (← summands inst y) | n => return [n] opaque k : Nat opaque m : Nat abbrev double (a : Nat) := a + a /-- info: [Expr.const `k [], Expr.const `k [], Expr.const `m []] -/ #guard_msgs in #eval summands q(inferInstance) q(double k + m) /-- info: false --- trace: x : Q(Nat) := q(k + m) a b : Q(Nat) match_eq✝ : (k + m) =Q «$a».add «$b» ⊢ Bool -/ #guard_msgs in #eval show MetaM Bool from do let x : Q(Nat) := q(k + m) match x with | ~q(Nat.add $a $b) => return by trace_state; exact true | _ => return false abbrev square (a : Nat) := have : Add Nat := ⟨(· * ·)⟩ a + a /-- info: 100 -/ #guard_msgs in #eval square 10 /-- info: [Expr.const `k [], (Expr.const `square []).app ((Expr.const `square []).app (Expr.const `k []))] -/ #guard_msgs in #eval summands q(inferInstance) q(k + square (square k)) /-- info: [((((((Expr.const `HMul.hMul [Level.zero, Level.zero, Level.zero]).app (Expr.const `Nat [])).app (Expr.const `Nat [])).app (Expr.const `Nat [])).app (((Expr.const `instHMul [Level.zero]).app (Expr.const `Nat [])).app (Expr.const `instMulNat []))).app (Expr.const `k [])).app ((Expr.const `square []).app ((Expr.const `square []).app (Expr.const `k [])))] -/ #guard_msgs in #eval summands q(⟨(· * ·)⟩) q(k * square (square k)) def matchProd (e : Nat × Q(Nat)) : MetaM Bool := do let (2, ~q(1)) := e | return false return true #eval do guard <| (←matchProd (2, q(1))) == true #eval do guard <| (←matchProd (1, q(1))) == false #eval do guard <| (←matchProd (2, q(2))) == false def matchNatSigma (e : (n : Q(Type)) × Q($n)) : MetaM (Option Q(Nat)) := do let ⟨~q(Nat), ~q($n)⟩ := e | return none return some n #eval do guard <| (← matchNatSigma ⟨q(Nat), q(1)⟩) == some q(1) #eval do guard <| (← matchNatSigma ⟨q(Nat), q(2)⟩) == some q(2) #eval do guard <| (← matchNatSigma ⟨q(Int), q(2)⟩) == none /-- Given `x + y` of Nat, returns `(x, y)`. 
Otherwise fail. -/ def getNatAdd (e : Expr) : MetaM (Option (Q(Nat) × Q(Nat))) := do let ⟨Level.succ Level.zero, ~q(Nat), ~q($a + $b)⟩ ← inferTypeQ e | return none return some (a, b) #eval do guard <| (← getNatAdd q(1 + 2)) == some (q(1), q(2)) #eval do guard <| (← getNatAdd q((1 + 2 : Int))) == none def pairLit (u : Lean.Level) (α : Q(Type u)) (a : Q($α)) : MetaM Q($α × $α) := do match u, α, a with | 0, ~q(Nat), n => return q(($n, $n)) | 0, ~q(Int), z => return q(($z, $z)) | _, _, _ => failure #eval show MetaM Unit from do guard <| (←pairLit _ _ q(2)) == q((2, 2)) -- `generalizing := true` is a no-op def pairLit' (u : Lean.Level) (α : Q(Type u)) (a : Q($α)) : MetaM Q($α × $α) := do match (generalizing := true) u, α, a with | 0, ~q(Nat), n => return q(($n, $n)) | 0, ~q(Int), z => return q(($z, $z)) | _, _, _ => failure #eval show MetaM Unit from do guard <| (←pairLit' _ _ q(2)) == q((2, 2))
.lake/packages/Qq/QqTest/defeqs.lean
import Qq open Qq example {α : Q(Sort u)} (a : Q($α)) : Unit := have : u =QL 1 := ⟨⟩ have : $α =Q Nat := ⟨⟩ have : $a =Q 42 := ⟨⟩ by exact -- TODO have : Q($a > 10) := q(by decide) ()
.lake/packages/Qq/QqTest/matchMotive.lean
import Qq open Qq Lean open Elab Term in elab tk:"showTerm" t:term : term <= expectedType => do let t' ← withSynthesize do elabTerm t expectedType logAt tk t' (severity := MessageSeverity.information) return t' /-- info: failed to pretty print expression (use 'set_option pp.rawOnError true' for raw representation) -/ #guard_msgs in def provePositive (n : Q(Nat)) : MetaM Q(0 < $n) := showTerm match n with | ~q($m + 1) => pure q(Nat.lt_of_le_of_lt (Nat.zero_le _) (Nat.lt_succ_self _)) | ~q(1 + $m) => pure q(by show 0 < 1 + $m rw [Nat.add_comm] exact Nat.lt_of_le_of_lt (Nat.zero_le _) (Nat.lt_succ_self _)) | _ => throwError "cannot prove positive: {n}" /-- info: (((((Expr.const `Nat.lt_of_le_of_lt []).app ((((Expr.const `OfNat.ofNat [Level.zero]).app (Expr.const `Nat [])).app (Expr.lit (Literal.natVal 0))).app ((Expr.const `instOfNatNat []).app (Expr.lit (Literal.natVal 0))))).app ((((Expr.const `OfNat.ofNat [Level.zero]).app (Expr.const `Nat [])).app (Expr.lit (Literal.natVal 41))).app ((Expr.const `instOfNatNat []).app (Expr.lit (Literal.natVal 41))))).app ((((Expr.const `OfNat.ofNat [Level.zero]).app (Expr.const `Nat [])).app (Expr.lit (Literal.natVal 42))).app ((Expr.const `instOfNatNat []).app (Expr.lit (Literal.natVal 42))))).app ((Expr.const `Nat.zero_le []).app ((((Expr.const `OfNat.ofNat [Level.zero]).app (Expr.const `Nat [])).app (Expr.lit (Literal.natVal 41))).app ((Expr.const `instOfNatNat []).app (Expr.lit (Literal.natVal 41)))))).app ((Expr.const `Nat.lt_succ_self []).app ((((Expr.const `OfNat.ofNat [Level.zero]).app (Expr.const `Nat [])).app (Expr.lit (Literal.natVal 41))).app ((Expr.const `instOfNatNat []).app (Expr.lit (Literal.natVal 41))))) -/ #guard_msgs in #eval provePositive q(42) >>= fun proof => pure proof <* proof.check
.lake/packages/Qq/QqTest/commandTest.lean
import Qq.Commands import Qq.Delab open Qq Lean Elab Tactic macro "trace_state" : doElem => `(doElem| (by trace_state; exact pure ())) macro "trace_return" t:term : doElem => `(doElem| (by trace_state; exact pure $t)) -- using the goal type (interpret 2, 3 as Int) /-- trace: x : Q(Prop) inst✝ : Q(Decidable «$x») ⊢ TermElabM Q(Int) -/ #guard_msgs in def f₁ (x : Prop) [Decidable x] : Int := by_elabq trace_return q(if $x then 2 else 3) def add_self_37 {α : Type u} [Add α] (a : α) : α := by_elabq return (List.range 36).foldr (init := q($a)) fun _ acc => q($acc + $a) /-- info: f₁ (x : Prop) [Decidable x] : Int -/ #guard_msgs in #check f₁ -- without goal type /-- trace: _x : Q(Int) ⊢ TermElabM Expr -/ #guard_msgs in def f₂ (_x : Int) := by_elabq trace_return q(5) /-- info: f₂ (_x : Int) : Nat -/ #guard_msgs in #check f₂ -- tactic without capturing the goal /-- trace: a b : Q(Nat) _h : Q(«$a» = «$b») p : Q(Prop) := q(«$a» = «$b») ⊢ TacticM PUnit -/ #guard_msgs in example (a b : Nat) (_h : a = b) : True := by run_tacq let p : Q(Prop) := q($a = $b) trace_state let t ← Meta.inferType _h trivial def assignQ {α : Q(Sort u)} (mvar : Q($α)) (val : Q($α)) : Meta.MetaM Unit := mvar.mvarId!.assign val -- tactic with capturing the goal /-- info: true --- info: a = b -/ #guard_msgs in example (a b : Nat) (h : False) : a = b := by run_tacq goal => logInfo m!"{goal.isMVar}" logInfo goal.ty assignQ q($goal) q(False.elim $h) -- universes & let expressions universe u v in /-- trace: u v : Level α : Q(Type u) β : Q(Type v) f₀ : Q(«$α» → «$β») f₁ : Q(«$β» → «$α») b : Q(«$β») h : Q(«$f₀» («$f₁» «$b») = «$b») f₂ : Q(«$β» → «$β») f₂_eq✝ : «$f₂» =Q «$f₀» ∘ «$f₁» goal : Q(«$b» = «$f₂» «$b») ⊢ TacticM PUnit -/ #guard_msgs in example {α : Type u} {β : Type v} (f₀ : α → β) (f₁ : β → α) (b : β) (h : f₀ (f₁ b) = b) : let f₂ := f₀ ∘ f₁ b = f₂ b := by intro f₂ run_tacq goal => trace_state assignQ q($goal) q(Eq.symm $h) universe u v in /-- trace: u v : Level α : Q(Type u) β : Q(Type v) f₀ : Q(«$α» → 
«$β») f₁ : Q(«$β» → «$α») b : Q(«$β») h : Q(«$f₀» («$f₁» «$b») = «$b») f₂ : Q(«$β» → «$β») f₂_eq✝ : «$f₂» =Q «$f₀» ∘ «$f₁» ⊢ TermElabM Q(«$b» = «$f₀» («$f₁» «$b»)) -/ #guard_msgs in example {α : Type u} {β : Type v} (f₀ : α → β) (f₁ : β → α) (b : β) (h : f₀ (f₁ b) = b) : b = f₀ (f₁ b) := let f₂ := f₀ ∘ f₁ by_elabq trace_return q(Eq.symm $h)
.lake/packages/Qq/QqTest/clauseConvertType.lean
import Qq open Qq Lean /-! Type-safe conversion from values of type `Sum α β` to values of type `Sum α (Sum β Empty)`. -/ def orLevel : (ps : List ((u : Level) × Q(Type u))) → Level | [] => .zero | ⟨u, _⟩ :: ps => .max u (orLevel ps) def or1 : (ps : List ((u : Level) × Q(Type u))) → Q(Type $(orLevel ps)) | [] => q(Empty) | [⟨_, p⟩] => q($p) | ⟨u, p⟩::ps => q(Sum $p $(or1 ps)) def or2 : (ps : List ((u : Level) × Q(Type u))) → Q(Type $(orLevel ps)) | [] => q(Empty) | ⟨u, p⟩ :: ps => q(Sum $p $(or2 ps)) def orChange : (ps : List ((u : Level) × Q(Type u))) → Q($(or1 ps) → $(or2 ps)) | [] => q(id) | [⟨_, _⟩] => q(Sum.inl) | ⟨_, _⟩::(ps1::ps2) => let h := orChange (ps1::ps2) q(fun h => match h with | Sum.inl l => Sum.inl l | Sum.inr r => Sum.inr ($h r))
.lake/packages/Qq/QqTest/misc.lean
import Qq open Lean Qq def bar {α : Q(Type u)} (a : Q($α)) : Q(Prop) := q($a = $a) def bar2 {α : Q(Sort u)} (a : Q($α)) : Q($a = $a) := q(by simp) def baz (u : Level) : Type := Q(Sort u) #guard_msgs(drop info, warning, error) in #eval bar2 q([1,2, 4]) /-- info: q(∀ (x : Nat), x = x + 0) : Q(Prop) -/ #guard_msgs in #check q(∀ x, x = x + 0) example {α : Q(Type u)} (inst : Q(Inhabited $α)) : Q(∃ x : $α, x = x) := q(⟨default, by rfl⟩) -- TODO: investigate PANIC -- example : Q(let x := 5; x = x) := q(by simp) #guard_msgs(drop info, warning, error) in -- the following also panics if moved into an `example` #eval show Q(∀ n : UInt64, n.toFin = n.toFin) from q(fun _ => by simp) def foo' (n : Nat) : Q(Q($($n) = $($n))) := q(q(by simp)) #guard_msgs(drop info, warning, error) in #eval foo' 3
.lake/packages/Qq/QqTest/introQ.lean
import Qq open Qq Lean def introQ {α : Q(Sort u)} {β : Q($α → Sort v)} (mvar : Q(∀ a, $β a)) (n : Name) : MetaM ((a : Q($α)) × Q($β $a)) := do let (f, v) ← mvar.mvarId!.intro n pure ⟨.fvar f, .mvar v⟩ def assignQ {α : Q(Sort u)} (mvar : Q($α)) (val : Q($α)) : MetaM Unit := mvar.mvarId!.assign val elab "demo" : term => do let P ← mkFreshExprMVarQ q(∀ {n : Nat}, n = 1) let ⟨_, m⟩ ← introQ q(@$P) `n m.mvarId!.withContext do assignQ q($m) q(sorry) instantiateMVars P /-- info: fun {n} => sorry : ∀ {n : Nat}, n = 1 -/ #guard_msgs in #check demo
.lake/packages/Qq/QqTest/conjs.lean
import Qq open Qq Lean Elab Meta Tactic elab "print_conjs" : tactic => do for ldecl in ← getLCtx do if let some ty ← checkTypeQ (u := levelOne) ldecl.type q(Prop) then if let ~q($p ∧ $q) := ty then logInfo m!"left = {p}, right = {q}" /-- info: left = true = true, right = False -/ #guard_msgs in example (_ : true ∧ False) : true := by print_conjs trivial
.lake/packages/Qq/QqTest/strengthenInstance.lean
import Qq open Qq Lean Meta class Semigroup (M) extends Mul M where mul_assoc {a b c : M} : (a*b)*c = a*(b*c) export Semigroup (mul_assoc) def maybeReassoc {M : Q(Type $u)} (mul : Q(Mul $M)) (a b : Q($M)) : MetaM (Option Q($a*($b*$b) = ($a*$b)*$b)) := do let .some inst ← trySynthInstanceQ q(Semigroup $M) | return none assertInstancesCommute return some q(by rw [mul_assoc]) def maybeReassocAlt {M : Q(Type $u)} (mul : Q(Mul $M)) (a b : Q($M)) : MetaM (Option Q($a*($b*$b) = ($a*$b)*$b)) := do let .some inst ← trySynthInstanceQ q(Semigroup $M) | return none assumeInstancesCommute return some q(by rw [mul_assoc]) def maybeReassocPure {M : Q(Type $u)} (mul : Q(Mul $M)) (a b : Q($M)) (semigroup : Q(Semigroup $M)) : Q($a*($b*$b) = ($a*$b)*$b) := assumeInstancesCommute' q(by rw [mul_assoc])
.lake/packages/Qq/QqTest/clauseConvertProp.lean
import Qq open Qq /-! Type-safe conversion from proofs of `a ∨ b` to proofs of `a ∨ b ∨ false`. -/ def or1 : List Q(Prop) → Q(Prop) | [] => q(False) | [p] => q($p) | p::ps => q($p ∨ $(or1 ps)) def or2 : List Q(Prop) → Q(Prop) | [] => q(False) | p::ps => q($p ∨ $(or2 ps)) def orChange : (ps : List Q(Prop)) → Q($(or1 ps) → $(or2 ps)) | [] => q(id) | [p] => q(Or.inl) | p::(ps1::ps2) => q(by intro h cases h with | inl h => exact Or.inl h | inr h => exact Or.inr ($(orChange (ps1::ps2)) h))
.lake/packages/Qq/QqTest/mkPairwiseEquality.lean
import Qq open Qq def mkPairwiseEquality {α : Q(Sort u)} : List Q($α) → Q(Prop) | [a, b] => q($a = $b) | a :: b :: cs => q($a = $b ∧ $(mkPairwiseEquality (b :: cs))) | _ => q(True)
.lake/packages/Qq/Qq/SortLocalDecls.lean
module public import Lean.Meta.Basic public section open Lean Meta namespace Qq namespace SortLocalDecls structure Context where localDecls : NameMap LocalDecl := {} structure State where visited : NameSet := {} result : Array LocalDecl := #[] abbrev M := ReaderT Context $ StateRefT State MetaM mutual partial def visitExpr (e : Expr) : M Unit := do match e with | .proj _ _ e => visitExpr e | .forallE _ d b _ => visitExpr d; visitExpr b | .lam _ d b _ => visitExpr d; visitExpr b | .letE _ t v b _ => visitExpr t; visitExpr v; visitExpr b | .app f a => visitExpr f; visitExpr a | .mdata _ b => visitExpr b | .mvar _ => let v ← instantiateMVars e; unless v.isMVar do visitExpr v | .fvar fvarId => if let some localDecl := (← read).localDecls.find? fvarId.name then visitLocalDecl localDecl | _ => return () partial def visitLocalDecl (localDecl : LocalDecl) : M Unit := do unless (← get).visited.contains localDecl.fvarId.name do modify fun s => { s with visited := s.visited.insert localDecl.fvarId.name } visitExpr localDecl.type if let some val := localDecl.value? then visitExpr val modify fun s => { s with result := s.result.push localDecl } end end SortLocalDecls open SortLocalDecls in def sortLocalDecls (localDecls : Array LocalDecl) : MetaM (Array LocalDecl) := let aux : M (Array LocalDecl) := do localDecls.forM visitLocalDecl; return (← get).result aux.run { localDecls := localDecls.foldl (init := {}) fun s d => s.insert d.fvarId.name d } |>.run' {}
.lake/packages/Qq/Qq/Simp.lean
module public import Qq.MetaM public section /-! # Qq integration for `simproc`s This file adds wrappers for operations relating to `simproc`s in the `Lean.Meta.Simp` namespace. It can be used as ```lean simproc_decl some_proc (some_pattern) := Meta.Simp.Simproc.ofQ fun u α e => do sorry ``` instead of the usual ```lean simproc_decl some_proc (some_pattern) := fun e => do sorry ``` -/ open Lean Qq variable {u : Level} {α : Q(Sort u)} namespace Lean.Meta.Simp /-- A copy of `Meta.Simp.Result` with explicit types. -/ @[expose] def ResultQ (_e : Q($α)) : Type := Lean.Meta.Simp.Result /-- A copy of `Meta.Simp.Result.mk` with explicit types. -/ @[inline] def ResultQ.mk {e : Q($α)} (expr : Q($α)) (proof? : Option Q($e = $expr)) (cache : Bool := true) : ResultQ e := {expr, proof?, cache} /-- A copy of `Meta.Simp.Step` with explicit types. -/ @[expose] def StepQ (_e : Q($α)) : Type := Step @[inherit_doc Step.done, inline] def StepQ.done {e : Q($α)} (r : ResultQ e) : StepQ e := Step.done r @[inherit_doc Step.visit, inline] def StepQ.visit {e : Q($α)} (r : ResultQ e) : StepQ e := Step.visit r @[inherit_doc Step.continue, inline] def StepQ.continue {e : Q($α)} (r : Option (ResultQ e) := none) : StepQ e := Step.continue r /-- A copy of `Lean.Meta.Simproc` with explicit types. See `Simproc.ofQ` to construct terms of this type. -/ abbrev SimprocQ : Type := ∀ (u : Level) (α : Q(Sort u)) (e : Q($α)), Meta.SimpM (StepQ e) /-- Build a simproc with Qq-enabled typechecking of inputs and outputs. This calls `inferTypeQ` on the expression and passes the arguments to `proc`. -/ @[inline] def Simproc.ofQ (proc : SimprocQ) : Simproc := fun e => do let ⟨u, α, e⟩ ← inferTypeQ e proc u α e end Lean.Meta.Simp
.lake/packages/Qq/Qq/AssertInstancesCommute.lean
module public import Qq.MetaM public section namespace Qq scoped syntax "assumeInstancesCommute'" term : term namespace Impl open Lean Meta Elab Term meta def isRedundantLocalInst? (inst : FVarId) : MetaM (Option Expr) := do let ldecl ← inst.getDecl if ldecl.hasValue then return none let rest := (← getLocalInstances).filter (·.fvar != .fvar inst) withLCtx (← getLCtx) rest do let some inst ← synthInstance? ldecl.type | return none return if (← makeDefEq ldecl.toExpr inst).isSome then inst else none meta def findRedundantLocalInst? : QuoteM (Option (FVarId × Expr)) := do for {fvar, ..} in ← withUnquotedLCtx getLocalInstances do if let some (.quoted (.fvar quotedFVar)) := (← read).exprBackSubst[fvar]? then if (← quotedFVar.getDecl).hasValue then continue if let some result ← withUnquotedLCtx do isRedundantLocalInst? fvar.fvarId! then return (fvar.fvarId!, result) return none meta def findRedundantLocalInstQuoted? : TermElabM (Option (FVarId × (u : Q(Level)) × (ty : Q(Quoted (.sort $u))) × Q(Quoted $ty) × Q(Quoted $ty))) := do for ldecl in ← getLCtx do let ty ← whnfR ldecl.type if ty.isMVar then tryPostpone if ty.isAppOf ``Quoted then if (← instantiateMVars ty.appArg!).hasExprMVar then tryPostpone StateT.run' (m := MetaM) (s := { mayPostpone := (← read).mayPostpone }) do unquoteLCtx (← findRedundantLocalInst?).mapM fun (fvar, inst) => do let ty ← withUnquotedLCtx do inferType (.fvar fvar) let u ← withUnquotedLCtx do getLevel ty return ⟨fvar, ← quoteLevel u, ← quoteExpr ty, ← quoteExpr (.fvar fvar), ← quoteExpr inst⟩ scoped syntax "assertInstancesCommuteImpl" term : term elab_rules : term <= expectedType | `(assertInstancesCommuteImpl $cont) => do match ← findRedundantLocalInstQuoted? 
with | some ⟨fvar, _, _, lhs, rhs⟩ => let n ← mkFreshUserName ((← fvar.getUserName).eraseMacroScopes.appendAfter "_eq") let cmd := q(withNewMCtxDepth do withDefault do assertDefEqQ $lhs $rhs) elabTerm (← `($(← exprToSyntax cmd) >>= fun __defeqres => have $(mkIdent n) := __defeqres.1 assertInstancesCommuteImpl $cont)) expectedType | none => elabTerm cont expectedType elab_rules : term <= expectedType | `(assumeInstancesCommute' $cont) => do match ← findRedundantLocalInstQuoted? with | some ⟨fvar, _, _, lhs, rhs⟩ => let n ← mkFreshUserName ((← fvar.getUserName).eraseMacroScopes.appendAfter "_eq") let ty := q(QuotedDefEq $lhs $rhs) elabTerm (← `( have $(mkIdent n) : $(← exprToSyntax ty) := ⟨⟩ assumeInstancesCommute' $cont)) expectedType | none => elabTerm cont expectedType scoped syntax "assertInstancesCommuteDummy" : term macro_rules | `(assert! assertInstancesCommuteDummy; $cont) => `(assertInstancesCommuteImpl $cont) syntax "assumeInstancesCommuteDummy" : term macro_rules | `(assert! assumeInstancesCommuteDummy; $cont) => `(assumeInstancesCommute' $cont) end Impl open Impl scoped macro "assertInstancesCommute" : doElem => `(doElem| assert! assertInstancesCommuteDummy) scoped macro "assumeInstancesCommute" : doElem => `(doElem| assert! assumeInstancesCommuteDummy)
.lake/packages/Qq/Qq/MetaM.lean
module public import Qq.Macro public import Qq.Delab import Qq.Typ public section /-! # `Qq`-ified spellings of functions in `Lean.Meta` This file provides variants of the function in the `Lean.Meta` namespace, which operate with `Q(_)` instead of `Expr`. -/ open Lean Elab Term Meta namespace Qq def mkFreshExprMVarQ (ty : Q(Sort u)) (kind := MetavarKind.natural) (userName := Name.anonymous) : MetaM Q($ty) := do mkFreshExprMVar (some ty) kind userName def withLocalDeclDQ [Monad n] [MonadControlT MetaM n] (name : Name) (β : Q(Sort u)) (k : Q($β) → n α) : n α := withLocalDeclD name β k def withLocalDeclQ [Monad n] [MonadControlT MetaM n] (name : Name) (bi : BinderInfo) (β : Q(Sort u)) (k : Q($β) → n α) : n α := withLocalDecl name bi β k def trySynthInstanceQ (α : Q(Sort u)) : MetaM (LOption Q($α)) := do trySynthInstance α def synthInstanceQ (α : Q(Sort u)) : MetaM Q($α) := do synthInstance α def instantiateMVarsQ {α : Q(Sort u)} (e : Q($α)) : MetaM Q($α) := do instantiateMVars e def elabTermEnsuringTypeQ (stx : Syntax) (expectedType : Q(Sort u)) (catchExPostpone := true) (implicitLambda := true) (errorMsgHeader? : Option String := none) : TermElabM Q($expectedType) := do elabTermEnsuringType stx (some expectedType) catchExPostpone implicitLambda errorMsgHeader? /-- A `Qq`-ified version of `Lean.Meta.inferType` Instead of writing `let α ← inferType e`, this allows writing `let ⟨u, α, e⟩ ← inferTypeQ e`, which results in a context of ``` e✝ : Expr u : Level α : Q(Type u) e : Q($α) ``` Here, the new `e` is defeq to the old one, but now has `Qq`-ascribed type information. This is frequently useful when using the `~q` matching from `QQ/Match.lean`, as it allows an `Expr` to be turned into something that can be matched upon. -/ def inferTypeQ (e : Expr) : MetaM ((u : Level) × (α : Q(Sort $u)) × Q($α)) := do let α ← inferType e let .sort u ← whnf (← inferType α) | throwError "not a type{indentExpr α}" pure ⟨u, α, e⟩ /-- If `e` is a `ty`, then view it as a term of `Q($ty)`. 
-/ def checkTypeQ (e : Expr) (ty : Q(Sort $u)) : MetaM (Option Q($ty)) := do if ← isDefEq (← inferType e) ty then return some e else return none /-- The result of `Qq.isDefEqQ`; `MaybeDefEq a b` is an optional version of `$a =Q $b`. -/ inductive MaybeDefEq {α : Q(Sort $u)} (a b : Q($α)) where | defEq : QuotedDefEq a b → MaybeDefEq a b | notDefEq : MaybeDefEq a b instance : Repr (MaybeDefEq a b) where reprPrec := fun | .defEq _, prec => Repr.addAppParen "defEq _" prec | .notDefEq, _ => "notDefEq" /-- A version of `Lean.Meta.isDefEq` which returns a strongly-typed witness rather than a bool. -/ def isDefEqQ {α : Q(Sort $u)} (a b : Q($α)) : MetaM (MaybeDefEq a b) := do if ← isDefEq a b then return .defEq ⟨⟩ else return .notDefEq /-- Like `Qq.isDefEqQ`, but throws an error if not defeq. -/ def assertDefEqQ {α : Q(Sort $u)} (a b : Q($α)) : MetaM (PLift (QuotedDefEq a b)) := do match ← isDefEqQ a b with | .defEq witness => return ⟨witness⟩ | .notDefEq => throwError "{a} is not definitionally equal to{indentExpr b}" /-- The result of `Qq.isLevelDefEqQ`; `MaybeLevelDefEq u v` is an optional version of `$u =QL $v`. -/ inductive MaybeLevelDefEq (u v : Level) where | defEq : QuotedLevelDefEq u v → MaybeLevelDefEq u v | notDefEq : MaybeLevelDefEq u v instance : Repr (MaybeLevelDefEq u v) where reprPrec := fun | .defEq _, prec => Repr.addAppParen "defEq _" prec | .notDefEq, _ => "notDefEq" /-- A version of `Lean.Meta.isLevelDefEq` which returns a strongly-typed witness rather than a bool. -/ def isLevelDefEqQ (u v : Level) : MetaM (MaybeLevelDefEq u v) := do if ← isLevelDefEq u v then return .defEq ⟨⟩ else return .notDefEq /-- Like `Qq.isLevelDefEqQ`, but throws an error if not defeq. -/ def assertLevelDefEqQ (u v : Level) : MetaM (PLift (QuotedLevelDefEq u v)) := do match ← isLevelDefEqQ u v with | .defEq witness => return ⟨witness⟩ | .notDefEq => throwError "{u} and {v} are not definitionally equal"
.lake/packages/Qq/Qq/Typ.lean
module

public import Lean

public section

open Lean

-- `Quoted α` deliberately ignores its binder `α` (it unfolds to `Expr`), so the unused-variable
-- linter would fire on every definition below; silence it for this file.
set_option linter.unusedVariables false

namespace Qq

/--
`Quoted α` is the type of Lean expressions having type `α`.

You should usually write this using the notation `Q($α)`.
-/
@[expose] def Quoted (α : Expr) := Expr

/--
Creates a quoted expression.  Requires that `e` has type `α`.

You should usually write this using the notation `q($e)`.
-/
@[expose] protected def Quoted.unsafeMk (e : Expr) : Quoted α := e

-- `Quoted α` is definitionally `Expr`, so every structural instance is inherited from `Expr`.
instance : BEq (Quoted α) := inferInstanceAs (BEq Expr)
instance : Hashable (Quoted α) := inferInstanceAs (Hashable Expr)
instance : Inhabited (Quoted α) := inferInstanceAs (Inhabited Expr)
instance : ToString (Quoted α) := inferInstanceAs (ToString Expr)
instance : Repr (Quoted α) := inferInstanceAs (Repr Expr)

instance : CoeOut (Quoted α) Expr where coe e := e
instance : CoeOut (Quoted α) MessageData where coe := .ofExpr
instance : ToMessageData (Quoted α) where toMessageData := .ofExpr

/-- Gets the type of a quoted expression. -/
protected abbrev Quoted.ty (t : Quoted α) : Expr := α

/--
`QuotedDefEq lhs rhs` says that the expressions `lhs` and `rhs` are definitionally equal.

You should usually write this using the notation `$lhs =Q $rhs`.
-/
structure QuotedDefEq {α : Quoted (.sort u)} (lhs rhs : Quoted α) : Prop where
  /-- For a safer constructor, see `Qq.assertDefEqQ`. -/
  unsafeIntro ::

/--
`QuotedLevelDefEq u v` says that the levels `u` and `v` are definitionally equal.

You should usually write this using the notation `$u =QL $v`.
-/
structure QuotedLevelDefEq (u v : Level) : Prop where
  /-- For a safer constructor, see `Qq.assertLevelDefEqQ`. -/
  unsafeIntro ::

open Meta in
/-- Check that a term `e : Q(α)` really has type `α`. -/
protected def Quoted.check (e : Quoted α) : MetaM Unit := do
  let α' ← inferType e
  unless ← isDefEq α α' do
    throwError "type mismatch{indentExpr e}\n{← mkHasTypeButIsExpectedMsg α' α}"

open Meta in
/-- Check that the claim `$lhs =Q $rhs` is actually true; that the two terms are defeq. -/
protected def QuotedDefEq.check (e : @QuotedDefEq u α lhs rhs) : MetaM Unit := do
  -- Validate the type annotations of all three pieces before comparing the sides.
  α.check
  lhs.check
  rhs.check
  unless ← isDefEq lhs rhs do
    throwError "{lhs} and {rhs} are not defeq"

open Meta in
/-- Check that the claim `$u =QL $v` is actually true; that the two levels are defeq. -/
protected def QuotedLevelDefEq.check (e : QuotedLevelDefEq lhs rhs) : MetaM Unit := do
  unless ← isLevelDefEq lhs rhs do
    throwError "{lhs} and {rhs} are not defeq"
.lake/packages/Qq/Qq/Macro.lean
module

public import Lean
public meta import Qq.ForLean.ReduceEval
public meta import Qq.ForLean.ToExpr
public meta import Qq.Typ
meta import Lean.Elab.Term.TermElabM
meta import Lean.Util.CollectLevelParams
meta import Lean.Elab.SyntheticMVars

public section

/-! # The `q( )` and `Q( )` macros

This file provides the main feature of `Qq`; the `q( )` and `Q( )` macros, which are available with
`open scoped Qq`.
-/

open Lean Meta

namespace Qq

namespace Impl

/-- How an unquoted free variable maps back to the original (quoted) context: either directly a
quoted `Expr` to splice in, or an unquoted expression that still needs to be quoted
(see `quoteExpr`'s `.fvar` case). -/
meta inductive ExprBackSubstResult
  | quoted (e : Expr)
  | unquoted (e : Expr)

/-- Records how an outside metavariable is to be synthesized from its unquoted counterpart once
elaboration in the unquoted context has finished (see `MVarSynth.synth`). -/
inductive MVarSynth
  | term (quotedType : Expr) (unquotedMVar : MVarId) --> Quoted.unsafeMk _ _
  | type (unquotedMVar : MVarId) --> Quoted _
  | level (unquotedMVar : LMVarId) --> Level

/-- State threaded through unquotation: the translation tables between the outside (quoted)
context and the freshly built unquoted local context. -/
meta structure UnquoteState where
  /-- Quoted mvars in the outside lctx (of type `Level`, `Quoted _`, or `Type`).
  The outside mvars can also be of the form `?m x y z`. -/
  mvars : List (Expr × MVarSynth) := []

  /-- Maps quoted expressions (of type Level) in the old context to level parameter names in the
  new context -/
  levelSubst : Std.HashMap Expr Level := {}

  /-- Maps quoted expressions (of type Expr) in the old context to expressions in the new
  context -/
  exprSubst : Std.HashMap Expr Expr := {}

  /-- New unquoted local context -/
  unquoted := LocalContext.empty

  /-- Maps free variables in the new context to expressions in the old context (of type Expr) -/
  exprBackSubst : Std.HashMap Expr ExprBackSubstResult := {}

  /-- Maps free variables in the new context to levels in the old context (of type Level) -/
  levelBackSubst : Std.HashMap Level Expr := {}

  /-- New free variables in the new context that were newly introduced for irreducible
  expressions. -/
  abstractedFVars : Array FVarId := #[]

  -- User-visible level parameter names collected from the outside context.
  levelNames : List Name := []

  -- Whether elaboration may postpone (via `Elab.throwPostpone`) when hitting unresolved mvars.
  mayPostpone : Bool

/-- Monad for building up the unquoted context (mutates `UnquoteState`). -/
abbrev UnquoteM := StateT UnquoteState MetaM

/-- Monad for translating back from the unquoted context (reads a fixed `UnquoteState`). -/
abbrev QuoteM := ReaderT UnquoteState MetaM

meta instance : MonadLift QuoteM UnquoteM where
  monadLift k := do k (← get)

/-- Collects the local class instances among the declarations of `lctx`. -/
meta def determineLocalInstances (lctx : LocalContext) : MetaM LocalInstances := do
  let mut localInsts : LocalInstances := {}
  for ldecl in lctx do
    match (← isClass? ldecl.type) with
    | some c => localInsts := localInsts.push { className := c, fvar := ldecl.toExpr }
    | none => pure ()
  pure localInsts

/-- Runs `k` inside the unquoted local context stored in the state, with its local instances. -/
meta def withUnquotedLCtx [MonadControlT MetaM m] [Monad m] [MonadLiftT QuoteM m] (k : m α) :
    m α := do
  let unquoted := (← (read : QuoteM _)).unquoted
  withLCtx unquoted (← (determineLocalInstances unquoted : QuoteM _)) k

open Name in
/-- Prefixes the base string component of a name with `$` (used to rename hypotheses when they
move between the quoted and unquoted contexts). -/
meta def addDollar : Name → Name
  | anonymous => str anonymous "$"
  | str anonymous s => str anonymous ("$" ++ s)
  | str n s => str (addDollar n) s
  | num n i => num (addDollar n) i

open Name in
/-- Inverse of `addDollar`: strips a single leading `$` from the base component, or returns
`none` if there is none. -/
meta def removeDollar : Name → Option Name
  | anonymous => none
  | str anonymous "$" => some anonymous
  | str anonymous s =>
    if s.startsWith "$" then str anonymous (s.drop 1) else none
  | str n s => (removeDollar n).map (str . s)
  | num n i => (removeDollar n).map (num . i)

open Name in
/-- Strips all leading `$` characters from the base component of a name. -/
meta def stripDollars : Name → Name
  | anonymous => anonymous
  | str n "$" => stripDollars n
  | str anonymous s =>
    let s := s.dropWhile (· = '$')
    if s = "" then anonymous else str anonymous s
  | str n s => str (stripDollars n) s
  | num n i => num (stripDollars n) i

/-- Applies `addDollar` to the value of an identifier syntax node; panics on non-identifiers. -/
meta def addSyntaxDollar : Syntax → Syntax
  | .ident info rawVal val preresolved => .ident info rawVal (addDollar val) preresolved
  | stx => panic! s!"addSyntaxDollar {stx}"

/-- Fresh level-parameter name for an irreducible quoted level, derived from its head constant. -/
meta def mkAbstractedLevelName (e : Expr) : MetaM Name :=
  return e.getAppFn.constName?.getD `udummy ++ (← mkFreshId)

/-- True if `e` is an assignable, non-synthetic mvar applied to pairwise-distinct fvars — i.e. a
pattern that can be solved by mvar assignment. -/
meta def isAssignablePattern (e : Expr) : MetaM Bool := do
  let e ← instantiateMVars (← whnf e)
  let .mvar mvarId := e.getAppFn | return false
  unless ← mvarId.isAssignable do return false
  if (← mvarId.getKind) matches .synthetic then return false
  return e.getAppArgs.all (·.isFVar) && e.getAppArgs.allDiff

/-- True if the head of `e` is a recursor application (we do not want whnf to expose those). -/
meta def isBad (e : Expr) : Bool := Id.run do
  if let .const (.str _ "rec") _ := e.getAppFn then
    return true
  return false

-- WHNF that backs off before exposing recursors; see
-- https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/How.20to.20WHNF.20without.20exposing.20recursors.3F/near/249743042
meta def whnf (e : Expr) (e0 : Expr := e) : MetaM Expr := do
  let e ← whnfCore e
  let e0 := if isBad e then e0 else e
  match ← unfoldDefinition? e with
  | some e => whnf e (if isBad e then e0 else e)
  | none => pure e0

/-- Reducible-transparency variant of the recursor-avoiding `whnf` above. -/
meta def whnfR (e : Expr) : MetaM Expr :=
  withReducible (whnf e)

mutual

/-- Interprets a reflected `Level` expression (built from `Level.zero`, `Level.succ`, …) as an
actual `Level` in the unquoted context, abstracting irreducible subterms as fresh level params. -/
meta partial def unquoteLevel (e : Expr) : UnquoteM Level := do
  let e ← whnf e
  if let some l := (← get).levelSubst[e]? then
    return l
  if e.isAppOfArity ``Level.zero 0 then pure .zero
  else if e.isAppOfArity ``Level.succ 1 then return .succ (← unquoteLevel (e.getArg! 0))
  else if e.isAppOfArity ``Level.max 2 then
    return .max (← unquoteLevel (e.getArg! 0)) (← unquoteLevel (e.getArg! 1))
  else if e.isAppOfArity ``Level.imax 2 then
    return .imax (← unquoteLevel (e.getArg! 0)) (← unquoteLevel (e.getArg! 1))
  else if e.isAppOfArity ``Level.param 1 then return .param (← reduceEval (e.getArg! 0))
  else if e.isAppOfArity ``Level.mvar 1 then return .mvar (← reduceEval (e.getArg! 0))
  else if ← isAssignablePattern e then
    return ← unquoteLevelMVar e
  if (← get).mayPostpone && e.getAppFn.isMVar then Elab.throwPostpone
  -- Irreducible: abstract it as a fresh level parameter and remember both directions.
  let name ← mkAbstractedLevelName e
  let l := .param name
  modify fun s => { s with
    levelSubst := s.levelSubst.insert e l
    levelBackSubst := s.levelBackSubst.insert l e }
  pure l

/-- Creates a fresh level mvar in the unquoted context for the quoted level mvar `mvar` and
records it for later synthesis. -/
meta partial def unquoteLevelMVar (mvar : Expr) : UnquoteM Level := do
  let newMVar ← mkFreshLevelMVar
  modify fun s => { s with
    levelSubst := s.levelSubst.insert mvar newMVar
    levelBackSubst := s.levelBackSubst.insert newMVar mvar
    mvars := (mvar, .level newMVar.mvarId!) :: s.mvars }
  return newMVar

end

/-- Unquotes a reflected `List Level` literal element by element. -/
meta def unquoteLevelList (e : Expr) : UnquoteM (List Level) := do
  let e ← whnf e
  if e.isAppOfArity ``List.nil 1 then
    pure []
  else if e.isAppOfArity ``List.cons 3 then
    return (← unquoteLevel (e.getArg! 1)) :: (← unquoteLevelList (e.getArg! 2))
  else
    throwFailedToEval e

/-- Fresh user name `$<head>_i` for an irreducible quoted expression, avoiding names already used
in the unquoted context. -/
meta def mkAbstractedName (e : Expr) : UnquoteM Name := do
  have base : Name :=
    match e.getAppFn.constName? with
    | some (.str _ s) => .mkSimple s!"${s}"
    | _ => `unknown
  let mut i := 0
  repeat
    i := i + 1
    let n := base.appendIndexAfter i
    unless (← get).unquoted.usesUserName n do
      return n
  unreachable!

-- Opaque so that occurrences survive in quoted terms; semantically `betaRev`.
@[inline] meta opaque betaRev' (e : Expr) (revArgs : List Expr) : Expr :=
  e.betaRev revArgs.toArray

/-- Attempts to rebuild the local context so that the (non-let) fvar `a` becomes a let-binding
with value `b`; returns `none` when `b` depends on something that would have to be reverted. -/
meta def makeZetaReduce (a : FVarId) (b : Expr) : MetaM (Option LocalContext) := do
  let decl ← a.getDecl
  if decl.isLet then return none
  let bFVars := (← b.collectFVars.run {}).2
  let toRevert := (← collectForwardDeps #[.fvar a] (preserveOrder := true)).map (·.fvarId!)
  for y in toRevert do
    if bFVars.fvarSet.contains y then return none
  let oldLCtx ← getLCtx
  let newLCtx := toRevert.foldl (init := oldLCtx) (·.erase ·)
  let newLCtx := newLCtx.mkLetDecl decl.fvarId decl.userName decl.type b (kind := decl.kind)
  let newLCtx := toRevert.filter (· != a) |>.foldl (init := newLCtx) (·.addDecl <| oldLCtx.get! ·)
  return newLCtx

/-- Tries `makeZetaReduce` in either direction for a defeq `a ≡ b` whose side whnf's to an
fvar. -/
meta def makeDefEq (a b : Expr) : MetaM (Option LocalContext) := do
  if let .fvar a ← whnf a then
    if let some lctx ← makeZetaReduce a b then return lctx
  if let .fvar b ← whnf b then
    if let some lctx ← makeZetaReduce b a then return lctx
  return none

mutual

/-- Unquotes a reflected `List Expr` literal element by element. -/
meta def unquoteExprList (e : Expr) : UnquoteM (List Expr) := do
  let e ← whnf e
  if e.isAppOfArity ``List.nil 1 then
    pure []
  else if e.isAppOfArity ``List.cons 3 then
    return (← unquoteExpr (e.getArg! 1)) :: (← unquoteExprList (e.getArg! 2))
  else
    throwFailedToEval e

/-- Creates a fresh expr mvar in the unquoted context mirroring the quoted `Quoted _` mvar
`mvar`, and records it for later synthesis. -/
meta def unquoteExprMVar (mvar : Expr) : UnquoteM Expr := do
  let ty ← instantiateMVars (← whnfR (← inferType mvar))
  unless ty.isAppOf ``Quoted do
    throwError "not of type Q(_):{indentExpr ty}"
  have et := ty.getArg! 0
  let newET ← unquoteExpr et
  let newMVar ← withUnquotedLCtx do mkFreshExprMVar newET
  modify fun s => { s with
    exprSubst := s.exprSubst.insert mvar newMVar
    exprBackSubst := s.exprBackSubst.insert newMVar (.quoted mvar)
    mvars := (mvar, .term et newMVar.mvarId!) :: s.mvars }
  return newMVar

/-- Interprets a quoted expression — either a `Quoted _` value or a reflected `Expr`
constructor application — as an expression in the unquoted context, abstracting irreducible
`Quoted _` terms as fresh local hypotheses. -/
meta def unquoteExpr (e : Expr) : UnquoteM Expr := do
  if e.isAppOfArity ``Quoted.unsafeMk 2 then return ← unquoteExpr (e.getArg! 1)
  if e.isAppOfArity ``toExpr 3 then return e.getArg! 2
  let e ← instantiateMVars (← whnf e)
  let eTy ← whnfR (← inferType e)
  if eTy.isAppOfArity ``Quoted 1 then
    if let some e' := (← get).exprSubst[e]? then
      return e'
    if ← isAssignablePattern e then
      return ← unquoteExprMVar e
    if (← get).mayPostpone && e.getAppFn.isMVar then Elab.throwPostpone
    -- Irreducible quoted term: introduce a fresh fvar for it in the unquoted context.
    let ty ← unquoteExpr (eTy.getArg! 0)
    let fvarId := FVarId.mk (← mkFreshId)
    let name ← mkAbstractedName e
    let fv := .fvar fvarId
    modify fun s => { s with
      unquoted := s.unquoted.mkLocalDecl fvarId name ty
      exprSubst := s.exprSubst.insert e fv
      exprBackSubst := s.exprBackSubst.insert fv (.quoted e)
      abstractedFVars := s.abstractedFVars.push fvarId }
    return fv
  -- Otherwise `e` must be a reflected `Expr` constructor application.
  let e ← whnf e
  let .const c _ := e.getAppFn | throwError "unquoteExpr: {e} : {eTy}"
  let nargs := e.getAppNumArgs
  match c, nargs with
  | ``betaRev', 2 =>
    return betaRev' (← unquoteExpr (e.getArg! 0)) (← unquoteExprList (e.getArg! 1))
  | ``Expr.bvar, 1 => return .bvar (← reduceEval (e.getArg! 0))
  | ``Expr.sort, 1 => return .sort (← unquoteLevel (e.getArg! 0))
  | ``Expr.const, 2 =>
    return .const (← reduceEval (e.getArg! 0)) (← unquoteLevelList (e.getArg! 1))
  | ``Expr.app, 2 => return .app (← unquoteExpr (e.getArg! 0)) (← unquoteExpr (e.getArg! 1))
  | ``Expr.lam, 4 =>
    return .lam (← reduceEval (e.getArg! 0)) (← unquoteExpr (e.getArg! 1))
      (← unquoteExpr (e.getArg! 2)) (← reduceEval (e.getArg! 3))
  | ``Expr.forallE, 4 =>
    return .forallE (← reduceEval (e.getArg! 0)) (← unquoteExpr (e.getArg! 1))
      (← unquoteExpr (e.getArg! 2)) (← reduceEval (e.getArg! 3))
  | ``Expr.letE, 5 =>
    return .letE (← reduceEval (e.getArg! 0)) (← unquoteExpr (e.getArg! 1))
      (← unquoteExpr (e.getArg! 2)) (← unquoteExpr (e.getArg! 3)) (← reduceEval (e.getArg! 4))
  | ``Expr.lit, 1 => return .lit (← reduceEval (e.getArg! 0))
  | ``Expr.proj, 3 =>
    return .proj (← reduceEval (e.getArg! 0)) (← reduceEval (e.getArg! 1))
      (← unquoteExpr (e.getArg! 2))
  | _, _ => throwError "unquoteExpr: {e} : {eTy}"

end

/-- Substitutes level parameter `a` by `b` throughout the `levelSubst` table. -/
meta def substLevel (a : Name) (b : Level) : UnquoteM Unit :=
  modify fun s => { s with
    levelSubst := .ofList <| s.levelSubst.toList |>.map fun (x, u) =>
      (x, u.instantiateParams [a] [b]) }

/-- First pass over the outside local context: registers `Level`-typed hypotheses as level
parameters, and (when `addDefEqs`) uses `QuotedLevelDefEq` hypotheses to identify levels. -/
meta def unquoteLevelLCtx (addDefEqs := true) : UnquoteM Unit := do
  for ldecl in (← getLCtx) do
    let fv := ldecl.toExpr
    let ty := ldecl.type
    let whnfTy ← withReducible <| whnf ty
    if whnfTy.isAppOfArity ``Level 0 then
      modify fun s => { s with
        levelNames := ldecl.userName :: s.levelNames
        levelSubst := s.levelSubst.insert fv (.param ldecl.userName) }
    else if let .app (.app (.const ``QuotedLevelDefEq ..) u) v := whnfTy then
      let u' ← unquoteLevel u
      let v' ← unquoteLevel v
      if addDefEqs then
        -- Eliminate one side when it is a parameter not occurring in the other side.
        if let .param n := u' then
          if !u'.occurs v' then
            substLevel n v'; continue
        if let .param n := v' then
          if !v'.occurs u' then
            substLevel n u'; continue

/-- Translates the outside local context into the unquoted context: `Q(_)` hypotheses become
hypotheses of the unquoted type, `QuotedDefEq` hypotheses become `Eq` hypotheses (realized by
`Eq.refl` on back-substitution), and `ToExpr`-able values are kept via `toExpr`. -/
meta def unquoteLCtx : UnquoteM Unit := do
  unquoteLevelLCtx
  for ldecl in (← getLCtx) do
    let fv := ldecl.toExpr
    let ty := ldecl.type
    let whnfTy ← withReducible <| whnf ty
    if whnfTy.isAppOfArity ``QuotedLevelDefEq 2 || whnfTy.isAppOfArity ``Level 0 then
      pure () -- see above
    if whnfTy.isAppOfArity ``Quoted 1 then
      let qTy := whnfTy.appArg!
      let newTy ← unquoteExpr qTy
      modify fun s => { s with
        unquoted := s.unquoted.mkLocalDecl ldecl.fvarId (addDollar ldecl.userName) newTy
          ldecl.binderInfo ldecl.kind
        exprBackSubst := s.exprBackSubst.insert fv (.quoted fv)
        exprSubst := s.exprSubst.insert fv fv }
    else if whnfTy.isAppOfArity ``QuotedDefEq 4 then
      let tyLevel ← unquoteLevel (whnfTy.getArg! 0)
      let ty ← unquoteExpr (whnfTy.getArg! 1)
      let lhs ← unquoteExpr (whnfTy.getArg! 2)
      let rhs ← unquoteExpr (whnfTy.getArg! 3)
      let eqTy := mkApp3 (.const ``Eq [tyLevel]) ty lhs rhs
      let unquoted := (← get).unquoted
      let unquoted := unquoted.mkLocalDecl ldecl.fvarId (addDollar ldecl.userName) eqTy
        ldecl.binderInfo ldecl.kind
      -- Prefer turning the defeq into a let-binding when one side is an fvar.
      let unquoted := (← withUnquotedLCtx do makeDefEq lhs rhs).getD unquoted
      modify fun s => { s with
        unquoted
        exprBackSubst := s.exprBackSubst.insert fv
          (.unquoted (mkApp2 (.const ``Eq.refl [tyLevel]) ty lhs))
        -- exprSubst := s.exprSubst.insert fv fv
      }
    else
      let .succ u ← getLevel ty | pure ()
      let LOption.some inst ← trySynthInstance (mkApp (.const ``ToExpr [u]) ty) | pure ()
      modify fun s => { s with
        unquoted := s.unquoted.addDecl (ldecl.setUserName (addDollar ldecl.userName))
        exprBackSubst := s.exprBackSubst.insert fv
          (.quoted (mkApp3 (.const ``toExpr [u]) ty inst fv))
        exprSubst := s.exprSubst.insert fv fv }

/-- Looks up a local hypothesis of type `Level` with user name `n`. -/
meta def isLevelFVar (n : Name) : MetaM (Option Expr) := do
  match (← getLCtx).findFromUserName? n with
  | none => pure none
  | some decl =>
    return if ← isDefEq decl.type (.const ``Level []) then
      some decl.toExpr
    else
      none

/-- Reifies a `Level` from the unquoted context back into a quoted `Level` expression, using the
back-substitution tables for mvars and abstracted parameters. -/
meta def quoteLevel : Level → QuoteM Expr
  | .zero => return .const ``Level.zero []
  | .succ u => return mkApp (.const ``Level.succ []) (← quoteLevel u)
  | l@(.mvar ..) => do
    if let some e := (← read).levelBackSubst[l]? then return e
    throwError "cannot quote level mvar {l}"
  | .max a b => return mkApp2 (.const ``Level.max []) (← quoteLevel a) (← quoteLevel b)
  | .imax a b => return mkApp2 (.const ``Level.imax []) (← quoteLevel a) (← quoteLevel b)
  | l@(.param n) => do
    match (← read).levelBackSubst[l]? with
    | some e => return e
    | none =>
      match ← isLevelFVar n with
      | some fv => return fv
      | none =>
        throwError "universe parameter {n} not of type Level"

/-- Reifies a `List Level` into a quoted `List Level` literal. -/
meta def quoteLevelList : List Level → QuoteM Expr
  | [] => return mkApp (.const ``List.nil [.zero]) (.const ``Level [])
  | l::ls => do
    return mkApp3 (.const ``List.cons [.zero]) (.const ``Level [])
      (← quoteLevel l) (← quoteLevelList ls)

/-- Reifies an expression from the unquoted context back into a quoted `Expr`: fvars and mvars go
through the back-substitution tables, everything else is rebuilt from `Expr` constructors. -/
meta def quoteExpr : Expr → QuoteM Expr
  | .bvar i => return mkApp (.const ``Expr.bvar []) (toExpr i)
  | e@(.fvar ..) => do
    let some r := (← read).exprBackSubst[e]? | throwError "unknown free variable {e}"
    match r with
    | .quoted r => return r
    | .unquoted r => quoteExpr r
  | e@(.mvar ..) => do
    if let some (.quoted r) := (← read).exprBackSubst[e]? then return r
    throwError "resulting term contains metavariable {e}"
  | .sort u => return mkApp (.const ``Expr.sort []) (← quoteLevel u)
  | .const n ls => return mkApp2 (.const ``Expr.const []) (toExpr n) (← quoteLevelList ls)
  | e@(.app _ _) => do
    let fn ← quoteExpr e.getAppFn
    let args ← e.getAppArgs.mapM quoteExpr
    if e.getAppFn.isFVar then -- TODO make configurable
      -- Applications of back-substituted fvars are beta-reduced at runtime via `betaRev'`.
      return mkApp2 (.const ``betaRev' []) fn $
        args.foldl (flip $ mkApp3 (.const ``List.cons [.zero]) (.const ``Expr []))
          (mkApp (.const ``List.nil [.zero]) (.const ``Expr []))
    else
      pure $ args.foldl (mkApp2 (.const ``Expr.app [])) fn
  | .lam n t b d => do
    return mkApp4 (.const ``Expr.lam []) (toExpr n.eraseMacroScopes)
      (← quoteExpr t) (← quoteExpr b) (toExpr d)
  | .forallE n t b d => do
    return mkApp4 (.const ``Expr.forallE [])
      (toExpr $ if b.hasLooseBVar 0 then n.eraseMacroScopes else Name.anonymous)
      (← quoteExpr t) (← quoteExpr b) (toExpr d)
  | .letE n t v b d => do
    return mkApp5 (.const ``Expr.letE []) (toExpr n.eraseMacroScopes) (← quoteExpr t)
      (← quoteExpr v) (← quoteExpr b) (toExpr d)
  | .lit l => return mkApp (.const ``Expr.lit []) (toExpr l)
  | .proj n i e => return mkApp3 (.const ``Expr.proj []) (toExpr n) (toExpr i) (← quoteExpr e)
  | .mdata _ e => quoteExpr e

/-- Translates an arbitrary local context to a context of Q-annotated expressions.
Used by `by_elabq` and `run_tacq`. -/
meta def quoteLCtx (ctx : LocalContext) (levelNames : List Name) :
    UnquoteM (LocalContext × Array Expr) := do
  let mut quotedCtx := LocalContext.empty
  let mut assignments : Array Expr := #[]
  for nm in levelNames do
    let fid ← mkFreshFVarId
    quotedCtx := quotedCtx.mkLocalDecl fid nm (mkConst ``Level) .default .default
    modify fun s =>
      { s with levelBackSubst := s.levelBackSubst.insert (.param nm) (.fvar fid) }
    assignments := assignments.push (toExpr (Level.param nm))
  for decl in ctx do
    if decl.kind != .default then continue
    let fid ← mkFreshFVarId
    let type ← instantiateMVars decl.type
    let quotedType ← quoteExpr type
    quotedCtx := quotedCtx.mkLocalDecl fid decl.userName
      (mkApp (mkConst ``Quoted) quotedType) decl.binderInfo
    assignments := assignments.push (toExpr (Expr.fvar decl.fvarId))
    if decl.isLet then
      -- Record let-bindings as `QuotedDefEq` hypotheses between the fvar and its value.
      let eqFid ← mkFreshFVarId
      let level ← getLevel type
      let quotedLevel ← quoteLevel (← instantiateLevelMVars level)
      let quotedValue ← quoteExpr (← instantiateMVars decl.value)
      quotedCtx := quotedCtx.mkLocalDecl eqFid
        (← mkFreshUserName (decl.userName.appendAfter "_eq"))
        (mkApp4 (mkConst ``QuotedDefEq) quotedLevel quotedType (.fvar fid) quotedValue)
      assignments := assignments.push
        (mkApp4 (mkConst ``QuotedDefEq.unsafeIntro) quotedLevel quotedType (.fvar fid)
          quotedValue)
    modify fun s => { s with
      exprBackSubst := s.exprBackSubst.insert (.fvar decl.fvarId) (.quoted (.fvar fid)) }
  return (quotedCtx, assignments)

/-- Creates the unquoted counterpart of the main metavariable, dispatching on whether its type is
`Quoted _`, `Level`, or a sort. -/
meta def unquoteMVarCore (mvar : Expr) : UnquoteM Unit := do
  let ty ← instantiateMVars (← whnfR (← inferType mvar))
  if ty.isAppOf ``Quoted then
    _ ← unquoteExprMVar mvar
  else if ty.isAppOf ``Level then
    _ ← unquoteLevelMVar mvar
  else if ty.isSort then
    let newMVar ← withUnquotedLCtx do mkFreshTypeMVar
    modify fun s => { s with
      exprSubst := s.exprSubst.insert mvar newMVar
      exprBackSubst := s.exprBackSubst.insert newMVar (.quoted mvar)
      mvars := (mvar, .type newMVar.mvarId!) :: s.mvars }
  else
    throwError "unsupported expected type for quoted expression{indentExpr ty}"

/-- Full unquotation entry point: translates the local context, then the main metavariable. -/
meta def unquoteMVar (mvar : Expr) : UnquoteM Unit := do
  unquoteLCtx
  unquoteMVarCore mvar

/-- Whether the unquoted counterpart of this outside mvar has been assigned. -/
meta def MVarSynth.isAssigned : MVarSynth → MetaM Bool
  | .term _ newMVar => newMVar.isAssigned
  | .type newMVar => newMVar.isAssigned
  | .level newMVar => isLevelMVarAssigned newMVar

/-- Builds the quoted value for the outside mvar from its assigned unquoted counterpart. -/
meta def MVarSynth.synth : MVarSynth → QuoteM Expr
  | .term et newMVar => return mkApp2 (.const ``Quoted.unsafeMk []) et
      (← quoteExpr (← instantiateMVars (.mvar newMVar)))
  | .type newMVar => return mkApp (.const ``Quoted [])
      (← quoteExpr (← instantiateMVars (.mvar newMVar)))
  | .level newMVar => do quoteLevel (← instantiateLevelMVars (.mvar newMVar))

/-- Whether any declaration in the current local context still mentions an expr mvar. -/
meta def lctxHasMVar : MetaM Bool := do
  (← getLCtx).anyM fun decl => return (← instantiateLocalDeclMVars decl).hasExprMVar

end Impl

open Lean.Elab Lean.Elab.Tactic Lean.Elab.Term Impl

/-- Runs `k` with a fresh postponed-mvar queue, forcing all mvars postponed by `k` to be
processed before returning; the previous queue is restored afterwards. -/
@[specialize]
meta def withProcessPostponed [Monad m] [MonadFinally m] [MonadLiftT MetaM m] (k : m α) :
    m α := do
  let postponed ← getResetPostponed
  try
    k <* discard (processPostponed (mayPostpone := false) (exceptionOnFailure := true))
  finally
    setPostponed (postponed ++ (← getPostponed))

/-- Runs `k` with the level names collected during unquotation in scope, adding term info for the
levels `k` reports as referenced and rejecting auto-bound level names. -/
meta def Impl.UnquoteState.withLevelNames (s : UnquoteState) (k : TermElabM (α × Array Name)) :
    TermElabM α := do
  Term.withLevelNames s.levelNames do
    let (res, refdLevels) ←
      try
        k
      catch e =>
        if let some n := isAutoBoundImplicitLocalException? e then
          throwError "unsupported implicit auto-bound: {n} is not a level name"
        throw e
    for newLevelName in (← getLevelNames) do
      if let some fvar ← isLevelFVar newLevelName then
        if refdLevels.contains newLevelName then
          addTermInfo' (← getRef) fvar
      else if (← read).autoBoundImplicit then
        throwAutoBoundImplicitLocal newLevelName
      else
        throwError "unbound level param {newLevelName}"
    return res

/-- `ql(u)` quotes the universe level `u`. -/
scoped elab "ql(" l:level ")" : term => do
  let ((), s) ← unquoteLevelLCtx.run {mayPostpone := false}
  let l ← s.withLevelNames do
    let l ← elabLevel l
    let refdLevels := (CollectLevelParams.collect (← instantiateLevelMVars l) {}).params
    return (l, refdLevels)
  quoteLevel l s

/-- `a =QL b` says that the levels `a` and `b` are definitionally equal. -/
scoped syntax atomic(level " =QL ") level : term
macro_rules | `($a:level =QL $b) => `(QuotedLevelDefEq ql($a) ql($b))

/-- Shared elaborator behind `q(·)` and `Q(·)`: unquotes the local context and the expected-type
metavariable, elaborates `t` in the unquoted context, then quotes the mvar assignments back. -/
meta def Impl.macro (t : Syntax) (expectedType : Expr) : TermElabM Expr := do
  let mainMVar ← mkFreshExprMVar expectedType
  let s ← (unquoteMVar mainMVar *> get).run' { mayPostpone := (← read).mayPostpone }
  -- The most recently registered mvar is the unquoted counterpart of `mainMVar`.
  have lastId := match s.mvars with
    | (_, .term _ lastMVar) :: _ | (_, .type lastMVar) :: _ => lastMVar
    | _ => unreachable!
  let lastDecl ← lastId.getDecl
  withRef t do s.withLevelNames do
    withLCtx lastDecl.lctx lastDecl.localInstances do
      withProcessPostponed do withSynthesize do
        let t ← Term.elabTerm t lastDecl.type
        let t ← ensureHasType lastDecl.type t
        synthesizeSyntheticMVars (postpone := .no)
        if (← logUnassignedUsingErrorInfos (← getMVars t)) then
          throwAbortTerm
        lastId.assign t
    let refdLevels ← do
      let mut lvls : CollectLevelParams.State := {}
      for (_, synth) in s.mvars do
        match synth with
        | .term _ exprMVar | .type exprMVar =>
          lvls := lvls.collect (← instantiateMVars (.mvar exprMVar))
        | _ => pure ()
      pure lvls.params
    return ((), refdLevels)
  -- Quote each assigned unquoted mvar back and unify it with its outside counterpart.
  for (mvar, synth) in s.mvars.reverse do
    if ← synth.isAssigned then
      let t ← synth.synth s
      unless ← isDefEq mvar t do
        tryPostpone
        throwError "cannot assign metavariable ({mvar} : {← inferType mvar}) with {t}"
  instantiateMVars mainMVar

/-- `q(t)` quotes the Lean expression `t` into a `Q(α)` (if `t : α`) -/
scoped syntax "q(" term Parser.Term.optType ")" : term

macro_rules | `(q($t : $ty)) => `(q(($t : $ty)))

elab_rules : term <= expectedType
  | `(q($t)) => do
    let expectedType ← instantiateMVars expectedType
    if expectedType.hasExprMVar then tryPostpone
    if ← lctxHasMVar then tryPostpone
    ensureHasType expectedType $ ← commitIfDidNotPostpone do
      let mut expectedType ← withReducible <| Impl.whnf expectedType
      if !expectedType.isAppOfArity ``Quoted 1 then
        -- No `Q(_)` expected type: invent one of the form `Quoted (Expr.sort ?u)`.
        let u ← mkFreshExprMVar (some (.const ``Level []))
        let u' := .app (.const ``Expr.sort []) u
        let t ← mkFreshExprMVar (mkApp (.const ``Quoted []) u')
        expectedType := .app (.const ``Quoted []) t
      Impl.macro t expectedType

/-- `Q(α)` is the type of Lean expressions having type `α`. -/
scoped syntax "Q(" term Parser.Term.optType ")" : term

macro_rules | `(Q($t : $ty)) => `(Q(($t : $ty)))

elab_rules : term <= expectedType
  | `(Q($t)) => do
    let expectedType ← instantiateMVars expectedType
    unless ← isDefEq expectedType q(Type) do
      throwError "Q(.) has type Type, expected type is{indentExpr expectedType}"
    commitIfDidNotPostpone do Impl.macro t expectedType

/-- `a =Q b` says that `a` and `b` are definitionally equal. -/
scoped notation a:50 " =Q " b:51 => QuotedDefEq q(a) q(b)

namespace Impl

/- support `Q($(foo) ∨ False)` -/

/-- Records a lifted antiquotation as an (identifier, type, value) triple in the state. -/
private meta def push [Monad m] (i t l : Syntax) :
    StateT (Array $ Syntax × Syntax × Syntax) m Unit :=
  modify fun s => s.push (i, t, l)

/-- Replaces level antiquotations in `stx` by fresh identifiers, recording the lifted bindings in
the state. -/
meta def floatLevelAntiquot' [Monad m] [MonadQuotation m] (stx : Syntax) :
    StateT (Array $ Syntax × Syntax × Syntax) m Syntax :=
  if stx.isAntiquot && !stx.isEscapedAntiquot then
    withFreshMacroScope do
      push (← `(u)) (← `(Level)) (← floatLevelAntiquot' stx.getAntiquotTerm)
      `(u)
  else
    match stx with
    | Syntax.node i k args => return Syntax.node i k (← args.mapM floatLevelAntiquot')
    | stx => return stx

open TSyntax.Compat in
/-- Replaces term antiquotations at quotation depth 0 by fresh `$`-prefixed identifiers bound via
lifted `let`s; `depth` tracks nesting inside inner `q(·)`/`Q(·)` quotations. -/
meta def floatExprAntiquot' [Monad m] [MonadQuotation m] (depth : Nat) :
    Syntax → StateT (Array $ Syntax × Syntax × Syntax) m Syntax
  | `(Q($x)) => do `(Q($(← floatExprAntiquot' (depth + 1) x)))
  | `(q($x)) => do `(q($(← floatExprAntiquot' (depth + 1) x)))
  | `(Type $term) => do `(Type $(← floatLevelAntiquot' term))
  | `(Sort $term) => do `(Sort $(← floatLevelAntiquot' term))
  | stx => do
    if let (some (kind, _pseudo), false) := (stx.antiquotKind?, stx.isEscapedAntiquot) then
      let term := stx.getAntiquotTerm
      if depth > 0 then
        return Syntax.mkAntiquotNode kind (← floatExprAntiquot' (depth - 1) term)
      else if term.isIdent && (stripDollars term.getId).isAtomic then
        return addSyntaxDollar term
      else
        withFreshMacroScope do
          push (← `(a)) (← `(Quoted _)) term
          return addSyntaxDollar <|<- `(a)
    else
      match stx with
      | Syntax.node i k args => return Syntax.node i k (← args.mapM (floatExprAntiquot' depth))
      | stx => pure stx

open TSyntax.Compat in
/-- `TSyntax`-typed wrapper around `floatExprAntiquot'`. -/
meta def floatExprAntiquot [Monad m] [MonadQuotation m] (depth : Nat) :
    Term → StateT (Array $ Ident × Term × Term) m Term := fun t s => do
  let (t, lifts) ← floatExprAntiquot' depth t (s.map fun (a,t,l) => (a,t,l))
  return (t, lifts.map fun (a,t,l) => (a,t,l))

macro_rules
  | `(Q($t0)) => do
    let (t, lifts) ← floatExprAntiquot 0 t0 #[]
    if lifts.isEmpty && t == t0 then Macro.throwUnsupported
    let mut t ← `(Q($t))
    for (a, ty, lift) in lifts do
      t ← `(let $a:ident : $ty := $lift; $t)
    pure t
  | `(q($t0)) => do
    let (t, lifts) ← floatExprAntiquot 0 t0 #[]
    if lifts.isEmpty && t == t0 then Macro.throwUnsupported
    let mut t ← `(q($t))
    for (a, ty, lift) in lifts do
      t ← `(let $a:ident : $ty := $lift; $t)
    pure t

end Impl
.lake/packages/Qq/Qq/Match.lean
module public import Qq.Macro public import Qq.MetaM public import Qq.ForLean.Do public import Qq.SortLocalDecls public meta import Qq.MatchImpl public meta section /-! # `~q()` matching This file extends the syntax of `match` and `let` to permit matching terms of type `Q(α)` using `~q(<pattern>)`, just as terms of type `Syntax` can be matched with `` `(<pattern>) ``. Compare to the builtin `match_expr` and `let_expr`, `~q()` matching: * is type-safe, and so helps avoid many mistakes in match patterns * matches by definitional equality, rather than expression equality * supports compound expressions, not just a single application See `Qq.matcher` for a brief syntax summary. ## Matching typeclass instances For a more complete example, consider ``` def isCanonicalAdd {u : Level} {α : Q(Type u)} (inst : Q(Add $α)) (x : Q($α)) : MetaM <| Option (Q($α) × Q($α)) := do match x with | ~q($a + $b) => return some (a, b) | _ => return none ``` Here, the `~q($a + $b)` match is specifically matching the addition against the provided `inst` instance, as this is what is being used to elaborate the `+`. If the intent is to match an _arbitrary_ `Add α` instance in `x`, then you must match this with a `$inst` antiquotation: ``` def isAdd {u : Level} {α : Q(Type u)} (x : Q($α)) : MetaM <| Option (Q(Add $α) × Q($α) × Q($α)) := do match x with | ~q(@HAdd.hAdd _ _ _ (@instHAdd _ $inst) $a $b) => return some (inst, a, b) | _ => return none ``` ## Matching `Expr`s By itself, `~q()` can only match against terms of the form `Q($α)`. To match an `Expr`, it must first be converted to Qq with `Qq.inferTypeQ`. For instance, to match an arbitrary expression for `n + 37` where `n : Nat`, we can write ``` def isAdd37 (e : Expr) : MetaM (Option Q(Nat)) := do let ⟨1, ~q(Nat), ~q($n + 37)⟩ ← inferTypeQ e | return none return some n ``` This is performing three sequential matches: first that `e` is in `Sort 1`, then that the type of `e` is `Nat`, then finally that `e` is of the right form. 
This syntax can be used in `match` too. -/ open Lean Elab Term Meta open Parser.Term namespace Qq namespace Impl def mkInstantiateMVars (decls : List PatVarDecl) : List PatVarDecl → MetaM Q(MetaM $(mkIsDefEqType decls)) | [] => return q(return $(mkIsDefEqResult true decls)) -- https://github.com/leanprover/lean4/issues/501 | { ty := none, fvarId := fvarId, userName := userName } :: rest => do let decl : PatVarDecl := { ty := none, fvarId := fvarId, userName := userName } let instMVars : Q(Level → MetaM $(mkIsDefEqType decls)) ← mkLambdaQ _ decl.fvar q($(← mkInstantiateMVars decls rest)) return q(Bind.bind (instantiateLevelMVars $(decl.fvar)) $instMVars) | { ty := some ty, fvarId := fvarId, userName := userName } :: rest => do let decl : PatVarDecl := { ty := some ty, fvarId := fvarId, userName := userName } let instMVars : Q(Expr → MetaM $(mkIsDefEqType decls)) ← mkLambdaQ _ decl.fvar q($(← mkInstantiateMVars decls rest)) return q(Bind.bind (instantiateMVars $(decl.fvar)) $instMVars) def mkIsDefEqCore (decls : List PatVarDecl) (pat discr : Q(Expr)) : List PatVarDecl → MetaM Q(MetaM $(mkIsDefEqType decls)) | { ty := none, fvarId := fvarId, userName := userName } :: rest => let decl : PatVarDecl := { ty := none, fvarId := fvarId, userName := userName } return q(Bind.bind mkFreshLevelMVar $(← mkLambdaQ `x decl.fvar (← mkIsDefEqCore decls pat discr rest))) | { ty := some ty, fvarId := fvarId, userName := userName } :: rest => let decl : PatVarDecl := { ty := some ty, fvarId := fvarId, userName := userName } return q(Bind.bind (mkFreshExprMVar $ty) $(← mkLambdaQ `x decl.fvar (← mkIsDefEqCore decls pat discr rest))) | [] => do let instMVars ← mkInstantiateMVars decls decls return q(do let matches? ← withReducible $ isDefEq $pat $discr (if matches? 
then $instMVars else return $(mkIsDefEqResult false decls))) def mkIsDefEq (decls : List PatVarDecl) (pat discr : Q(Expr)) : MetaM Q(MetaM $(mkIsDefEqType decls)) := do return q(withNewMCtxDepth $(← mkIsDefEqCore decls pat discr decls)) def withLetHave [Monad m] [MonadControlT MetaM m] [MonadLiftT MetaM m] [MonadLCtx m] (fvarId : FVarId) (userName : Name) (val : (Quoted α)) (k : (Quoted α) → m (Quoted β)) : m (Quoted β) := do withExistingLocalDecls [LocalDecl.cdecl default fvarId userName α .default .default] do return Quoted.unsafeMk $ ← mkLet' userName (.fvar fvarId) α val (← k (.fvar fvarId)) def mkQqLets {γ : Q(Type)} : (decls : List PatVarDecl) → Q($(mkIsDefEqType decls)) → TermElabM Q($γ) → TermElabM Q($γ) | { ty := none, fvarId := fvarId, userName := userName } :: decls, acc, cb => withLetHave fvarId userName (α := q(Level)) q($acc.1) fun _ => mkQqLets decls q($acc.2) cb | { ty := some ty, fvarId := fvarId, userName := userName } :: decls, acc, cb => withLetHave fvarId userName (α := q(Quoted $ty)) q($acc.1) fun _ => mkQqLets decls q($acc.2) cb | [], _, cb => cb -- FIXME: we're reusing fvarids here def replaceTempExprsByQVars : List PatVarDecl → Expr → Expr | [], e => e | { ty := some _, fvarId, .. } :: decls, e => ((replaceTempExprsByQVars decls e).abstract #[.fvar fvarId]).instantiate #[.fvar fvarId] | { ty := none, .. 
} :: decls, e => replaceTempExprsByQVars decls e def makeMatchCode {γ : Q(Type)} {m : Q(Type → Type v)} (_instLift : Q(MonadLiftT MetaM $m)) (_instBind : Q(Bind $m)) (decls : List PatVarDecl) (uTy : Q(Level)) (ty : Q(Quoted (.sort $uTy))) (pat discr : Q(Quoted $ty)) (alt : Q($m $γ)) (expectedType : Expr) (k : Expr → TermElabM Q($m $γ)) : TermElabM Q($m $γ) := do let nextDecls : List PatVarDecl := decls.map fun decl => { decl with ty := decl.ty.map fun e => replaceTempExprsByQVars decls e } let next ← withLocalDecl (← mkFreshBinderName) default (mkIsDefEqType decls) (kind := .implDetail) fun fv => do let fv : Q($(mkIsDefEqType decls)) := fv -- note: cannot inline into `$body` due to leanprover/lean4#3827 let body ← mkQqLets nextDecls fv do have pat : Q(Quoted $ty) := replaceTempExprsByQVars decls pat let (_, s) ← unquoteLCtx.run { mayPostpone := (← read).mayPostpone } let _discr' ← (unquoteExpr discr).run' s let _pat' ← (unquoteExpr pat).run' s withLocalDeclDQ (← mkFreshUserName `match_eq) q(QuotedDefEq $discr $pat) fun h => do let res ← k expectedType let res : Q($m $γ) ← instantiateMVars res let res : Q($m $γ) := (← res.abstractM #[h]).instantiate #[q(⟨⟩ : QuotedDefEq $discr $pat)] return res let next : Q($m $γ) := q(if $(mkIsDefEqResultVal decls fv) then $body else $alt) return show Q($(mkIsDefEqType decls) → $m $γ) from Quoted.unsafeMk $ ← mkLambda' `__result fv (mkIsDefEqType decls) next pure q(Bind.bind $(← mkIsDefEq decls pat discr) $next) def unquoteForMatch (et : Expr) : UnquoteM (LocalContext × LocalInstances × Expr) := do unquoteLCtx let newET ← unquoteExpr et let newLCtx := (← get).unquoted return (newLCtx, ← determineLocalInstances newLCtx, newET) def mkNAryFunctionType : Nat → MetaM Expr | 0 => mkFreshTypeMVar | n+1 => do withLocalDeclD `x (← mkFreshTypeMVar) fun x => do mkForallFVars #[x] (← mkNAryFunctionType n) structure PatternVar where name : Name /-- Pattern variables can be functions; if so, this is their arity. 
-/ arity : Nat mvar : Expr stx : Term partial def getPatVars (pat : Term) : StateT (Array PatternVar) TermElabM Term := do match pat with | `($fn $args*) => if isPatVar fn then return ← mkMVar fn args | _ => if isPatVar pat then return ← mkMVar pat #[] match pat with | ⟨.node info kind args⟩ => return ⟨.node info kind (← args.mapM (getPatVars ⟨·⟩))⟩ | pat => return pat where isPatVar (fn : Syntax) : Bool := fn.isAntiquot && !fn.isEscapedAntiquot && fn.getAntiquotTerm.isIdent && fn.getAntiquotTerm.getId.isAtomic mkMVar (fn : Syntax) (args : Array Term) : StateT _ TermElabM Term := do let args ← args.mapM getPatVars let id := fn.getAntiquotTerm.getId withFreshMacroScope do if let some p := (← get).find? fun p => p.name == id then return ← `($(p.stx) $args*) let mvar ← elabTerm (← `(?m)).1.stripPos (← mkNAryFunctionType args.size) modify (·.push ⟨id, args.size, mvar, ← `(?m)⟩) `(?m $args*) def elabPat (pat : Term) (lctx : LocalContext) (localInsts : LocalInstances) (ty : Expr) (levelNames : List Name) : TermElabM (Expr × Array LocalDecl × Array Name) := withLCtx lctx localInsts do withLevelNames levelNames do let (pat, patVars) ← getPatVars pat #[] let pat ← Lean.Elab.Term.elabTerm pat ty let pat ← ensureHasType ty pat synthesizeSyntheticMVars (postpone := .no) let pat ← instantiateMVars pat let mctx ← getMCtx let levelNames ← getLevelNames let r := mctx.levelMVarToParam levelNames.elem (fun _ => false) pat `u 1 setMCtx r.mctx let mut newDecls := #[] for patVar in patVars do assert! 
patVar.mvar.isMVar let fvarId := FVarId.mk (← mkFreshId) let type ← inferType patVar.mvar newDecls := newDecls.push $ LocalDecl.cdecl default fvarId patVar.name type .default .default patVar.mvar.mvarId!.assign (.fvar fvarId) for newMVar in ← getMVars pat do let fvarId := FVarId.mk (← mkFreshId) let type ← instantiateMVars (← newMVar.getDecl).type let userName ← mkFreshBinderName newDecls := newDecls.push $ LocalDecl.cdecl default fvarId userName type .default .default newMVar.assign (.fvar fvarId) withExistingLocalDecls newDecls.toList do return (← instantiateMVars pat, ← sortLocalDecls (← newDecls.mapM fun d => instantiateLocalDeclMVars d), r.newParamNames) scoped elab "_qq_match" pat:term " ← " e:term " | " alt:term " in " body:term : term <= expectedType => do let emr ← extractBind expectedType let alt ← elabTermEnsuringType alt expectedType let argLvlExpr ← mkFreshExprMVarQ q(Level) let argTyExpr ← mkFreshExprMVarQ q(Quoted (.sort $argLvlExpr)) let e' ← elabTermEnsuringTypeQ e q(Quoted $argTyExpr) let argTyExpr ← instantiateMVarsQ argTyExpr let ((lctx, localInsts, type), s) ← (unquoteForMatch argTyExpr).run { mayPostpone := (← read).mayPostpone } let (pat, patVarDecls, newLevels) ← elabPat pat lctx localInsts type s.levelNames let mut s := s let mut oldPatVarDecls : List PatVarDecl := [] for newLevel in newLevels do let fvarId := FVarId.mk (← mkFreshId) oldPatVarDecls := oldPatVarDecls ++ [{ ty := none, fvarId := fvarId, userName := newLevel }] s := { s with levelBackSubst := s.levelBackSubst.insert (.param newLevel) (.fvar fvarId) } for ldecl in patVarDecls do let qty ← (quoteExpr ldecl.type).run s oldPatVarDecls := oldPatVarDecls ++ [{ ty := some qty, fvarId := ldecl.fvarId, userName := ldecl.userName }] s := { s with exprBackSubst := s.exprBackSubst.insert ldecl.toExpr (.quoted ldecl.toExpr) } have m : Q(Type → Type) := emr.m have γ : Q(Type) := emr.returnType let inst ← synthInstanceQ q(Bind $m) let inst2 ← synthInstanceQ q(MonadLiftT MetaM $m) have 
synthed : Q(Expr) := (← quoteExpr (← instantiateMVars pat) s) let alt : Q($m $γ) := alt makeMatchCode q($inst2) inst oldPatVarDecls argLvlExpr argTyExpr synthed q($e') alt expectedType fun expectedType => return Quoted.unsafeMk (← elabTerm body expectedType) scoped syntax "_qq_match" term " := " term " | " doSeq : term macro_rules | `(assert! (_qq_match $pat := $e | $alt); $x) => `(_qq_match $pat ← $e | (do $alt) in $x) partial def isIrrefutablePattern : Term → Bool | `(($stx)) => isIrrefutablePattern stx | `(⟨$args,*⟩) => args.getElems.all isIrrefutablePattern | `(($a, $b)) => isIrrefutablePattern a && isIrrefutablePattern b | `(_) => true | `(true) => false | `(false) => false -- TODO properly | stx => stx.1.isIdent scoped elab "_comefrom" n:ident "do" b:doSeq " in " body:term : term <= expectedType => do let _ ← extractBind expectedType let ty ← exprToSyntax expectedType elabTerm (← `(have $n:ident : $ty := (do $b:doSeq); $body)) expectedType scoped syntax "_comefrom" ident "do" doSeq : term macro_rules | `(assert! (_comefrom $n do $b); $body) => `(_comefrom $n do $b in $body) scoped macro "comefrom" n:ident "do" b:doSeq : doElem => `(doElem| assert! (_comefrom $n do $b)) def mkLetDoSeqItem [Monad m] [MonadQuotation m] (pat : Term) (rhs : TSyntax `term) (alt : TSyntax ``doSeq) : m (List (TSyntax ``doSeqItem)) := do match pat with | `(_) => return [] | _ => if isIrrefutablePattern pat then return [← `(doSeqItem| let $pat:term := $rhs)] else return [← `(doSeqItem| let $pat:term := $rhs | $alt)] end Impl section open Impl /-- `Qq`s expression matching in `MetaM`, up to reducible defeq. This syntax is valid in `match`, `let`, and `if let`, but not `fun`. The usage is very similar to the builtin `Syntax`-matching that uses `` `(<pattern>)`` notation. 
As an example, consider matching against a `n : Q(ℕ)`, which can be written * With a `match` expression, ``` match n with | ~q(Nat.gcd $x $y) => handleGcd x y | ~q($x + $y) => handleAdd x y | _ => throwError "no match" ``` * With a `let` expression (if there is a single match) ``` let ~q(Nat.gcd $x $y) := n | throwError "no match" handleGcd x y ``` * With an `if let` statement ``` if let ~q(Nat.gcd $x $y) := n then handleGcd x y else if let ~q($x + $y) := n then handleAdd x y else throwError "no match" ``` In addition to the obvious `x` and `y` captures, in the example above `~q` also inserts into the context a term of type `$n =Q Nat.gcd $x $y`. -/ scoped syntax (name := matcher) "~q(" term ")" : term partial def Impl.hasQMatch : Syntax → Bool | `(~q($_)) => true | stx => stx.getArgs.any hasQMatch partial def Impl.floatQMatch (alt : TSyntax ``doSeq) : Term → StateT (List (TSyntax ``doSeqItem)) MacroM Term | `(~q($term)) => withFreshMacroScope do let auxDoElem ← `(doSeqItem| let ~q($term) := x | $alt) modify fun s => s ++ [auxDoElem] `(x) | stx => do match stx with | ⟨.node i k args⟩ => return ⟨.node i k (← args.mapM (floatQMatch alt ⟨·⟩))⟩ | stx => return stx private def push (i : TSyntax ``doSeqItem) : StateT (Array (TSyntax ``doSeqItem)) MacroM Unit := modify fun s => s.push i partial def unpackParensIdent : Syntax → Option Syntax | `(($stx)) => unpackParensIdent stx | stx => if stx.isIdent then some stx else none private partial def floatLevelAntiquot (stx : Syntax.Level) : StateT (Array (TSyntax ``doSeqItem)) MacroM Syntax.Level := if stx.1.isAntiquot && !stx.1.isEscapedAntiquot then if !stx.1.getAntiquotTerm.isIdent then withFreshMacroScope do push <| ← `(doSeqItem| let u : Level := $(⟨stx.1.getAntiquotTerm⟩)) `(level| u) else pure stx else match stx with | ⟨.node i k args⟩ => return ⟨Syntax.node i k (← args.mapM (floatLevelAntiquot ⟨·⟩))⟩ | stx => return stx private partial def floatExprAntiquot (depth : Nat) : Term → StateT (Array (TSyntax ``doSeqItem)) 
MacroM Term | `(Q($x)) => do `(Q($(← floatExprAntiquot (depth + 1) x))) | `(q($x)) => do `(q($(← floatExprAntiquot (depth + 1) x))) | `(Type $term) => do `(Type $(← floatLevelAntiquot term)) | `(Sort $term) => do `(Sort $(← floatLevelAntiquot term)) | stx => do if stx.1.isAntiquot && !stx.1.isEscapedAntiquot then let term : Term := ⟨stx.1.getAntiquotTerm⟩ if term.1.isIdent then return stx else if depth > 0 then return ⟨.mkAntiquotNode stx.1.antiquotKind?.get!.1 (← floatExprAntiquot (depth - 1) term)⟩ else match unpackParensIdent stx.1.getAntiquotTerm with | some id => if id.getId.isAtomic then return ⟨addSyntaxDollar id⟩ | none => pure () withFreshMacroScope do push <| ← `(doSeqItem| let a : Quoted _ := $term) return ⟨addSyntaxDollar (← `(a))⟩ else match stx with | ⟨.node i k args⟩ => return ⟨.node i k (← args.mapM (floatExprAntiquot depth ⟨·⟩))⟩ | stx => return stx macro_rules | `(doElem| let $pat:term := $_) => do if !hasQMatch pat then Macro.throwUnsupported Macro.throwError "let-bindings with ~q(.) require an explicit alternative" | `(doElem| let $pat:term := $rhs:term | $alt:doSeq) => do if !hasQMatch pat then Macro.throwUnsupported match pat with | `(~q($pat)) => let (pat, lifts) ← floatExprAntiquot 0 pat #[] let t ← `(doSeqItem| do assert! (_qq_match $pat := $rhs | $alt)) `(doElem| do $(lifts.push t):doSeqItem*) | _ => let (pat', auxs) ← floatQMatch (← `(doSeq| __alt)) pat [] let items := #[← `(doSeqItem| comefrom __alt do $alt:doSeq)] ++ (← mkLetDoSeqItem pat' rhs alt) ++ auxs `(doElem| do $items:doSeqItem*) | `(match $[$gen:generalizingParam]? $[$discrs:term],* with $[| $[$patss],* => $rhss]*) => do if !patss.any (·.any (hasQMatch ·)) then Macro.throwUnsupported `(do match $[$gen]? $[$discrs:term],* with $[| $[$patss:term],* => $rhss:term]*) | `(doElem| match $[$gen:generalizingParam]? 
$[$discrs:term],* with $[| $[$patss],* => $rhss]*) => do if !patss.any (·.any (hasQMatch ·)) then Macro.throwUnsupported -- only `generalizing := true` (the default) is supported if let some stx := gen then match stx with | `(generalizingParam| (generalizing := true)) => pure () | _ => Macro.throwErrorAt stx "not supported in ~q matching" let mut items := #[] items := items.push (← `(doSeqItem| comefrom __alt do throwError "nonexhaustive match")) for pats in patss.reverse, rhs in rhss.reverse do let mut subItems : Array (TSyntax ``doSeqItem) := #[] for discr in discrs, pat in pats do subItems := subItems ++ (← mkLetDoSeqItem pat discr (← `(doSeq| __alt))) subItems := subItems.push (← `(doSeqItem| do $rhs)) items := items.push (← `(doSeqItem| comefrom __alt do $subItems:doSeqItem*)) items := items.push (← `(doSeqItem| __alt)) `(doElem| (do $items:doSeqItem*)) end
.lake/packages/Qq/Qq/MatchImpl.lean
module public import Qq.Macro public import Qq.MetaM public import Qq.ForLean.Do public import Qq.SortLocalDecls import Qq.Typ public section open Lean in partial def Lean.Syntax.stripPos : Syntax → Syntax | atom _ a => atom .none a | ident _ r v p => ident .none r v p | node _ kind args => node .none kind (args.map stripPos) | missing => missing open Lean Elab Term Meta open Parser.Term namespace Qq namespace Impl structure PatVarDecl where ty : Option Q(Expr) fvarId : FVarId userName : Name @[expose] def PatVarDecl.fvarTy : PatVarDecl → Q(Type) | { ty := none, .. } => q(Level) | { ty := some _, .. } => q(Expr) def PatVarDecl.fvar (decl : PatVarDecl) : Q($((decl.fvarTy))) := Expr.fvar decl.fvarId @[expose] def mkIsDefEqType : List PatVarDecl → Q(Type) | [] => q(Bool) | decl :: decls => q($(decl.fvarTy) × $(mkIsDefEqType decls)) @[expose] def mkIsDefEqResult (val : Bool) : (decls : List PatVarDecl) → Q($(mkIsDefEqType decls)) | [] => show Q(Bool) from q($val) | decl :: decls => q(($(decl.fvar), $(mkIsDefEqResult val decls))) @[expose] def mkIsDefEqResultVal : (decls : List PatVarDecl) → Q($(mkIsDefEqType decls)) → Q(Bool) | [], val => q($val) | _ :: decls, val => mkIsDefEqResultVal decls q($val.2) def mkLambda' (n : Name) (fvar : Expr) (ty : Expr) (body : Expr) : MetaM Expr := return mkLambda n BinderInfo.default ty (← body.abstractM #[fvar]) def mkLet' (n : Name) (fvar : Expr) (ty : Expr) (val : Expr) (body : Expr) : MetaM Expr := return mkLet n ty val (← body.abstractM #[fvar]) def mkLambdaQ (n : Name) (fvar : Quoted α) (body : Quoted β) : MetaM (Quoted (.forallE n α β .default)) := return mkLambda n BinderInfo.default α (← body.abstractM #[fvar])
.lake/packages/Qq/Qq/Delab.lean
module public import Qq.Macro meta import Qq.Macro public section /-! # Delaborators for `q()` and `Q()` notation -/ open Qq Lean Elab PrettyPrinter.Delaborator SubExpr Meta Impl Std namespace Qq namespace Impl register_option pp.qq : Bool := { defValue := true group := "pp" descr := "(pretty printer) print quotations as q(...) and Q(...)" } -- TODO: this probably exists in the library private meta def failureOnError (x : MetaM α) : DelabM α := do let y : MetaM (Option α) := do try return some (← x) catch _ => return none match ← y with | some a => return a | none => failure private meta def unquote (e : Expr) : UnquoteM (Expr × LocalContext) := do unquoteLCtx let newE ← unquoteExpr e return (newE, (← get).unquoted) meta def checkQqDelabOptions : DelabM Unit := do unless ← getPPOption (·.getBool `pp.qq true) do failure if ← getPPOption getPPExplicit then failure meta instance : MonadLift UnquoteM (StateT UnquoteState DelabM) where monadLift k s := k s meta def delabQuoted : StateT UnquoteState DelabM Term := do let e ← getExpr -- `(failure : DelabM _)` is of course completely different than `(failure : MetaM _)`... let some newE ← (try some <$> unquoteExpr e catch _ => failure : UnquoteM _) | failure let newLCtx := (← get).unquoted withLCtx newLCtx (← determineLocalInstances newLCtx) do withTheReader SubExpr (fun s => { s with expr := newE }) delab meta def withDelabQuoted (k : StateT UnquoteState DelabM Term) : Delab := withIncRecDepth do StateT.run' (s := { mayPostpone := false }) <| show StateT UnquoteState DelabM Term from do unquoteLCtx let mut res ← k let showNested := `pp.qq._nested if (← getOptions).get showNested true then for fv in (← get).abstractedFVars.reverse do if let some (.quoted expr) := (← get).exprBackSubst[Expr.fvar fv]? then if let some decl := (← get).unquoted.find? fv then if (res.1.find? 
(·.getId == decl.userName)).isSome then if let some name := removeDollar decl.userName then let pos ← nextExtraPos res ← withTheReader SubExpr (fun _ => { expr, pos }) do withOptions (·.set showNested false) do `(let $(mkIdent name) := $(← delab); $res) return res meta def delabQuotedLevel : DelabM Syntax.Level := do let e ← getExpr let (newE, _) ← failureOnError do StateT.run (s := { mayPostpone := false }) do unquoteLevelLCtx (addDefEqs := false) unquoteLevel e return newE.quote max_prec @[delab app.Qq.Quoted] meta def delabQ : Delab := do guard $ (← getExpr).getAppNumArgs == 1 checkQqDelabOptions withDelabQuoted do let stx ← withAppArg delabQuoted `(Q($stx)) @[delab app.Qq.Quoted.unsafeMk] meta def delabq : Delab := do guard $ (← getExpr).getAppNumArgs == 2 checkQqDelabOptions withDelabQuoted do let stx ← withAppArg delabQuoted `(q($stx)) @[delab app.Qq.QuotedDefEq] meta def delabQuotedDefEq : Delab := do guard $ (← getExpr).getAppNumArgs == 4 checkQqDelabOptions withDelabQuoted do let lhs ← withAppFn do withAppArg delabQuoted let rhs ← withAppArg delabQuoted `($lhs =Q $rhs) @[delab app.Qq.QuotedLevelDefEq] meta def delabQuotedLevelDefEq : Delab := do guard $ (← getExpr).getAppNumArgs == 2 checkQqDelabOptions let lhs ← withAppFn do withAppArg delabQuotedLevel let rhs ← withAppArg delabQuotedLevel `($lhs:level =QL $rhs:level)
.lake/packages/Qq/Qq/Commands.lean
module public import Qq.Macro public import Lean meta import Lean.Elab.Term.TermElabM meta import Qq.Macro meta import Qq.AssertInstancesCommute public section /-! # `run_tacq` and `by_elabq` This file provides Qq analogues to `by_elab` and `run_tac`. -/ namespace Qq open Lean Meta Elab Tactic /-- Build a let expression, similarly to `mkLetFVars`. The array of `values` will be assigned to the current local context, which is expected to consist of `cdecl`s. -/ private meta def mkLetFVarsFromValues (values : Array Expr) (body : Expr) : MetaM Expr := do let ctx ← getLCtx let ctxLet := ctx.foldl (init := LocalContext.empty) fun part decl => part.addDecl (.ldecl decl.index decl.fvarId decl.userName decl.type values[decl.index]! false decl.kind) let fvars : Array Expr := ctx.foldl (init := #[]) fun part decl => part.push (.fvar decl.fvarId) withLCtx ctxLet #[] <| mkLetFVars fvars body /-- `by_elabq` is the Qq analogue to `by_elab` which allows executing arbitrary `TermElabM` code in place of a term. In contrast to `by_elab`, the local context can be directly accessed as quoted expressions and the return type is Q-annotated. Example: ``` def f (x : Prop) [Decidable x] : Int := by_elabq Lean.logInfo x Lean.logInfo x.ty return q(if $x then 2 else 3) ``` See also: `run_tacq`. -/ scoped elab "by_elabq" e:doSeq : term <= expectedType => do let lctx ← getLCtx let levelNames := (← Term.getLevelNames).reverse -- these are backwards! 
let (quotedCtx, assignments, quotedGoal) ← liftMetaM <| StateT.run' (s := { mayPostpone := false }) do let (quotedCtx, assignments) ← Impl.quoteLCtx lctx levelNames let expectedType ← instantiateMVars expectedType let quotedGoal : Q(Type) ← if expectedType.hasMVar then pure q(TermElabM Expr) else let expectedTypeQ : Q(Expr) ← Qq.Impl.quoteExpr expectedType pure <| q(TermElabM (Quoted $expectedTypeQ)) return (quotedCtx, assignments, quotedGoal) let codeExpr : Expr ← withLCtx quotedCtx #[] do let body ← Term.elabTermEnsuringType (← `(do $e)) quotedGoal Term.synthesizeSyntheticMVarsNoPostponing let body ← instantiateMVars body if (← Term.logUnassignedUsingErrorInfos (← getMVars body)) then throwAbortTerm mkLetFVarsFromValues assignments body let code ← unsafe evalExpr (TermElabM Expr) q(TermElabM Expr) codeExpr code /-- `run_tacq` is the Qq analogue to `run_tac` which allows executing arbitrary `TacticM` code. In contrast to `run_tac`, the local context of the main goal can be directly accessed as quoted expressions. Optionally, the annotated goal can also be saved using the syntax `run_tacq $g =>`. Example: ``` example (a b : Nat) (h : a = b) : True := by run_tacq goal => let p : Q(Prop) := q($a = $b) let t ← Lean.Meta.inferType h Lean.logInfo p Lean.logInfo <| toString (← Lean.Meta.isDefEq t p) Lean.logInfo <| toString (← Lean.Meta.isDefEq h.ty p) Lean.logInfo goal Lean.logInfo goal.ty trivial ``` See also: `by_elabq`. -/ scoped syntax "run_tacq" (atomic(ident "=>"))? doSeq : tactic elab_rules : tactic | `(tactic| run_tacq $[$gi:ident =>]? $seq:doSeq) => do let goal ← try getMainGoal catch _ => throwError "no open goal, run_tacq requires main goal" goal.withContext do let lctx ← getLCtx let levelNames := (← Term.getLevelNames).reverse -- these are backwards! let target ← instantiateMVars (← goal.getType) let (quotedCtx, assignments, goalInfo?) 
← liftMetaM <| StateT.run' (s := { mayPostpone := false }) do let (quotedCtx, assignments) ← Impl.quoteLCtx lctx levelNames match gi with | none => return (quotedCtx, assignments, none) | some goalName => let quotedTarget : Q(Expr) ← Qq.Impl.quoteExpr target let goalFid ← mkFreshFVarId let quotedCtx := quotedCtx.mkLocalDecl goalFid goalName.getId q(Quoted $quotedTarget) .default let assignments := assignments.push (toExpr (Expr.mvar goal)) return (quotedCtx, assignments, some (goalName, goalFid)) let codeExpr : Expr ← withLCtx quotedCtx #[] do if let .some (goalName, goalFid) := goalInfo? then discard <| Term.addTermInfo' (isBinder := true) goalName (Expr.fvar goalFid) let body ← Term.elabTermEnsuringType (← `(discard do $seq)) q(TacticM Unit) Term.synthesizeSyntheticMVarsNoPostponing let body ← instantiateMVars body if (← Term.logUnassignedUsingErrorInfos (← getMVars body)) then throwAbortTerm mkLetFVarsFromValues assignments body let code ← unsafe evalExpr (TacticM Unit) q(TacticM Unit) codeExpr code end Qq
.lake/packages/Qq/Qq/ForLean/ToExpr.lean
module public import Lean public section open Lean instance : ToExpr MVarId where toTypeExpr := .const ``MVarId [] toExpr i := mkApp (.const ``MVarId.mk []) (toExpr i.name) instance : ToExpr LevelMVarId where toTypeExpr := .const ``LevelMVarId [] toExpr i := mkApp (.const ``LevelMVarId.mk []) (toExpr i.name) open Level in def toExprLevel : Level → Expr | zero => .const ``zero [] | succ l => mkApp (.const ``succ []) (toExprLevel l) | .max l₁ l₂ => mkApp2 (.const ``Level.max []) (toExprLevel l₁) (toExprLevel l₂) | imax l₁ l₂ => mkApp2 (.const ``imax []) (toExprLevel l₁) (toExprLevel l₂) | param n => mkApp (.const ``param []) (toExpr n) | mvar n => mkApp (.const ``mvar []) (toExpr n) instance : ToExpr Level := ⟨toExprLevel, .const ``Level []⟩ instance : ToExpr BinderInfo where toTypeExpr := .const ``BinderInfo [] toExpr bi := match bi with | .default => .const ``BinderInfo.default [] | .implicit => .const ``BinderInfo.implicit [] | .strictImplicit => .const ``BinderInfo.strictImplicit [] | .instImplicit => .const ``BinderInfo.instImplicit [] instance : ToExpr MData where toTypeExpr := .const ``MData [] toExpr md := Id.run do let mut e := .const ``MData.empty [] for (k, v) in md do let k := toExpr k e := open DataValue in match v with | ofString v => mkApp3 (.const ``KVMap.setString []) e k (toExpr v) | ofBool v => mkApp3 (.const ``KVMap.setBool []) e k (toExpr v) | ofName v => mkApp3 (.const ``KVMap.setName []) e k (toExpr v) | ofNat v => mkApp3 (.const ``KVMap.setNat []) e k (toExpr v) | ofInt v => mkApp3 (.const ``KVMap.setInt []) e k (toExpr v) | ofSyntax _ => e -- TODO e open Expr Literal in def toExprExpr : Expr → Expr | bvar n => mkApp (.const ``bvar []) (mkNatLit n) | fvar n => mkApp (.const ``fvar []) (toExpr n) | mvar n => mkApp (.const ``mvar []) (toExpr n) | sort l => mkApp (.const ``sort []) (toExpr l) | const n ls => mkApp2 (.const ``const []) (toExpr n) (toExpr ls) | app f x => mkApp2 (.const ``app []) (toExprExpr f) (toExprExpr x) | lam x d b c => 
mkApp4 (.const ``lam []) (toExpr x) (toExprExpr d) (toExprExpr b) (toExpr c) | forallE x d b c => mkApp4 (.const ``forallE []) (toExpr x) (toExprExpr d) (toExprExpr b) (toExpr c) | letE x t v b c => mkApp5 (.const ``letE []) (toExpr x) (toExprExpr t) (toExprExpr v) (toExprExpr b) (toExpr c) | lit l => mkApp (.const ``lit []) (toExpr l) | mdata md e => mkApp2 (.const ``mdata []) (toExpr md) (toExprExpr e) | proj s i e => mkApp3 (.const ``proj []) (toExpr s) (mkNatLit i) (toExprExpr e) instance : ToExpr Expr := ⟨toExprExpr, .const ``Expr []⟩
.lake/packages/Qq/Qq/ForLean/Do.lean
module public import Lean public section /-! Make `Lean.Elab.Term.extractBind` public. -/ open Lean Meta namespace Lean.Elab.Term def mkIdBindFor (type : Expr) : TermElabM ExtractMonadResult := do let u ← getDecLevel type let id := Lean.mkConst ``Id [u] pure { m := id, returnType := type, expectedType := mkApp id type } partial def extractBind (expectedType? : Option Expr) : TermElabM ExtractMonadResult := do match expectedType? with | none => throwError "invalid 'do' notation, expected type is not available" | some expectedType => let extractStep? (type : Expr) : MetaM (Option ExtractMonadResult) := do match type with | .app m returnType => return some { m, returnType, expectedType } | _ => return none let rec extract? (type : Expr) : MetaM (Option ExtractMonadResult) := do match (← extractStep? type) with | some r => return r | none => let typeNew ← whnfCore type if typeNew != type then extract? typeNew else if typeNew.getAppFn.isMVar then throwError "invalid 'do' notation, expected type is not available" match (← unfoldDefinition? typeNew) with | some typeNew => extract? typeNew | none => return none match (← extract? expectedType) with | some r => return r | none => mkIdBindFor expectedType
.lake/packages/Qq/Qq/ForLean/ReduceEval.lean
module
public import Lean
public section

open Lean Meta

/-!
`ReduceEval` instances that recover runtime values from expressions by
reducing them with `whnf` and pattern-matching on the resulting constructor
applications.
-/

namespace Lean.Meta

/-- Fail with a uniform `reduceEval` error message for expression `e`. -/
def throwFailedToEval (e : Expr) : MetaM α :=
  throwError "reduceEval: failed to evaluate argument{indentExpr e}"

/-- Evaluate an expression denoting a `List α`, element by element.
After `whnf`, the head must be `List.nil` (1 arg: the type) or `List.cons`
(3 args: type, head, tail); anything else fails. -/
partial def evalList [ReduceEval α] (e : Expr) : MetaM (List α) := do
  let e ← whnf e
  let .const c _ := e.getAppFn | throwFailedToEval e
  let nargs := e.getAppNumArgs
  match c, nargs with
  | ``List.nil, 1 => pure []
  -- arg 0 is the implicit type; arg 1 the head, arg 2 the tail.
  | ``List.cons, 3 => return (← reduceEval (e.getArg! 1)) :: (← evalList (e.getArg! 2))
  | _, _ => throwFailedToEval e

instance [ReduceEval α] : ReduceEval (List α) := ⟨evalList⟩

/-- Evaluate a `Fin n` from a `Fin.mk` application, reducing its `val` field.
`NeZero n` is required so `Fin.ofNat` is applicable. -/
instance [NeZero n] : ReduceEval (Fin n) where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOfArity ``Fin.mk 3 then
      return Fin.ofNat _ (← reduceEval (e.getArg! 1))
    else
      throwFailedToEval e

/-- Evaluate a `BitVec n` from a `BitVec.ofFin` application.
The `have`/`▸` dance rewrites `2^n` to `2^n - 1 + 1` so that the
`ReduceEval (Fin _)` instance above (which needs a `NeZero` bound via the
successor form) applies to the underlying `Fin (2^n)`. -/
instance {n : Nat} : ReduceEval (BitVec n) where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOfArity ``BitVec.ofFin 2 then
      have : 2^n - 1 + 1 = 2^n := Nat.sub_one_add_one_eq_of_pos (Nat.two_pow_pos n)
      let _ : ReduceEval (Fin (2^n)) := this ▸ (inferInstanceAs <| ReduceEval (Fin (2^n - 1 + 1)))
      pure ⟨(← reduceEval (e.getArg! 1))⟩
    else
      throwFailedToEval e

/-- Evaluate a `UInt64` from a `UInt64.ofBitVec` application. -/
instance : ReduceEval UInt64 where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOfArity ``UInt64.ofBitVec 1 then
      pure ⟨(← reduceEval (e.getArg! 0))⟩
    else
      throwFailedToEval e

/-- Evaluate a `USize` from a `USize.ofBitVec` application.
The inner `Fin.mk` is reduced directly (rather than via the `BitVec`
instance) because the bit width of `USize` is platform-dependent. -/
instance : ReduceEval USize where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOfArity ``USize.ofBitVec 1 then
      let a ← whnf (e.getArg! 0)
      if a.isAppOfArity ``Fin.mk 3 then
        return USize.ofNat (← reduceEval (a.getArg! 1))
    throwFailedToEval e

/-- Evaluate a `Bool` by checking for the `true`/`false` constructors. -/
instance : ReduceEval Bool where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOf ``true then pure true
    else if e.isAppOf ``false then pure false
    else throwFailedToEval e

/-- Evaluate a `BinderInfo` by matching the nullary constructor name. -/
instance : ReduceEval BinderInfo where
  reduceEval := fun e => do
    match (← whnf e).constName? with
    | some ``BinderInfo.default => pure .default
    | some ``BinderInfo.implicit => pure .implicit
    | some ``BinderInfo.strictImplicit => pure .strictImplicit
    | some ``BinderInfo.instImplicit => pure .instImplicit
    | _ => throwFailedToEval e

/-- Evaluate a `Literal` from either the `natVal` or `strVal` constructor. -/
instance : ReduceEval Literal where
  reduceEval := fun e => do
    let e ← whnf e
    if e.isAppOfArity ``Literal.natVal 1 then
      return .natVal (← reduceEval (e.getArg! 0))
    else if e.isAppOfArity ``Literal.strVal 1 then
      return .strVal (← reduceEval (e.getArg! 0))
    else throwFailedToEval e

/-- Evaluate an `MVarId` from an `MVarId.mk` application. -/
instance : ReduceEval MVarId where
  reduceEval e := do
    let e ← whnf e
    if e.isAppOfArity ``MVarId.mk 1 then
      return ⟨← reduceEval (e.getArg! 0)⟩
    else throwFailedToEval e

/-- Evaluate a `LevelMVarId` from a `LevelMVarId.mk` application. -/
instance : ReduceEval LevelMVarId where
  reduceEval e := do
    let e ← whnf e
    if e.isAppOfArity ``LevelMVarId.mk 1 then
      return ⟨← reduceEval (e.getArg! 0)⟩
    else throwFailedToEval e

/-- Evaluate an `FVarId` from an `FVarId.mk` application. -/
instance : ReduceEval FVarId where
  reduceEval e := do
    let e ← whnf e
    if e.isAppOfArity ``FVarId.mk 1 then
      return ⟨← reduceEval (e.getArg! 0)⟩
    else throwFailedToEval e
theorem_proving_in_lean4/README.md
Theorem Proving in Lean 4 ----------------------- This repository contains the source code of the book _Theorem Proving in Lean 4_ by Jeremy Avigad, Leonardo de Moura, Soonho Kong, and Sebastian Ullrich, with contributions from the Lean Community. To build the book, change to the `book` directory and run `lake exe tpil`. After this, `book/_out/html-multi` contains a multi-page Web version of the book.
theorem_proving_in_lean4/examples/Examples.lean
-- This module serves as the root of the `Examples` library.
-- Import modules here that should be built as part of the library.
import Examples.Basic

-- Demonstration of `conv` mode: rewrite only the left-hand side of the goal,
-- navigating to the subterm `g x x` before applying `h₁`.
example (g : Nat → Nat → Nat) (h₁ : ∀ x, x ≠ 0 → g x x = 1) (h₂ : x ≠ 0) : g x x + x = 1 + x := by
  conv =>
    lhs -- ⊢ g x x + x
    arg 1 -- ⊢ g x x
    rw [h₁] -- 2 goals: ⊢ 1, ⊢ x ≠ 0
    -- First goal: the rewritten subterm `1` needs no further conversion.
    . skip
    -- Second goal: discharge the side condition `x ≠ 0` with the hypothesis,
    -- dropping into tactic mode from within `conv`.
    . tactic => exact h₂
theorem_proving_in_lean4/examples/Examples/Basic.lean
/-- Placeholder greeting value used by the `Examples` library scaffold. -/
def hello : String := "world"
theorem_proving_in_lean4/book/TPiL.lean
-- Root module of the book: imports every chapter and assembles the
-- top-level Verso document.
import VersoManual
import TPiL.Intro
import TPiL.DependentTypeTheory
import TPiL.PropositionsAndProofs
import TPiL.QuantifiersEquality
import TPiL.Tactics
import TPiL.InteractingWithLean
import TPiL.InductiveTypes
import TPiL.InductionAndRecursion
import TPiL.StructuresAndRecords
import TPiL.TypeClasses
import TPiL.Conv
import TPiL.AxiomsComputation

open Verso.Genre Manual
open Verso Code External

-- `{versionString}[]` role: reads the toolchain pin from
-- `../examples/lean-toolchain` (relative to the build's working directory),
-- strips the `leanprover/lean4:` prefix and a leading `v`, trims whitespace,
-- and renders the result as inline code. Takes no arguments.
open Verso Doc Elab in
open Lean (quote) in
@[role_expander versionString]
def versionString : RoleExpander
  | #[], #[] => do
    let version ← IO.FS.readFile "../examples/lean-toolchain"
    let version := version.dropPrefix "leanprover/lean4:" |>.dropPrefix "v" |>.trimAscii |>.copy
    pure #[← ``(Verso.Doc.Inline.code $(quote version))]
  | _, _ => throwError "Unexpected arguments"

#doc (Manual) "Theorem Proving in Lean 4" =>
%%%
authors := ["Jeremy Avigad", "Leonardo de Moura", "Soonho Kong", "Sebastian Ullrich"]
authorshipNote := some "with contributions from the Lean Community"
%%%

This version of the text assumes you’re using Lean 4 (specifically {versionString}[]).
See the [Quickstart section](https://lean-lang.org/documentation/setup/) of the Lean documentation
to install Lean.
The first version of this book was written for Lean 2, and the Lean 3 version is available
[here](https://leanprover.github.io/theorem_proving_in_lean/).

{include 1 TPiL.Intro}

{include 1 TPiL.DependentTypeTheory}

{include 1 TPiL.PropositionsAndProofs}

{include 1 TPiL.QuantifiersEquality}

{include 1 TPiL.Tactics}

{include 1 TPiL.InteractingWithLean}

{include 1 TPiL.InductiveTypes}

{include 1 TPiL.InductionAndRecursion}

{include 1 TPiL.StructuresAndRecords}

{include 1 TPiL.TypeClasses}

{include 1 TPiL.Conv}

{include 1 TPiL.AxiomsComputation}
theorem_proving_in_lean4/book/Main.lean
-- Build entry point: renders the book defined in `TPiL` with the
-- configuration below.
import VersoManual
import TPiL

open Verso.Genre Manual
open Verso Code External

-- Plausible analytics snippet, injected into the <head> of every generated
-- HTML page via `extraHead` below.
open Verso.Output.Html in
def plausible := {{
  <script defer="defer" data-domain="lean-lang.org" src="https://plausible.io/js/script.outbound-links.js"></script>
}}

-- Output configuration: multi-page HTML only (no TeX, no single-page HTML),
-- splitting one heading level deep, with static assets, fonts, branding, and
-- source/issue links for the rendered pages.
def config : Config where
  emitTeX := false
  emitHtmlSingle := false
  emitHtmlMulti := true
  htmlDepth := 1
  extraFiles := [("static", "static")]
  extraCss := Std.HashSet.ofList [
    "/static/theme.css",
    "/static/fonts/source-serif/source-serif-text.css",
    "/static/fonts/source-code-pro/source-code-pro.css",
    "/static/fonts/source-sans/source-sans-3.css",
    "/static/fonts/noto-sans-mono/noto-sans-mono.css"
  ]
  extraHead := #[plausible]
  logo := some "/static/lean_logo.svg"
  sourceLink := some "https://github.com/leanprover/theorem_proving_in_lean4"
  issueLink := some "https://github.com/leanprover/theorem_proving_in_lean4/issues"
  -- Resolve cross-references against both local and remote targets.
  linkTargets := fun st => st.localTargets ++ st.remoteTargets

def main := manualMain (%doc TPiL) (config := config)
theorem_proving_in_lean4/book/TPiL/Intro.lean
import VersoManual import TPiL.Examples open TPiL open Verso.Genre Manual open Verso Code External #doc (Manual) "Introduction" => %%% tag := "Intro" htmlSplit := .never %%% # Computers and Theorem Proving %%% tag := "computers-and-theorem-proving" %%% _Formal verification_ involves the use of logical and computational methods to establish claims that are expressed in precise mathematical terms. These can include ordinary mathematical theorems, as well as claims that pieces of hardware or software, network protocols, and mechanical and hybrid systems meet their specifications. In practice, there is not a sharp distinction between verifying a piece of mathematics and verifying the correctness of a system: formal verification requires describing hardware and software systems in mathematical terms, at which point establishing claims as to their correctness becomes a form of theorem proving. Conversely, the proof of a mathematical theorem may require a lengthy computation, in which case verifying the truth of the theorem requires verifying that the computation does what it is supposed to do. The gold standard for supporting a mathematical claim is to provide a proof, and twentieth-century developments in logic show most if not all conventional proof methods can be reduced to a small set of axioms and rules in any of a number of foundational systems. With this reduction, there are two ways that a computer can help establish a claim: it can help find a proof in the first place, and it can help verify that a purported proof is correct. _Automated theorem proving_ focuses on the “finding” aspect. Resolution theorem provers, tableau theorem provers, fast satisfiability solvers, and so on provide means of establishing the validity of formulas in propositional and first-order logic. Other systems provide search procedures and decision procedures for specific languages and domains, such as linear or nonlinear expressions over the integers or the real numbers. 
Architectures like SMT (“satisfiability modulo theories”) combine domain-general search methods with domain-specific procedures.
Computer algebra systems and specialized mathematical software packages provide means of carrying out mathematical computations, establishing mathematical bounds, or finding mathematical objects.
A calculation can be viewed as a proof as well, and these systems, too, help establish mathematical claims.

Automated reasoning systems strive for power and efficiency, often at the expense of guaranteed soundness.
Such systems can have bugs, and it can be difficult to ensure that the results they deliver are correct.

In contrast, _interactive theorem proving_ focuses on the “verification” aspect of theorem proving, requiring that every claim is supported by a proof in a suitable axiomatic foundation.
This sets a very high standard: every rule of inference and every step of a calculation has to be justified by appealing to prior definitions and theorems, all the way down to basic axioms and rules.
In fact, most such systems provide fully elaborated “proof objects” that can be communicated to other systems and checked independently.
Constructing such proofs typically requires much more input and interaction from users, but it allows you to obtain deeper and more complex proofs.

The _Lean Theorem Prover_ aims to bridge the gap between interactive and automated theorem proving, by situating automated tools and methods in a framework that supports user interaction and the construction of fully specified axiomatic proofs.
The goal is to support both mathematical reasoning and reasoning about complex systems, and to verify claims in both domains.

Lean's underlying logic has a computational interpretation, and Lean can be viewed equally well as a programming language.
More to the point, it can be viewed as a system for writing programs with a precise semantics, as well as reasoning about the functions that the programs compute.
Lean also has mechanisms to serve as its own _metaprogramming language_, which means that you can implement automation and extend the functionality of Lean using Lean itself.
These aspects of Lean are described in the free online book, [Functional Programming in Lean](https://lean-lang.org/functional_programming_in_lean/), though computational aspects of the system will make an appearance here.

# About Lean
%%%
tag := "about-lean"
%%%

The _Lean_ project was launched by Leonardo de Moura at Microsoft Research Redmond in 2013.
It is an ongoing, long-term effort, and much of the potential for automation will be realized only gradually over time.
Lean is released under the [Apache 2.0 license](https://github.com/leanprover/lean4/blob/master/LICENSE), a permissive open source license that permits others to use and extend the code and mathematical libraries freely.

To install Lean on your computer, consider using the [Quickstart](https://lean-lang.org/install/) instructions.
The Lean source code, and instructions for building Lean, are available at [https://github.com/leanprover/lean4/](https://github.com/leanprover/lean4/).

This tutorial describes the current version of Lean, known as Lean 4.

# About this Book
%%%
tag := "about-this-book"
%%%

This book is designed to teach you to develop and verify proofs in Lean.
Much of the background information you will need in order to do this is not specific to Lean at all.
To start with, you will learn the logical system that Lean is based on, a version of _dependent type theory_ that is powerful enough to prove almost any conventional mathematical theorem, and expressive enough to do it in a natural way.
More specifically, Lean is based on a version of a system known as the Calculus of Constructions with inductive types.
Lean can not only define mathematical objects and express mathematical assertions in dependent type theory, but it also can be used as a language for writing proofs.
Because fully detailed axiomatic proofs are so complicated, the challenge of theorem proving is to have the computer fill in as many of the details as possible. You will learn various methods to support this in {ref "dependent-type-theory"}[dependent type theory]. For example, term rewriting, and Lean's automated methods for simplifying terms and expressions automatically. Similarly, methods of _elaboration_ and _type inference_, which can be used to support flexible forms of algebraic reasoning. Finally, you will learn about features that are specific to Lean, including the language you use to communicate with the system, and the mechanisms Lean offers for managing complex theories and data. Throughout the text you will find examples of Lean code like the one below: ```lean theorem and_commutative (p q : Prop) : p ∧ q → q ∧ p := fun hpq : p ∧ q => have hp : p := And.left hpq have hq : q := And.right hpq show q ∧ p from And.intro hq hp ``` Next to every code example in this book, you will see a button that reads “Copy to clipboard”. Pressing the button copies the example with enough surrounding context to make the code compile correctly. You can paste the example code into [VS Code](https://code.visualstudio.com/) and modify the examples, and Lean will check the results and provide feedback continuously as you type. We recommend running the examples and experimenting with the code on your own as you work through the chapters that follow. You can open this book in VS Code by using the command “Lean 4: Docs: Show Documentation Resources” and selecting “Theorem Proving in Lean 4” in the tab that opens. # Acknowledgments %%% tag := "acknowledgments" %%% This tutorial is an open access project maintained on Github. Many people have contributed to the effort, providing corrections, suggestions, examples, and text. 
We are grateful to Ulrik Buchholz, Kevin Buzzard, Mario Carneiro, Nathan Carter, Eduardo Cavazos, Amine Chaieb, Joe Corneli, William DeMeo, Marcus Klaas de Vries, Ben Dyer, Gabriel Ebner, Anthony Hart, Simon Hudon, Sean Leather, Assia Mahboubi, Gihan Marasingha, Patrick Massot, Christopher John Mazey, Sebastian Ullrich, Floris van Doorn, Daniel Velleman, Théo Zimmerman, Paul Chisholm, Chris Lovett, and Siddhartha Gadgil for their contributions. Please see [lean prover](https://github.com/leanprover/) and [lean community](https://github.com/leanprover-community/) for an up to date list of our amazing contributors.
theorem_proving_in_lean4/book/TPiL/PropositionsAndProofs.lean
import VersoManual import TPiL.Examples open Verso.Genre Manual open TPiL set_option pp.rawOnError true #doc (Manual) "Propositions and Proofs" => %%% tag := "propositions-and-proofs" htmlSplit := .never %%% By now, you have seen some ways of defining objects and functions in Lean. In this chapter, we will begin to explain how to write mathematical assertions and proofs in the language of dependent type theory as well. # Propositions as Types %%% tag := "propositions-as-types" %%% One strategy for proving assertions about objects defined in the language of dependent type theory is to layer an assertion language and a proof language on top of the definition language. But there is no reason to multiply languages in this way: dependent type theory is flexible and expressive, and there is no reason we cannot represent assertions and proofs in the same general framework. For example, we could introduce a new type, {lean}`Prop`, to represent propositions, and introduce constructors to build new propositions from others. ```lean def Implies (p q : Prop) : Prop := p → q ------ #check And -- And (a b : Prop) : Prop #check Or -- Or (a b : Prop) : Prop #check Not -- Not (a : Prop) : Prop #check Implies -- Implies (p q : Prop) : Prop variable (p q r : Prop) #check And p q -- p ∧ q : Prop #check Or (And p q) r -- p ∧ q ∨ r : Prop #check Implies (And p q) (And q p) -- Implies (p ∧ q) (q ∧ p) : Prop ``` ```setup variable (p : Prop) structure Proof (p : Prop) : Type where proof : p variable (t : p) (q r : Prop) def Implies (p q : Prop) : Prop := p → q universe u variable (t1 t2 : p) {α : Type u} {β : Type v} ``` We could then introduce, for each element {lean}`p : Prop`, another type {lean}`Proof p`, for the type of proofs of {lean}`p`. An “axiom” would be a constant of such a type. 
```lean def Implies (p q : Prop) : Prop := p → q structure Proof (p : Prop) : Type where proof : p ------ #check Proof -- Proof (p : Prop) : Type axiom and_commut (p q : Prop) : Proof (Implies (And p q) (And q p)) variable (p q : Prop) #check and_commut p q -- and_commut p q : Proof (Implies (p ∧ q) (q ∧ p)) ``` In addition to axioms, however, we would also need rules to build new proofs from old ones. For example, in many proof systems for propositional logic, we have the rule of _modus ponens_: > From a proof of {lean}`Implies p q` and a proof of {lean}`p`, we obtain a proof of {lean}`q`. We could represent this as follows: ```lean def Implies (p q : Prop) : Prop := p → q structure Proof (p : Prop) : Type where proof : p ------ axiom modus_ponens (p q : Prop) : Proof (Implies p q) → Proof p → Proof q ``` Systems of natural deduction for propositional logic also typically rely on the following rule: > Suppose that, assuming {lean}`p` as a hypothesis, we have a proof of {lean}`q`. Then we can “cancel” the hypothesis and obtain a proof of {lean}`Implies p q`. We could render this as follows: ```lean def Implies (p q : Prop) : Prop := p → q structure Proof (p : Prop) : Type where proof : p ------ axiom implies_intro (p q : Prop) : (Proof p → Proof q) → Proof (Implies p q) ``` This approach would provide us with a reasonable way of building assertions and proofs. Determining that an expression {lean}`t` is a correct proof of assertion {lean}`p` would then simply be a matter of checking that {lean}`t` has type {lean}`Proof p`. Some simplifications are possible, however. To start with, we can avoid writing the term {lean}`Proof` repeatedly by conflating {lean}`Proof p` with {lean}`p` itself. In other words, whenever we have {lean}`p : Prop`, we can interpret {lean}`p` as a type, namely, the type of its proofs. We can then read {lean}`t : p` as the assertion that {lean}`t` is a proof of {lean}`p`. 
Moreover, once we make this identification, the rules for implication show that we can pass back and forth between {lean}`Implies p q` and {lean}`p → q`. In other words, implication between propositions {lean}`p` and {lean}`q` corresponds to having a function that takes any element of {lean}`p` to an element of {lean}`q`. As a result, the introduction of the connective {lean}`Implies` is entirely redundant: we can use the usual function space constructor {lean}`p → q` from dependent type theory as our notion of implication. This is the approach followed in the Calculus of Constructions, and hence in Lean as well. The fact that the rules for implication in a proof system for natural deduction correspond exactly to the rules governing abstraction and application for functions is an instance of the {deftech}_Curry-Howard isomorphism_, sometimes known as the {deftech}_propositions-as-types_ paradigm. In fact, the type {lean}`Prop` is syntactic sugar for {lean}`Sort 0`, the very bottom of the type hierarchy described in the last chapter. Moreover, {lean}`Type u` is also just syntactic sugar for {lean}`Sort (u+1)`. {lean}`Prop` has some special features, but like the other type universes, it is closed under the arrow constructor: if we have {lean}`p q : Prop`, then {lean}`p → q : Prop`. There are at least two ways of thinking about propositions as types. To some who take a constructive view of logic and mathematics, this is a faithful rendering of what it means to be a proposition: a proposition {lean}`p` represents a sort of data type, namely, a specification of the type of data that constitutes a proof. A proof of {lean}`p` is then simply an object {lean}`t : p` of the right type. Those not inclined to this ideology can view it, rather, as a simple coding trick. To each proposition {lean}`p` we associate a type that is empty if {lean}`p` is false and has a single element, say {lit}`*`, if {lean}`p` is true. 
In the latter case, let us say that (the type associated with) {lean}`p` is _inhabited_. It just so happens that the rules for function application and abstraction can conveniently help us keep track of which elements of {lean}`Prop` are inhabited. So constructing an element {lean}`t : p` tells us that {lean}`p` is indeed true. You can think of the inhabitant of {lean}`p` as being the “fact that {lean}`p` is true.” A proof of {lean}`p → q` uses “the fact that {lean}`p` is true” to obtain “the fact that {lean}`q` is true.” Indeed, if {lean}`p : Prop` is any proposition, Lean's kernel treats any two elements {lean}`t1 t2 : p` as being definitionally equal, much the same way as it treats {lit}`(fun x => t) s` and {lit}`t[s/x]` as definitionally equal. This is known as {deftech}_proof irrelevance_, and is consistent with the interpretation in the last paragraph. It means that even though we can treat proofs {lean}`t : p` as ordinary objects in the language of dependent type theory, they carry no information beyond the fact that {lean}`p` is true. The two ways we have suggested thinking about the {tech}[propositions-as-types] paradigm differ in a fundamental way. From the constructive point of view, proofs are abstract mathematical objects that are _denoted_ by suitable expressions in dependent type theory. In contrast, if we think in terms of the coding trick described above, then the expressions themselves do not denote anything interesting. Rather, it is the fact that we can write them down and check that they are well-typed that ensures that the proposition in question is true. In other words, the expressions _themselves_ are the proofs. In the exposition below, we will slip back and forth between these two ways of talking, at times saying that an expression “constructs” or “produces” or “returns” a proof of a proposition, and at other times simply saying that it “is” such a proof. 
This is similar to the way that computer scientists occasionally blur the distinction between syntax and semantics by saying, at times, that a program “computes” a certain function, and at other times speaking as though the program “is” the function in question. In any case, all that really matters is the bottom line. To formally express a mathematical assertion in the language of dependent type theory, we need to exhibit a term {lean}`p : Prop`. To _prove_ that assertion, we need to exhibit a term {lean}`t : p`. Lean's task, as a proof assistant, is to help us to construct such a term, {lean}`t`, and to verify that it is well-formed and has the correct type. # Working with Propositions as Types %%% tag := "working-with-propositions-as-types" %%% In the {tech}[propositions-as-types] paradigm, theorems involving only {lit}`→` can be proved using lambda abstraction and application. In Lean, the {kw}`theorem` command introduces a new theorem: ```lean set_option linter.unusedVariables false --- variable {p : Prop} variable {q : Prop} theorem t1 : p → q → p := fun hp : p => fun hq : q => hp ``` Compare this proof to the expression {lean}`fun x : α => fun y : β => x` of type {lean}`α → β → α`, where {lean}`α` and {lean}`β` are data types. This describes the function that takes arguments {leanRef}`x` and {leanRef}`y` of type {lean}`α` and {lean}`β`, respectively, and returns {leanRef}`x`. The proof of {lean}`t1` has the same form, the only difference being that {lean}`p` and {lean}`q` are elements of {lean}`Prop` rather than {lean}`Type`. Intuitively, our proof of {lean}`p → q → p` assumes {lean}`p` and {lean}`q` are true, and uses the first hypothesis (trivially) to establish that the conclusion, {lean}`p`, is true. Note that the {kw}`theorem` command is really a version of the {kw}`def` command: under the propositions and types correspondence, proving the theorem {lean}`p → q → p` is really the same as defining an element of the associated type. 
To the kernel type checker, there is no difference between the two. There are a few pragmatic differences between definitions and theorems, however. In normal circumstances, it is never necessary to unfold the “definition” of a theorem; by {tech}[proof irrelevance], any two proofs of that theorem are definitionally equal. Once the proof of a theorem is complete, typically we only need to know that the proof exists; it doesn't matter what the proof is. In light of that fact, Lean tags proofs as _irreducible_, which serves as a hint to the parser (more precisely, the _elaborator_) that there is generally no need to unfold them when processing a file. In fact, Lean is generally able to process and check proofs in parallel, since assessing the correctness of one proof does not require knowing the details of another. Additionally, {ref "variables-and-sections"}[section variables] that are referred to in the body of a definition are automatically added as parameters, but only the variables referred to in a theorem's type are added. This is because the way in which a statement is proved should not influence the statement that is being proved. As with definitions, the {kw}`#print` command will show you the proof of a theorem: ```lean set_option linter.unusedVariables false variable {p : Prop} variable {q : Prop} ------ theorem t1 : p → q → p := fun hp : p => fun hq : q => hp #print t1 -- theorem t1 : ∀ {p q : Prop}, p → q → p := fun {p q} hp hq => hp ``` Notice that the lambda abstractions {leanRef}`hp : p` and {leanRef}`hq : q` can be viewed as temporary assumptions in the proof of {lean}`t1`. 
Lean also allows us to specify the type of the final term {leanRef}`hp`, explicitly, with a {kw}`show` statement: ```lean set_option linter.unusedVariables false variable {p : Prop} variable {q : Prop} ------ theorem t1 : p → q → p := fun hp : p => fun hq : q => show p from hp ``` Adding such extra information can improve the clarity of a proof and help detect errors when writing a proof. The {kw}`show` command does nothing more than annotate the type, and, internally, all the presentations of {leanRef}`t1` that we have seen produce the same term. As with ordinary definitions, we can move the lambda-abstracted variables to the left of the colon: ```lean set_option linter.unusedVariables false variable {p : Prop} variable {q : Prop} ------ theorem t1 (hp : p) (hq : q) : p := hp #print t1 -- theorem t1 : ∀ {p q : Prop}, p → q → p := fun {p q} hp hq => hp ``` We can use the theorem {leanRef}`t1` just as a function application: ```lean set_option linter.unusedVariables false variable {p : Prop} variable {q : Prop} ------ theorem t1 (hp : p) (hq : q) : p := hp axiom hp : p theorem t2 : q → p := t1 hp ``` The {kw}`axiom` declaration postulates the existence of an element of the given type and may compromise logical consistency. For example, we can use it to postulate that the empty type {lean}`False` has an element: ```lean axiom unsound : False -- Everything follows from false theorem ex : 1 = 0 := False.elim unsound ``` :::setup ``` variable {p q : Prop} (hp : p) {t1 : p → q → p} ``` Declaring an “axiom” {lean}`hp : p` is tantamount to declaring that {lean}`p` is true, as witnessed by {lean}`hp`. Applying the theorem {lean}`t1 : p → q → p` to the fact {lean}`hp : p` that {lean}`p` is true yields the theorem {lean}`t1 hp : q → p`. 
::: Recall that we can also write theorem {leanRef}`t1` as follows: ```lean set_option linter.unusedVariables false ------ theorem t1 {p q : Prop} (hp : p) (hq : q) : p := hp #print t1 ``` The type of {leanRef}`t1` is now {lean}`∀ {p q : Prop}, p → q → p`. We can read this as the assertion “for every pair of propositions {lean}`p`{lit}` `{lean}`q`, we have {lean}`p → q → p`.” For example, we can move all parameters to the right of the colon: ```lean set_option linter.unusedVariables false ------ theorem t1 : ∀ {p q : Prop}, p → q → p := fun {p q : Prop} (hp : p) (hq : q) => hp ``` If {lean}`p` and {lean}`q` have been declared as {ref "variables-and-sections"}[variables], Lean will generalize them for us automatically: ```lean variable {p q : Prop} theorem t1 : p → q → p := fun (hp : p) (hq : q) => hp ``` When we generalize {leanRef}`t1` in such a way, we can then apply it to different pairs of propositions, to obtain different instances of the general theorem. ```lean set_option linter.unusedVariables false ------ theorem t1 (p q : Prop) (hp : p) (hq : q) : p := hp variable (p q r s : Prop) #check t1 p q -- t1 p q : p → q → p #check t1 r s -- t1 r s : r → s → r #check t1 (r → s) (s → r) -- t1 (r → s) (s → r) : (r → s) → (s → r) → r → s variable (h : r → s) #check t1 (r → s) (s → r) h -- t1 (r → s) (s → r) h : (s → r) → r → s ``` Once again, using the {tech}[propositions-as-types] correspondence, the variable {leanRef}`h` of type {leanRef}`r → s` can be viewed as the hypothesis, or premise, that {leanRef}`r → s` holds. As another example, let us consider the composition function discussed in the last chapter, now with propositions instead of types. ```lean variable (p q r s : Prop) theorem t2 (h₁ : q → r) (h₂ : p → q) : p → r := fun h₃ : p => show r from h₁ (h₂ h₃) ``` As a theorem of propositional logic, what does {leanRef}`t2` say? 
Note that it is often useful to use numeric Unicode subscripts, entered as {kbd}`\0`, {kbd}`\1`, {kbd}`\2`, ..., for hypotheses, as we did in this example. # Propositional Logic %%% tag := "propositional-logic" %%% Lean defines all the standard logical connectives and notation. The propositional connectives come with the following notation: :::table +header * * ASCII * Unicode * Editor shortcut * Definition * * {lean}`True` * {empty}[] * {empty}[] * {lean}`True` * * {lean}`False` * {empty}[] * {empty}[] * {lean}`False` * * {lean}`Not` * {lit}`¬` * {kbd}`\not`, {kbd}`\neg` * {lean}`Not` * * {lit}`/\` * {lit}`∧` * {kbd}`\and` * {lean}`And` * * {lit}`\/` * {lit}`∨` * {kbd}`\or` * {lean}`Or` * * {lit}`->` * {lit}`→` * {kbd}`\to`, {kbd}`\r`, {kbd}`\imp` * {empty}[] * * {lit}`<->` * {lit}`↔` * {kbd}`\iff`, {kbd}`\lr` * {lean}`Iff` ::: They all take values in {lean}`Prop`. ```lean variable (p q : Prop) #check p → q → p ∧ q #check ¬p → p ↔ False #check p ∨ q → q ∨ p ``` :::setup ``` variable (p q r a b c d e : Prop) ``` The order of operations is as follows: unary negation {lit}`¬` binds most strongly, then {lit}`∧`, then {lit}`∨`, then {lit}`→`, and finally {lit}`↔`. For example, {lean}`a ∧ b → c ∨ d ∧ e` means {lean}`(a ∧ b) → (c ∨ (d ∧ e))`. Remember that {lit}`→` associates to the right (nothing changes now that the arguments are elements of {lean}`Prop`, instead of some other {lean}`Type`), as do the other binary connectives. So if we have {lean}`p q r : Prop`, the expression {lean}`p → q → r` reads “if {lean}`p`, then if {lean}`q`, then {lean}`r`.” This is just the “curried” form of {lean}`p ∧ q → r`. ::: In the last chapter we observed that lambda abstraction can be viewed as an “introduction rule” for {lit}`→`. In the current setting, it shows how to “introduce” or establish an implication. Application can be viewed as an “elimination rule,” showing how to “eliminate” or use an implication in a proof. 
The other propositional connectives are defined in Lean's library, and are automatically imported. Each connective comes with its canonical introduction and elimination rules. ## Conjunction %%% tag := "conjunction" %%% :::setup ``` variable (p q : Prop) (h1 : p) (h2 : q) ``` The expression {lean}`And.intro h1 h2` builds a proof of {lean}`p ∧ q` using proofs {lean}`h1 : p` and {lean}`h2 : q`. It is common to describe {lean}`And.intro` as the _and-introduction_ rule. In the next example we use {lean}`And.intro` to create a proof of {lean}`p → q → p ∧ q`. ::: ```lean variable (p q : Prop) example (hp : p) (hq : q) : p ∧ q := And.intro hp hq #check fun (hp : p) (hq : q) => And.intro hp hq ``` The {kw}`example` command states a theorem without naming it or storing it in the permanent context. Essentially, it just checks that the given term has the indicated type. It is convenient for illustration, and we will use it often. :::setup ``` variable (p q : Prop) (h : p ∧ q) ``` The expression {lean}`And.left h` creates a proof of {lean}`p` from a proof {lean}`h : p ∧ q`. Similarly, {lean}`And.right h` is a proof of {lean}`q`. They are commonly known as the left and right _and-elimination_ rules. ::: ```lean variable (p q : Prop) example (h : p ∧ q) : p := And.left h example (h : p ∧ q) : q := And.right h ``` We can now prove {lean}`p ∧ q → q ∧ p` with the following proof term. ```lean variable (p q : Prop) example (h : p ∧ q) : q ∧ p := And.intro (And.right h) (And.left h) ``` :::setup ``` variable (p q : Prop) (hp : p) (hq : q) (α β : Type) (a : α) (b : β) ``` Notice that and-introduction and and-elimination are similar to the pairing and projection operations for the Cartesian product. The difference is that given {lean}`hp : p` and {lean}`hq : q`, {lean}`And.intro hp hq` has type {lean}`p ∧ q : Prop`, while given {lean}`a : α` and {lean}`b : β`, {lean}`Prod.mk a b` has type {lean}`α × β : Type`. 
{lean}`Prod` cannot be used with {lean}`Prop`s, and {lean}`And` cannot be used with {lean}`Type`s. The similarity between {lit}`∧` and {lit}`×` is another instance of the {tech}[Curry-Howard isomorphism], but in contrast to implication and the function space constructor, {lit}`∧` and {lit}`×` are treated separately in Lean. With the analogy, however, the proof we have just constructed is similar to a function that swaps the elements of a pair. We will see in {ref "structures-and-records"}[Structures and Records] that certain types in Lean are _structures_, which is to say, the type is defined with a single canonical _constructor_ which builds an element of the type from a sequence of suitable arguments. For every {lean}`p q : Prop`, {lean}`p ∧ q` is an example: the canonical way to construct an element is to apply {lean}`And.intro` to suitable arguments {lean}`hp : p` and {lean}`hq : q`. Lean allows us to use _anonymous constructor_ notation {lit}`⟨arg1, arg2, ...⟩` in situations like these, when the relevant type is an inductive type and can be inferred from the context. In particular, we can often write {lean (type := "p ∧ q")}`⟨hp, hq⟩` instead of {lean}`And.intro hp hq`: ::: ```lean variable (p q : Prop) variable (hp : p) (hq : q) #check (⟨hp, hq⟩ : p ∧ q) ``` These angle brackets are obtained by typing {kbd}`\<` and {kbd}`\>`, respectively. :::setup ``` inductive Foo where | mk inductive Bar where | mk : Foo → Bar variable (e : Foo) def Foo.bar (x : Foo) : Bar := .mk x ``` Lean provides another useful syntactic gadget. Given an expression {lean}`e` of an inductive type {lean}`Foo` (possibly applied to some arguments), the notation {lean}`e.bar` is shorthand for {lean}`Foo.bar e`. This provides a convenient way of accessing functions without opening a namespace. 
For example, the following two expressions mean the same thing: ::: ```lean variable (xs : List Nat) #check List.length xs #check xs.length ``` :::setup ``` variable (p q : Prop) (h : p ∧ q) ``` As a result, given {lean}`h : p ∧ q`, we can write {lean}`h.left` for {lean}`And.left h` and {lean}`h.right` for {lean}`And.right h`. We can therefore rewrite the sample proof above conveniently as follows: ::: ```lean variable (p q : Prop) example (h : p ∧ q) : q ∧ p := ⟨h.right, h.left⟩ ``` There is a fine line between brevity and obfuscation, and omitting information in this way can sometimes make a proof harder to read. But for straightforward constructions like the one above, when the type of {leanRef}`h` and the goal of the construction are salient, the notation is clean and effective. It is common to iterate constructions like “And.” Lean also allows you to flatten nested constructors that associate to the right, so that these two proofs are equivalent: ```lean variable (p q : Prop) example (h : p ∧ q) : q ∧ p ∧ q := ⟨h.right, ⟨h.left, h.right⟩⟩ example (h : p ∧ q) : q ∧ p ∧ q := ⟨h.right, h.left, h.right⟩ ``` This is often useful as well. ## Disjunction %%% tag := "disjunction" %%% :::setup ``` variable (p q : Prop) (hp : p) (hq : q) ``` The expression {lean}`Or.intro_left q hp` creates a proof of {lean}`p ∨ q` from a proof {lean}`hp : p`. Similarly, {lean}`Or.intro_right p hq` creates a proof for {lean}`p ∨ q` using a proof {lean}`hq : q`. These are the left and right _or-introduction_ rules. ::: ```lean variable (p q : Prop) example (hp : p) : p ∨ q := Or.intro_left q hp example (hq : q) : p ∨ q := Or.intro_right p hq ``` :::setup ``` variable (p q r : Prop) (hpq : p ∨ q) (hpr : p → r) (hqr : q → r) ``` The _or-elimination_ rule is slightly more complicated. The idea is that we can prove {lean}`r` from {lean}`p ∨ q`, by showing that {lean}`r` follows from {lean}`p` and that {lean}`r` follows from {lean}`q`. In other words, it is a proof by cases. 
In the expression {lean}`Or.elim hpq hpr hqr`, {lean}`Or.elim` takes three arguments, {lean}`hpq : p ∨ q`, {lean}`hpr : p → r` and {lean}`hqr : q → r`, and produces a proof of {lean}`r`. In the following example, we use {lean}`Or.elim` to prove {lean}`p ∨ q → q ∨ p`. ::: ```lean variable (p q r : Prop) example (h : p ∨ q) : q ∨ p := Or.elim h (fun hp : p => show q ∨ p from Or.intro_right q hp) (fun hq : q => show q ∨ p from Or.intro_left p hq) ``` In most cases, the first argument of {lean}`Or.intro_right` and {lean}`Or.intro_left` can be inferred automatically by Lean. Lean therefore provides {lean}`Or.inr` and {lean}`Or.inl` which can be viewed as shorthand for {lean}`Or.intro_right _` and {lean}`Or.intro_left _`. Thus the proof term above could be written more concisely: ```lean variable (p q r : Prop) example (h : p ∨ q) : q ∨ p := Or.elim h (fun hp => Or.inr hp) (fun hq => Or.inl hq) ``` Notice that there is enough information in the full expression for Lean to infer the types of {leanRef}`hp` and {leanRef}`hq` as well. But using the type annotations in the longer version makes the proof more readable, and can help catch and debug errors. :::setup ``` variable (h : p ∨ q) ``` Because {lean}`Or` has two constructors, we cannot use anonymous constructor notation. But we can still write {lean}`h.elim` instead of {lean}`Or.elim h`: ::: ```lean variable (p q r : Prop) example (h : p ∨ q) : q ∨ p := h.elim (fun hp => Or.inr hp) (fun hq => Or.inl hq) ``` Once again, you should exercise judgment as to whether such abbreviations enhance or diminish readability. ## Negation and Falsity %%% tag := "negation-and-falsity" %%% :::setup ``` variable (p q : Prop) (hnp : ¬ p) (hp : p) ``` Negation, {lean}`¬p`, is actually defined to be {lean}`p → False`, so we obtain {lean}`¬p` by deriving a contradiction from {lean}`p`. Similarly, the expression {lean}`hnp hp` produces a proof of {lean}`False` from {lean}`hp : p` and {lean}`hnp : ¬p`. 
The next example uses both these rules to produce a proof of {lean}`(p → q) → ¬q → ¬p`. (The symbol {lit}`¬` is produced by typing {kbd}`\not` or {kbd}`\neg`.) ::: ```lean variable (p q : Prop) example (hpq : p → q) (hnq : ¬q) : ¬p := fun hp : p => show False from hnq (hpq hp) ``` The connective {lean}`False` has a single elimination rule, {lean}`False.elim`, which expresses the fact that anything follows from a contradiction. This rule is sometimes called _ex falso_ (short for _ex falso sequitur quodlibet_), or the _principle of explosion_. ```lean variable (p q : Prop) example (hp : p) (hnp : ¬p) : q := False.elim (hnp hp) ``` The arbitrary fact, {lean}`q`, that follows from falsity is an implicit argument in {lean}`False.elim` and is inferred automatically. This pattern, deriving an arbitrary fact from contradictory hypotheses, is quite common, and is represented by {lean}`absurd`. ```lean variable (p q : Prop) example (hp : p) (hnp : ¬p) : q := absurd hp hnp ``` Here, for example, is a proof of {lean}`¬p → q → (q → p) → r`: ```lean variable (p q r : Prop) example (hnp : ¬p) (hq : q) (hqp : q → p) : r := absurd (hqp hq) hnp ``` Incidentally, just as {lean}`False` has only an elimination rule, {lean}`True` has only an introduction rule, {lean}`True.intro : True`. In other words, {lean}`True` is simply true, and has a canonical proof, {lean}`True.intro`. ## Logical Equivalence %%% tag := "logical-equivalence" %%% :::setup ``` variable (p q : Prop) (h1 : p → q) (h2 : q → p) (h : p ↔ q) ``` The expression {lean}`Iff.intro h1 h2` produces a proof of {lean}`p ↔ q` from {lean}`h1 : p → q` and {lean}`h2 : q → p`. The expression {lean}`Iff.mp h` produces a proof of {lean}`p → q` from {lean}`h : p ↔ q`. Similarly, {lean}`Iff.mpr h` produces a proof of {lean}`q → p` from {lean}`h : p ↔ q`. 
Here is a proof of {lean}`p ∧ q ↔ q ∧ p`: ::: ```lean variable (p q : Prop) theorem and_swap : p ∧ q ↔ q ∧ p := Iff.intro (fun h : p ∧ q => show q ∧ p from And.intro (And.right h) (And.left h)) (fun h : q ∧ p => show p ∧ q from And.intro (And.right h) (And.left h)) #check and_swap p q -- and_swap p q : p ∧ q ↔ q ∧ p variable (h : p ∧ q) example : q ∧ p := Iff.mp (and_swap p q) h ``` We can use the anonymous constructor notation to construct a proof of {lean}`p ↔ q` from proofs of the forward and backward directions, and we can also use {lit}`.` notation with {lit}`mp` and {lit}`mpr`. The previous examples can therefore be written concisely as follows: ```lean variable (p q : Prop) theorem and_swap : p ∧ q ↔ q ∧ p := ⟨ fun h => ⟨h.right, h.left⟩, fun h => ⟨h.right, h.left⟩ ⟩ example (h : p ∧ q) : q ∧ p := (and_swap p q).mp h ``` # Introducing Auxiliary Subgoals %%% tag := "introducing-auxiliary-subgoals" %%% This is a good place to introduce another device Lean offers to help structure long proofs, namely, the {kw}`have` construct, which introduces an auxiliary subgoal in a proof. Here is a small example, adapted from the last section: ```lean variable (p q : Prop) example (h : p ∧ q) : q ∧ p := have hp : p := h.left have hq : q := h.right show q ∧ p from And.intro hq hp ``` :::setup ``` variable (p q : Prop) (s : p) (t : q) ``` Internally, the expression {lean}`have h : p := s; t` produces the term {lean}`(fun (h : p) => t) s`. In other words, {lean}`s` is a proof of {lean}`p`, {lean}`t` is a proof of the desired conclusion assuming {leanRef}`h : p`, and the two are combined by a lambda abstraction and application. This simple device is extremely useful when it comes to structuring long proofs, since we can use intermediate {kw}`have`'s as stepping stones leading to the final goal. ::: Lean also supports a structured way of reasoning backwards from a goal, which models the “suffices to show” construction in ordinary mathematics. 
The next example simply permutes the last two lines in the previous proof. ```lean variable (p q : Prop) example (h : p ∧ q) : q ∧ p := have hp : p := h.left suffices hq : q from And.intro hq hp show q from And.right h ``` Writing {leanRef}`suffices hq : q` leaves us with two goals. First, we have to show that it indeed suffices to show {lean}`q`, by proving the original goal of {leanRef}`q ∧ p` with the additional hypothesis {leanRef}`hq : q`. Then, we have to show {leanRef}`q`. # Classical Logic %%% tag := "classical-logic" %%% The introduction and elimination rules we have seen so far are all constructive, which is to say, they reflect a computational understanding of the logical connectives based on the {tech}[propositions-as-types] correspondence. Ordinary classical logic adds to this the law of the excluded middle, {lean}`p ∨ ¬p`. To use this principle, you have to open the classical namespace. ```lean open Classical variable (p : Prop) #check em p ``` :::setup ``` variable (p q RH : Prop) ``` Intuitively, the constructive “Or” is very strong: asserting {lean}`p ∨ q` amounts to knowing which is the case. If {lean}`RH` represents the Riemann hypothesis, a classical mathematician is willing to assert {lean}`RH ∨ ¬RH`, even though we cannot yet assert either disjunct. ::: One consequence of the law of the excluded middle is the principle of double-negation elimination: ```lean open Classical theorem dne {p : Prop} (h : ¬¬p) : p := Or.elim (em p) (fun hp : p => hp) (fun hnp : ¬p => absurd hnp h) ``` :::setup ``` open Classical variable (p : Prop) theorem dne {p : Prop} (h : ¬¬p) : p := Or.elim (em p) (fun hp : p => hp) (fun hnp : ¬p => absurd hnp h) ``` Double-negation elimination allows one to prove any proposition, {lean}`p`, by assuming {lean}`¬p` and deriving {lean}`False`, because that amounts to proving {lean}`¬¬p`. 
In other words, double-negation elimination allows one to carry out a proof by contradiction, something which is not generally possible in constructive logic. As an exercise, you might try proving the converse, that is, showing that {lean}`em` can be proved from {lean}`dne`. The classical axioms also give you access to additional patterns of proof that can be justified by appeal to {lean}`em`. For example, one can carry out a proof by cases: ::: ```lean open Classical variable (p : Prop) example (h : ¬¬p) : p := byCases (fun h1 : p => h1) (fun h1 : ¬p => absurd h1 h) ``` Or you can carry out a proof by contradiction: ```lean open Classical variable (p : Prop) example (h : ¬¬p) : p := byContradiction (fun h1 : ¬p => show False from h h1) ``` If you are not used to thinking constructively, it may take some time for you to get a sense of where classical reasoning is used. It is needed in the following example because, from a constructive standpoint, knowing that {lean}`p` and {lean}`q` are not both true does not necessarily tell you which one is false: ```lean open Classical variable (p q : Prop) ------ example (h : ¬(p ∧ q)) : ¬p ∨ ¬q := Or.elim (em p) (fun hp : p => Or.inr (show ¬q from fun hq : q => h ⟨hp, hq⟩)) (fun hp : ¬p => Or.inl hp) ``` We will see later that there _are_ situations in constructive logic where principles like excluded middle and double-negation elimination are permissible, and Lean supports the use of classical reasoning in such contexts without relying on excluded middle. The full list of axioms that are used in Lean to support classical reasoning are discussed in {ref "axioms-and-computation"}[Axioms and Computation]. # Examples of Propositional Validities %%% tag := "examples-of-propositional-validities" %%% :::setup ``` variable (p q r s : Prop) ``` Lean's standard library contains proofs of many valid statements of propositional logic, all of which you are free to use in proofs of your own. 
The following list includes a number of common identities. Commutativity: 1. {lean}`p ∧ q ↔ q ∧ p` 2. {lean}`p ∨ q ↔ q ∨ p` Associativity: 3. {lean}`(p ∧ q) ∧ r ↔ p ∧ (q ∧ r)` 4. {lean}`(p ∨ q) ∨ r ↔ p ∨ (q ∨ r)` Distributivity: 5. {lean}`p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r)` 6. {lean}`p ∨ (q ∧ r) ↔ (p ∨ q) ∧ (p ∨ r)` Other properties: 7. {lean}`(p → (q → r)) ↔ (p ∧ q → r)` 8. {lean}`((p ∨ q) → r) ↔ (p → r) ∧ (q → r)` 9. {lean}`¬(p ∨ q) ↔ ¬p ∧ ¬q` 10. {lean}`¬p ∨ ¬q → ¬(p ∧ q)` 11. {lean}`¬(p ∧ ¬p)` 12. {lean}`p ∧ ¬q → ¬(p → q)` 13. {lean}`¬p → (p → q)` 14. {lean}`(¬p ∨ q) → (p → q)` 15. {lean}`p ∨ False ↔ p` 16. {lean}`p ∧ False ↔ False` 17. {lean}`¬(p ↔ ¬p)` 18. {lean}`(p → q) → (¬q → ¬p)` These require classical reasoning: 19. {lean}`(p → r ∨ s) → ((p → r) ∨ (p → s))` 20. {lean}`¬(p ∧ q) → ¬p ∨ ¬q` 21. {lean}`¬(p → q) → p ∧ ¬q` 22. {lean}`(p → q) → (¬p ∨ q)` 23. {lean}`(¬q → ¬p) → (p → q)` 24. {lean}`p ∨ ¬p` 25. {lean}`(((p → q) → p) → p)` The {lean}`sorry` identifier magically produces a proof of anything, or provides an object of any data type at all. Of course, it is unsound as a proof method—for example, you can use it to prove {lean}`False`—and Lean produces severe warnings when files use or import theorems which depend on it. But it is very useful for building long proofs incrementally. Start writing the proof from the top down, using {lean}`sorry` to fill in subproofs. Make sure Lean accepts the term with all the {lean}`sorry`'s; if not, there are errors that you need to correct. Then go back and replace each {lean}`sorry` with an actual proof, until no more remain. Here is another useful trick. Instead of using {lean}`sorry`, you can use an underscore {lit}`_` as a placeholder. Recall this tells Lean that the argument is implicit, and should be filled in automatically. 
If Lean tries to do so and fails, it returns with an error message “don't know how to synthesize placeholder,” followed by the type of the term it is expecting, and all the objects and hypotheses available in the context. In other words, for each unresolved placeholder, Lean reports the subgoal that needs to be filled at that point. You can then construct a proof by incrementally filling in these placeholders. ::: For reference, here are two sample proofs of validities taken from the list above. ```lean open Classical -- distributivity example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := Iff.intro (fun h : p ∧ (q ∨ r) => have hp : p := h.left Or.elim (h.right) (fun hq : q => show (p ∧ q) ∨ (p ∧ r) from Or.inl ⟨hp, hq⟩) (fun hr : r => show (p ∧ q) ∨ (p ∧ r) from Or.inr ⟨hp, hr⟩)) (fun h : (p ∧ q) ∨ (p ∧ r) => Or.elim h (fun hpq : p ∧ q => have hp : p := hpq.left have hq : q := hpq.right show p ∧ (q ∨ r) from ⟨hp, Or.inl hq⟩) (fun hpr : p ∧ r => have hp : p := hpr.left have hr : r := hpr.right show p ∧ (q ∨ r) from ⟨hp, Or.inr hr⟩)) -- an example that requires classical reasoning example (p q : Prop) : ¬(p ∧ ¬q) → (p → q) := fun h : ¬(p ∧ ¬q) => fun hp : p => show q from Or.elim (em q) (fun hq : q => hq) (fun hnq : ¬q => absurd (And.intro hp hnq) h) ``` # Exercises %%% tag := none %%% Prove the following identities, replacing the {lean}`sorry` placeholders with actual proofs. 
```lean variable (p q r : Prop) -- commutativity of ∧ and ∨ example : p ∧ q ↔ q ∧ p := sorry example : p ∨ q ↔ q ∨ p := sorry -- associativity of ∧ and ∨ example : (p ∧ q) ∧ r ↔ p ∧ (q ∧ r) := sorry example : (p ∨ q) ∨ r ↔ p ∨ (q ∨ r) := sorry -- distributivity example : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := sorry example : p ∨ (q ∧ r) ↔ (p ∨ q) ∧ (p ∨ r) := sorry -- other properties example : (p → (q → r)) ↔ (p ∧ q → r) := sorry example : ((p ∨ q) → r) ↔ (p → r) ∧ (q → r) := sorry example : ¬(p ∨ q) ↔ ¬p ∧ ¬q := sorry example : ¬p ∨ ¬q → ¬(p ∧ q) := sorry example : ¬(p ∧ ¬p) := sorry example : p ∧ ¬q → ¬(p → q) := sorry example : ¬p → (p → q) := sorry example : (¬p ∨ q) → (p → q) := sorry example : p ∨ False ↔ p := sorry example : p ∧ False ↔ False := sorry example : (p → q) → (¬q → ¬p) := sorry ``` Prove the following identities, replacing the {lean}`sorry` placeholders with actual proofs. These require classical reasoning. ```lean open Classical variable (p q r : Prop) example : (p → q ∨ r) → ((p → q) ∨ (p → r)) := sorry example : ¬(p ∧ q) → ¬p ∨ ¬q := sorry example : ¬(p → q) → p ∧ ¬q := sorry example : (p → q) → (¬p ∨ q) := sorry example : (¬q → ¬p) → (p → q) := sorry example : p ∨ ¬p := sorry example : (((p → q) → p) → p) := sorry ``` Prove {lean}`¬(p ↔ ¬p)` without using classical logic.
theorem_proving_in_lean4/book/TPiL/QuantifiersEquality.lean
import VersoManual import TPiL.Examples open Verso.Genre open Verso.Genre.Manual hiding tactic open TPiL #doc (Manual) "Quantifiers and Equality" => %%% tag := "quantifiers-and-equality" %%% ```setup variable {α : Type u} (p : α → Prop) (x y t : α) (r : α → α → Prop) {β : α → Type v} ``` The last chapter introduced you to methods that construct proofs of statements involving the propositional connectives. In this chapter, we extend the repertoire of logical constructions to include the universal and existential quantifiers, and the equality relation. # The Universal Quantifier %%% tag := "the-universal-quantifier" %%% Notice that if {lean}`α` is any type, we can represent a unary predicate {lean}`p` on {lean}`α` as an object of type {lean}`α → Prop`. In that case, given {lean}`x : α`, {lean}`p x` denotes the assertion that {lean}`p` holds of {lean}`x`. Similarly, an object {lean}`r : α → α → Prop` denotes a binary relation on {lean}`α`: given {lean}`x y : α`, {lean}`r x y` denotes the assertion that {lean}`x` is related to {lean}`y`. The universal quantifier, {lean}`∀ x : α, p x` is supposed to denote the assertion that “for every {lean}`x : α`, {lean}`p x`” holds. As with the propositional connectives, in systems of natural deduction, “forall” is governed by an introduction and elimination rule. Informally, the introduction rule states: > Given a proof of {lean}`p x`, in a context where {lean}`x : α` is arbitrary, we obtain a proof {lean}`∀ x : α, p x`. The elimination rule states: > Given a proof {lean}`∀ x : α, p x` and any term {lean}`t : α`, we obtain a proof of {lean}`p t`. As was the case for implication, the propositions-as-types interpretation now comes into play. 
Remember the introduction and elimination rules for dependent arrow types: ```setup variable {α : Type u} (p : α → Prop) (x y : α) (r : α → α → Prop) {β : α → Type v} {t : {x : α} → β x} ``` > Given a term {lean}`t` of type {lean}`β x`, in a context where {lean}`x : α` is arbitrary, we have {lean}`(fun x : α => t) : (x : α) → β x`. ```setup variable {α : Type u} (p : α → Prop) (x y : α) (r : α → α → Prop) {β : α → Type v} {t : α} {s : (x : α) → β x} ``` The elimination rule states: > Given a term {lean}`s : (x : α) → β x` and any term {lean}`t : α`, we have {lean}`s t : β t`. In the case where {lean}`p x` has type {lean}`Prop`, if we replace {lean}`(x : α) → β x` with {lean}`∀ x : α, p x`, we can read these as the correct rules for building proofs involving the universal quantifier. :::setup ``` variable {α : Type u} {β : Type v} {p : {x : α} → Prop} (q : Prop) ``` The Calculus of Constructions therefore identifies dependent arrow types with forall-expressions in this way. If {lean}`p` is any expression, {lean}`∀ x : α, p` is nothing more than alternative notation for {lean}`(x : α) → p`, with the idea that the former is more natural than the latter in cases where {lean}`p` is a proposition. Typically, the expression {lean}`p` will depend on {leanRef}`x : α`. Recall that, in the case of ordinary function spaces, we could interpret {lean}`α → β` as the special case of {lean}`(x : α) → β` in which {lean}`β` does not depend on {leanRef}`x`. Similarly, we can think of an implication {lean}`p → q` between propositions as the special case of {lean}`∀ x : p, q` in which the expression {lean}`q` does not depend on {leanRef}`x`. ::: Here is an example of how the {tech}[propositions-as-types] correspondence gets put into practice. 
```lean example (α : Type) (p q : α → Prop) : (∀ x : α, p x ∧ q x) → ∀ y : α, p y := fun h : ∀ x : α, p x ∧ q x => fun y : α => show p y from (h y).left ``` As a notational convention, we give the universal quantifier the widest scope possible, so parentheses are needed to limit the quantifier over {leanRef}`x` to the hypothesis in the example above. The canonical way to prove {lean}`∀ y : α, p y` is to take an arbitrary {leanRef}`y`, and prove {leanRef}`p y`. This is the introduction rule. Now, given that {leanRef}`h` has type {leanRef}`∀ x : α, p x ∧ q x`, the expression {leanRef}`h y` has type {leanRef}`p`{lit}` `{leanRef}`y`{lit}` ∧ `{leanRef}`q`{lit}` `{leanRef}`y`. This is the elimination rule. Taking the left conjunct gives the desired conclusion, {leanRef}`p y`. :::setup ``` variable {x z : α} ``` Remember that expressions which differ up to renaming of bound variables are considered to be equivalent. So, for example, we could have used the same variable, {lean}`x`, in both the hypothesis and conclusion, and instantiated it by a different variable, {lean}`z`, in the proof: ::: ```lean example (α : Type) (p q : α → Prop) : (∀ x : α, p x ∧ q x) → ∀ x : α, p x := fun h : ∀ x : α, p x ∧ q x => fun z : α => show p z from And.left (h z) ``` As another example, here is how we can express the fact that a relation, {lean}`r`, is transitive: ```lean variable (α : Type) (r : α → α → Prop) variable (trans_r : ∀ x y z, r x y → r y z → r x z) variable (a b c : α) variable (hab : r a b) (hbc : r b c) #check trans_r -- trans_r : ∀ (x y z : α), r x y → r y z → r x z #check trans_r a b c -- trans_r a b c : r a b → r b c → r a c #check trans_r a b c hab -- trans_r a b c hab : r b c → r a c #check trans_r a b c hab hbc -- trans_r a b c hab hbc : r a c ``` Think about what is going on here. 
When we instantiate {leanRef}`trans_r` at the values {leanRef}`a b c`, we end up with a proof of {leanRef}`r`{lit}` `{leanRef}`a b`{lit}` → `{leanRef}`r`{lit}` `{leanRef}`b c`{lit}` → `{leanRef}`r`{lit}` `{leanRef}`a`{lit}` `{leanRef}`c`. Applying this to the “hypothesis” {leanRef}`hab : r a b`, we get a proof of the implication {leanRef}`r`{lit}` `{leanRef}`b c`{lit}` → `{leanRef}`r`{lit}` `{leanRef}`a`{lit}` `{leanRef}`c`. Finally, applying it to the hypothesis {leanRef}`hbc` yields a proof of the conclusion {leanRef}`r`{lit}` `{leanRef}`a`{lit}` `{leanRef}`c`. In situations like this, it can be tedious to supply the arguments {leanRef}`a b c`, when they can be inferred from {leanRef}`hab hbc`. For that reason, it is common to make these arguments implicit: ```lean variable (α : Type) (r : α → α → Prop) variable (trans_r : ∀ {x y z}, r x y → r y z → r x z) variable (a b c : α) variable (hab : r a b) (hbc : r b c) #check trans_r #check trans_r hab #check trans_r hab hbc ``` The advantage is that we can simply write {leanRef}`trans_r hab hbc` as a proof of {leanRef}`r`{lit}` `{leanRef}`a`{lit}` `{leanRef}`c`. A disadvantage is that Lean does not have enough information to infer the types of the arguments in the expressions {leanRef}`trans_r` and {leanRef}`trans_r hab`. The output of the first {kw}`#check` command is {lit}`r ?m.1 ?m.2 → r ?m.2 ?m.3 → r ?m.1 ?m.3`, indicating that the implicit arguments are unspecified in this case. Here is an example of how we can carry out elementary reasoning with an equivalence relation: ```lean variable (α : Type) (r : α → α → Prop) variable (refl_r : ∀ x, r x x) variable (symm_r : ∀ {x y}, r x y → r y x) variable (trans_r : ∀ {x y z}, r x y → r y z → r x z) example (a b c d : α) (hab : r a b) (hcb : r c b) (hcd : r c d) : r a d := trans_r (trans_r hab (symm_r hcb)) hcd ``` To get used to using universal quantifiers, you should try some of the exercises at the end of this section. 
:::setup ``` universe i j variable (α : Sort i) (β : {x : α} → Sort j) {x : α} ``` It is the typing rule for dependent arrow types, and the universal quantifier in particular, that distinguishes {lean}`Prop` from other types. Suppose we have {lean}`α : Sort i` and {lean}`β : Sort j`, where the expression {lean}`β` may depend on a variable {lean}`x : α`. Then {lean}`(x : α) → β` is an element of {lean}`Sort (imax i j)`, where {lit}`imax i j` is the maximum of {lit}`i` and {lit}`j` if {lit}`j` is not {lit}`0`, and {lit}`0` otherwise. The idea is as follows. If {lit}`j` is not {lit}`0`, then {lean}`(x : α) → β` is an element of {lean}`Sort (max i j)`. In other words, the type of dependent functions from {lean}`α` to {lean}`β` “lives” in the universe whose index is the maximum of {lit}`i` and {lit}`j`. Suppose, however, that {lean}`β` is of {lean}`Sort 0`, that is, an element of {lean}`Prop`. In that case, {lean}`(x : α) → β` is an element of {lean}`Sort 0` as well, no matter which type universe {lean}`α` lives in. In other words, if {lean}`β` is a proposition depending on {lean}`α`, then {lean}`∀ x : α, β` is again a proposition. This reflects the interpretation of {lean}`Prop` as the type of propositions rather than data, and it is what makes {lean}`Prop` {deftech}_impredicative_. The term “{deftech}[predicative]” stems from foundational developments around the turn of the twentieth century, when logicians such as Poincaré and Russell blamed set-theoretic paradoxes on the “vicious circles” that arise when we define a property by quantifying over a collection that includes the very property being defined. Notice that if {lean}`α` is any type, we can form the type {lean}`α → Prop` of all predicates on {lean}`α` (the “power type of {lean}`α`”). The impredicativity of {lean}`Prop` means that we can form propositions that quantify over {lean}`α → Prop`. 
In particular, we can define predicates on {lean}`α` by quantifying over all predicates on {lean}`α`, which is exactly the type of circularity that was once considered problematic. ::: # Equality %%% tag := "equality" %%% Let us now turn to one of the most fundamental relations defined in Lean's library, namely, the equality relation. In the chapter on {ref "inductive-types"}[inductive types], we will explain _how_ equality is defined from the primitives of Lean's logical framework. In the meanwhile, here we explain how to use it. Of course, a fundamental property of equality is that it is an equivalence relation: ```lean #check Eq.refl -- Eq.refl.{u_1} {α : Sort u_1} (a : α) : a = a #check Eq.symm -- Eq.symm.{u} {α : Sort u} {a b : α} (h : a = b) : b = a #check Eq.trans -- Eq.trans.{u} {α : Sort u} {a b c : α} (h₁ : a = b) (h₂ : b = c) : a = c ``` We can make the output easier to read by telling Lean not to insert the implicit arguments (which are displayed here as metavariables). ```lean universe u #check @Eq.refl.{u} -- @Eq.refl : ∀ {α : Sort u} (a : α), a = a #check @Eq.symm.{u} -- @Eq.symm : ∀ {α : Sort u} {a b : α}, a = b → b = a #check @Eq.trans.{u} -- @Eq.trans : ∀ {α : Sort u} {a b c : α}, a = b → b = c → a = c ``` The inscription {lit}`.{u}` tells Lean to instantiate the constants at the universe {lit}`u`. Thus, for example, we can specialize the example from the previous section to the equality relation: ```lean variable (α : Type) (a b c d : α) variable (hab : a = b) (hcb : c = b) (hcd : c = d) example : a = d := Eq.trans (Eq.trans hab (Eq.symm hcb)) hcd ``` We can also use the projection notation: ```lean variable (α : Type) (a b c d : α) variable (hab : a = b) (hcb : c = b) (hcd : c = d) ------ example : a = d := (hab.trans hcb.symm).trans hcd ``` Reflexivity is more powerful than it looks. 
Recall that terms in the Calculus of Constructions have a computational interpretation, and that the logical framework treats terms with a common reduct as the same. As a result, some nontrivial identities can be proved by reflexivity: ```lean variable (α β : Type) example (f : α → β) (a : α) : (fun x => f x) a = f a := Eq.refl _ example (a : α) (b : β) : (a, b).1 = a := Eq.refl _ example : 2 + 3 = 5 := Eq.refl _ ``` This feature of the framework is so important that the library defines a notation {lean}`rfl` for {lean}`Eq.refl _`: ```lean variable (α β : Type) ------ example (f : α → β) (a : α) : (fun x => f x) a = f a := rfl example (a : α) (b : β) : (a, b).1 = a := rfl example : 2 + 3 = 5 := rfl ``` :::setup ``` variable {a b : α} {p : α → Prop} {h1 : a = b} {h2 : p a} ``` Equality is much more than an equivalence relation, however. It has the important property that every assertion respects the equivalence, in the sense that we can substitute equal expressions without changing the truth value. That is, given {lean}`h1 : a = b` and {lean}`h2 : p a`, we can construct a proof for {lean}`p b` using substitution: {lean}`Eq.subst h1 h2`. ::: ```lean example (α : Type) (a b : α) (p : α → Prop) (h1 : a = b) (h2 : p a) : p b := Eq.subst h1 h2 example (α : Type) (a b : α) (p : α → Prop) (h1 : a = b) (h2 : p a) : p b := h1 ▸ h2 ``` The triangle in the second presentation is a macro built on top of {lean}`Eq.subst` and {lean}`Eq.symm`, and you can enter it by typing {kbd}`\t`. The rule {lean}`Eq.subst` is used to define the following auxiliary rules, which carry out more explicit substitutions. They are designed to deal with applicative terms, that is, terms of form {lean}`s t`. Specifically, {lean}`congrArg` can be used to replace the argument, {lean}`congrFun` can be used to replace the term that is being applied, and {lean}`congr` can be used to replace both at once. 
```lean variable (α : Type) variable (a b : α) variable (f g : α → Nat) variable (h₁ : a = b) variable (h₂ : f = g) example : f a = f b := congrArg f h₁ example : f a = g a := congrFun h₂ a example : f a = g b := congr h₂ h₁ ``` Lean's library contains a large number of common identities, such as these: ```lean variable (a b c : Nat) example : a + 0 = a := Nat.add_zero a example : 0 + a = a := Nat.zero_add a example : a * 1 = a := Nat.mul_one a example : 1 * a = a := Nat.one_mul a example : a + b = b + a := Nat.add_comm a b example : a + b + c = a + (b + c) := Nat.add_assoc a b c example : a * b = b * a := Nat.mul_comm a b example : a * b * c = a * (b * c) := Nat.mul_assoc a b c example : a * (b + c) = a * b + a * c := Nat.mul_add a b c example : a * (b + c) = a * b + a * c := Nat.left_distrib a b c example : (a + b) * c = a * c + b * c := Nat.add_mul a b c example : (a + b) * c = a * c + b * c := Nat.right_distrib a b c ``` Note that {lean}`Nat.mul_add` and {lean}`Nat.add_mul` are alternative names for {lean}`Nat.left_distrib` and {lean}`Nat.right_distrib`, respectively. The properties above are stated for the natural numbers (type {lean}`Nat`). Here is an example of a calculation in the natural numbers that uses substitution combined with associativity and distributivity. ```lean example (x y : Nat) : (x + y) * (x + y) = x * x + y * x + x * y + y * y := have h1 : (x + y) * (x + y) = (x + y) * x + (x + y) * y := Nat.mul_add (x + y) x y have h2 : (x + y) * (x + y) = x * x + y * x + (x * y + y * y) := (Nat.add_mul x y x) ▸ (Nat.add_mul x y y) ▸ h1 h2.trans (Nat.add_assoc (x * x + y * x) (x * y) (y * y)).symm ``` :::setup ``` variable {α : Type u} ``` ```lean (show := false) example {α : Type u} {x y : α} {h : x = y} {p : α → Prop} {e : p x} : p y := h ▸ e ``` Notice that the second implicit parameter to {lean}`Eq.subst`, which provides the context in which the substitution is to occur, has type {lean}`α → Prop`. 
Inferring this predicate therefore requires an instance of _higher-order unification_. In full generality, the problem of determining whether a higher-order unifier exists is undecidable, and Lean can at best provide imperfect and approximate solutions to the problem. As a result, {lean}`Eq.subst` doesn't always do what you want it to. The macro {leanRef}`h ▸ e` uses more effective heuristics for computing this implicit parameter, and often succeeds in situations where applying {lean}`Eq.subst` fails. ::: Because equational reasoning is so common and important, Lean provides a number of mechanisms to carry it out more effectively. The next section offers syntax that allows you to write calculational proofs in a more natural and perspicuous way. But, more importantly, equational reasoning is supported by a term rewriter, a simplifier, and other kinds of automation. The term rewriter and simplifier are described briefly in the next section, and then in greater detail in the next chapter. # Calculational Proofs %%% tag := "calculational-proofs" %%% A calculational proof is just a chain of intermediate results that are meant to be composed by basic principles such as the transitivity of equality. In Lean, a calculational proof starts with the keyword {kw}`calc`, and has the following syntax: ``` calc <expr>_0 'op_1' <expr>_1 ':=' <proof>_1 '_' 'op_2' <expr>_2 ':=' <proof>_2 ... '_' 'op_n' <expr>_n ':=' <proof>_n ``` Note that the {kw}`calc` relations all have the same indentation. Each {lit}`<proof>_i` is a proof for {lit}`<expr>_{i-1} op_i <expr>_i`. We can also use {lit}`_` in the first relation (right after {lit}`<expr>_0`) which is useful to align the sequence of relation/proof pairs: ``` calc <expr>_0 '_' 'op_1' <expr>_1 ':=' <proof>_1 '_' 'op_2' <expr>_2 ':=' <proof>_2 ... 
'_' 'op_n' <expr>_n ':=' <proof>_n ``` Here is an example: ```lean variable (a b c d e : Nat) theorem T (h1 : a = b) (h2 : b = c + 1) (h3 : c = d) (h4 : e = 1 + d) : a = e := calc a = b := h1 _ = c + 1 := h2 _ = d + 1 := congrArg Nat.succ h3 _ = 1 + d := Nat.add_comm d 1 _ = e := Eq.symm h4 ``` This style of writing proofs is most effective when it is used in conjunction with the {tactic}`simp` and {tactic}`rw` tactics, which are discussed in greater detail in the next chapter. For example, using {tactic}`rw` for rewrite, the proof above could be written as follows: ```lean variable (a b c d e : Nat) ------ theorem T (h1 : a = b) (h2 : b = c + 1) (h3 : c = d) (h4 : e = 1 + d) : a = e := calc a = b := by rw [h1] _ = c + 1 := by rw [h2] _ = d + 1 := by rw [h3] _ = 1 + d := by rw [Nat.add_comm] _ = e := by rw [h4] ``` Essentially, the {kw}`rw` tactic uses a given equality (which can be a hypothesis, a theorem name, or a complex term) to “rewrite” the goal. If doing so reduces the goal to an identity {lean}`t = t`, the tactic applies reflexivity to prove it. Rewrites can be applied sequentially, so that the proof above can be shortened to this: ```lean variable (a b c d e : Nat) ------ theorem T (h1 : a = b) (h2 : b = c + 1) (h3 : c = d) (h4 : e = 1 + d) : a = e := calc a = d + 1 := by rw [h1, h2, h3] _ = 1 + d := by rw [Nat.add_comm] _ = e := by rw [h4] ``` Or even this: ```lean variable (a b c d e : Nat) ------ theorem T (h1 : a = b) (h2 : b = c + 1) (h3 : c = d) (h4 : e = 1 + d) : a = e := by rw [h1, h2, h3, Nat.add_comm, h4] ``` The {kw}`simp` tactic, instead, rewrites the goal by applying the given identities repeatedly, in any order, anywhere they are applicable in a term. It also uses other rules that have been previously declared to the system, and applies commutativity wisely to avoid looping. 
As a result, we can also prove the theorem as follows: ```lean variable (a b c d e : Nat) ------ theorem T (h1 : a = b) (h2 : b = c + 1) (h3 : c = d) (h4 : e = 1 + d) : a = e := by simp [h1, h2, h3, Nat.add_comm, h4] ``` We will discuss variations of {kw}`rw` and {kw}`simp` in the next chapter. The {kw}`calc` command can be configured for any relation that supports some form of transitivity. It can even combine different relations. ```lean variable (a b c d : Nat) example (h1 : a = b) (h2 : b ≤ c) (h3 : c + 1 < d) : a < d := calc a = b := h1 _ < b + 1 := Nat.lt_succ_self b _ ≤ c + 1 := Nat.succ_le_succ h2 _ < d := h3 ``` You can “teach” {kw}`calc` new transitivity theorems by adding new instances of the {lean}`Trans` type class. Type classes are introduced later, but the following small example demonstrates how to extend the {kw}`calc` notation using new {lean}`Trans` instances. ```lean def divides (x y : Nat) : Prop := ∃ k, k*x = y def divides_trans (h₁ : divides x y) (h₂ : divides y z) : divides x z := let ⟨k₁, d₁⟩ := h₁ let ⟨k₂, d₂⟩ := h₂ ⟨k₁ * k₂, by rw [Nat.mul_comm k₁ k₂, Nat.mul_assoc, d₁, d₂]⟩ def divides_mul (x : Nat) (k : Nat) : divides x (k*x) := ⟨k, rfl⟩ instance : Trans divides divides divides where trans := divides_trans example (h₁ : divides x y) (h₂ : y = z) : divides x (2*z) := calc divides x y := h₁ _ = z := h₂ divides _ (2*z) := divides_mul .. infix:50 " | " => divides example (h₁ : divides x y) (h₂ : y = z) : divides x (2*z) := calc x | y := h₁ _ = z := h₂ _ | 2*z := divides_mul .. ``` The example above also makes it clear that you can use {kw}`calc` even if you do not have an infix notation for your relation. Lean already includes the standard Unicode notation for divisibility (using {lit}`∣`, which can be entered as {kbd}`\dvd` or {kbd}`\mid`), so the example above uses the ordinary vertical bar to avoid a conflict. In practice, this is not a good idea, as it risks confusion with the ASCII {lit}`|` used in the {kw}`match`{lit}` ... 
`{kw}`with` expression. With {kw}`calc`, we can write the proof in the last section in a more natural and perspicuous way. ```lean variable (x y : Nat) example : (x + y) * (x + y) = x * x + y * x + x * y + y * y := calc (x + y) * (x + y) = (x + y) * x + (x + y) * y := by rw [Nat.mul_add] _ = x * x + y * x + (x + y) * y := by rw [Nat.add_mul] _ = x * x + y * x + (x * y + y * y) := by rw [Nat.add_mul] _ = x * x + y * x + x * y + y * y := by rw [←Nat.add_assoc] ``` The alternative {kw}`calc` notation is worth considering here. When the first expression is taking this much space, using {lit}`_` in the first relation naturally aligns all relations: ```lean variable (x y : Nat) example : (x + y) * (x + y) = x * x + y * x + x * y + y * y := calc (x + y) * (x + y) _ = (x + y) * x + (x + y) * y := by rw [Nat.mul_add] _ = x * x + y * x + (x + y) * y := by rw [Nat.add_mul] _ = x * x + y * x + (x * y + y * y) := by rw [Nat.add_mul] _ = x * x + y * x + x * y + y * y := by rw [←Nat.add_assoc] ``` Here the left arrow before {lean}`Nat.add_assoc` tells rewrite to use the identity in the opposite direction. (You can enter it with {kbd}`\l` or use the ASCII equivalent, {lit}`<-`.) If brevity is what we are after, both {tactic}`rw` and {tactic}`simp` can do the job on their own: ```lean variable (x y : Nat) example : (x + y) * (x + y) = x * x + y * x + x * y + y * y := by rw [Nat.mul_add, Nat.add_mul, Nat.add_mul, ←Nat.add_assoc] example : (x + y) * (x + y) = x * x + y * x + x * y + y * y := by simp [Nat.mul_add, Nat.add_mul, Nat.add_assoc] ``` # The Existential Quantifier %%% tag := "the-existential-quantifier" %%% Finally, consider the existential quantifier, which can be written as either {lean}`exists x : α, p x` or {lean}`∃ x : α, p x`. Both versions are actually notationally convenient abbreviations for a more long-winded expression, {lean}`Exists (fun x : α => p x)`, defined in Lean's library. 
As you should by now expect, the library includes both an introduction rule and an elimination rule. The introduction rule is straightforward: to prove {lean}`∃ x : α, p x`, it suffices to provide a suitable term {lean}`t` and a proof of {lean}`p t`. Here are some examples: ```lean example : ∃ x : Nat, x > 0 := have h : 1 > 0 := Nat.zero_lt_succ 0 Exists.intro 1 h example (x : Nat) (h : x > 0) : ∃ y, y < x := Exists.intro 0 h example (x y z : Nat) (hxy : x < y) (hyz : y < z) : ∃ w, x < w ∧ w < z := Exists.intro y (And.intro hxy hyz) #check @Exists.intro -- @Exists.intro : ∀ {α : Sort u_1} {p : α → Prop} (w : α), p w → Exists p ``` :::setup ``` variable {t : α} {p : α → Prop} (h : p t) ``` We can use the anonymous constructor notation {lean (type := "Exists (fun x : α => p x)")}`⟨t, h⟩` for {lean}`Exists.intro t h`, when the type is clear from the context. ::: ```lean example : ∃ x : Nat, x > 0 := have h : 1 > 0 := Nat.zero_lt_succ 0 ⟨1, h⟩ example (x : Nat) (h : x > 0) : ∃ y, y < x := ⟨0, h⟩ example (x y z : Nat) (hxy : x < y) (hyz : y < z) : ∃ w, x < w ∧ w < z := ⟨y, hxy, hyz⟩ ``` :::setup ``` variable (p : α → Prop) (g : Nat → Nat → Nat) (hg : g 0 0 = 0) ``` Note that {lean}`Exists.intro` has implicit arguments: Lean has to infer the predicate {lean}`p : α → Prop` in the conclusion {lean}`∃ x, p x`. This is not a trivial affair. For example, if we have {lean}`hg : g 0 0 = 0` and write {lean}`Exists.intro 0 hg`, there are many possible values for the predicate {lean}`p`, corresponding to the theorems {lean}`∃ x, g x x = x`, {lean}`∃ x, g x x = 0`, {lean}`∃ x, g x 0 = x`, etc. Lean uses the context to infer which one is appropriate. This is illustrated in the following example, in which we set the option {option}`pp.explicit` to true to ask Lean's pretty-printer to show the implicit arguments. 
::: ```lean variable (g : Nat → Nat → Nat) theorem gex1 (hg : g 0 0 = 0) : ∃ x, g x x = x := ⟨0, hg⟩ theorem gex2 (hg : g 0 0 = 0) : ∃ x, g x 0 = x := ⟨0, hg⟩ theorem gex3 (hg : g 0 0 = 0) : ∃ x, g 0 0 = x := ⟨0, hg⟩ theorem gex4 (hg : g 0 0 = 0) : ∃ x, g x x = 0 := ⟨0, hg⟩ set_option pp.explicit true -- display implicit arguments #print gex1 #print gex2 #print gex3 #print gex4 ``` :::setup ``` variable (q : Prop) (α : Type u) (p : α → Prop) (w : α) (x : α) ``` We can view {lean}`Exists.intro` as an information-hiding operation, since it hides the witness to the body of the assertion. The existential elimination rule, {lean}`Exists.elim`, performs the opposite operation. It allows us to prove a proposition {lean}`q` from {lean}`∃ x : α, p x`, by showing that {lean}`q` follows from {lean}`p w` for an arbitrary value {lean}`w`. Roughly speaking, since we know there is an {lean}`x` satisfying {lean}`p x`, we can give it a name, say, {lean}`w`. If {lean}`q` does not mention {lean}`w`, then showing that {lean}`q` follows from {lean}`p w` is tantamount to showing that {lean}`q` follows from the existence of any such {lean}`x`. Here is an example: ::: ```lean variable (α : Type) (p q : α → Prop) example (h : ∃ x, p x ∧ q x) : ∃ x, q x ∧ p x := Exists.elim h (fun w => fun hw : p w ∧ q w => show ∃ x, q x ∧ p x from ⟨w, hw.right, hw.left⟩) ``` :::setup ``` variable {α : Type u} (p : α → Prop) {β : α → Type} (a : α) (h : p a) (h' : β a) ``` It may be helpful to compare the exists-elimination rule to the or-elimination rule: the assertion {lean}`∃ x : α, p x` can be thought of as a big disjunction of the propositions {lean}`p a`, as {lean}`a` ranges over all the elements of {lean}`α`. Note that the anonymous constructor notation {leanRef}`⟨w, hw.right, hw.left⟩` abbreviates a nested constructor application; we could equally well have written {lit}`⟨`{leanRef}`w`{lit}`, ⟨`{leanRef}`hw.right`{lit}`, `{leanRef}`hw.left`{lit}`⟩⟩`. 
Notice that an existential proposition is very similar to a sigma type, as described in the dependent types section. The difference is that existential propositions are _propositions_, while sigma types are _types_. Otherwise, they are very similar. Given a predicate {lean}`p : α → Prop` and a family of types {lean}`β : α → Type`, for a term {lean}`a : α` with {lean}`h : p a` and {lean}`h' : β a`, the term {lean}`Exists.intro a h` has type {lean}`(∃ x : α, p x) : Prop`, while {lean}`Sigma.mk a h'` has type {lean}`(Σ x : α, β x)`. The similarity between {lit}`∃` and {lit}`Σ` is another instance of the {tech}[Curry-Howard isomorphism]. ::: Lean provides a more convenient way to eliminate from an existential quantifier with the {kw}`match` expression: ```lean variable (α : Type) (p q : α → Prop) example (h : ∃ x, p x ∧ q x) : ∃ x, q x ∧ p x := match h with | ⟨w, hw⟩ => ⟨w, hw.right, hw.left⟩ ``` The {kw}`match` expression is part of Lean's function definition system, which provides convenient and expressive ways of defining complex functions. Once again, it is the {tech}[Curry-Howard isomorphism] that allows us to co-opt this mechanism for writing proofs as well. The {kw}`match` statement “destructs” the existential assertion into the components {leanRef}`w` and {leanRef}`hw`, which can then be used in the body of the statement to prove the proposition. 
We can annotate the types used in the match for greater clarity: ```lean variable (α : Type) (p q : α → Prop) ------ example (h : ∃ x, p x ∧ q x) : ∃ x, q x ∧ p x := match h with | ⟨(w : α), (hw : p w ∧ q w)⟩ => ⟨w, hw.right, hw.left⟩ ``` We can even use the match statement to decompose the conjunction at the same time: ```lean variable (α : Type) (p q : α → Prop) ------ example (h : ∃ x, p x ∧ q x) : ∃ x, q x ∧ p x := match h with | ⟨w, hpw, hqw⟩ => ⟨w, hqw, hpw⟩ ``` Lean also provides a pattern-matching {kw}`let` expression: ```lean variable (α : Type) (p q : α → Prop) ------ example (h : ∃ x, p x ∧ q x) : ∃ x, q x ∧ p x := let ⟨w, hpw, hqw⟩ := h ⟨w, hqw, hpw⟩ ``` This is essentially just alternative notation for the {kw}`match` construct above. Lean will even allow us to use an implicit {kw}`match` in the {kw}`fun` expression: ```lean variable (α : Type) (p q : α → Prop) ------ example : (∃ x, p x ∧ q x) → ∃ x, q x ∧ p x := fun ⟨w, hpw, hqw⟩ => ⟨w, hqw, hpw⟩ ``` We will see in {ref "induction-and-recursion"}[Induction and Recursion] that all these variations are instances of a more general pattern-matching construct. :::setup ``` def IsEven (a : Nat) := ∃ b, a = 2 * b variable (a : Nat) ``` In the following example, we define {lean}`IsEven a` as {lean}`∃ b, a = 2 * b`, and then we show that the sum of two even numbers is an even number. 
::: ```lean def IsEven (a : Nat) := ∃ b, a = 2 * b theorem even_plus_even (h1 : IsEven a) (h2 : IsEven b) : IsEven (a + b) := Exists.elim h1 (fun w1 (hw1 : a = 2 * w1) => Exists.elim h2 (fun w2 (hw2 : b = 2 * w2) => Exists.intro (w1 + w2) (calc a + b _ = 2 * w1 + 2 * w2 := by rw [hw1, hw2] _ = 2 * (w1 + w2) := by rw [Nat.mul_add]))) ``` Using the various gadgets described in this chapter—the match statement, anonymous constructors, and the {tactic}`rewrite` tactic—we can write this proof concisely as follows: ```lean def IsEven (a : Nat) := ∃ b, a = 2 * b ------ theorem even_plus_even (h1 : IsEven a) (h2 : IsEven b) : IsEven (a + b) := match h1, h2 with | ⟨w1, hw1⟩, ⟨w2, hw2⟩ => ⟨w1 + w2, by rw [hw1, hw2, Nat.mul_add]⟩ ``` :::leanFirst Just as the constructive “or” is stronger than the classical “or,” so, too, is the constructive “exists” stronger than the classical “exists”. For example, the following implication requires classical reasoning because, from a constructive standpoint, knowing that it is not the case that every {leanRef}`x` satisfies {leanRef}`¬ p` is not the same as having a particular {leanRef}`x` that satisfies {leanRef}`p`. ```lean open Classical variable (p : α → Prop) example (h : ¬ ∀ x, ¬ p x) : ∃ x, p x := byContradiction (fun h1 : ¬ ∃ x, p x => have h2 : ∀ x, ¬ p x := fun x => fun h3 : p x => have h4 : ∃ x, p x := ⟨x, h3⟩ show False from h1 h4 show False from h h2) ``` ::: What follows are some common identities involving the existential quantifier. In the exercises below, we encourage you to prove as many as you can. We also leave it to you to determine which are nonconstructive, and hence require some form of classical reasoning. 
```lean open Classical variable (α : Type) (p q : α → Prop) variable (r : Prop) example : (∃ x : α, r) → r := sorry example (a : α) : r → (∃ x : α, r) := sorry example : (∃ x, p x ∧ r) ↔ (∃ x, p x) ∧ r := sorry example : (∃ x, p x ∨ q x) ↔ (∃ x, p x) ∨ (∃ x, q x) := sorry example : (∀ x, p x) ↔ ¬ (∃ x, ¬ p x) := sorry example : (∃ x, p x) ↔ ¬ (∀ x, ¬ p x) := sorry example : (¬ ∃ x, p x) ↔ (∀ x, ¬ p x) := sorry example : (¬ ∀ x, p x) ↔ (∃ x, ¬ p x) := sorry example : (∀ x, p x → r) ↔ (∃ x, p x) → r := sorry example (a : α) : (∃ x, p x → r) ↔ (∀ x, p x) → r := sorry example (a : α) : (∃ x, r → p x) ↔ (r → ∃ x, p x) := sorry ``` Notice that the second example and the last two examples require the assumption that there is at least one element {leanRef}`a` of type {leanRef}`α`. Here are solutions to two of the more difficult ones: ```lean open Classical variable (α : Type) (p q : α → Prop) variable (a : α) variable (r : Prop) example : (∃ x, p x ∨ q x) ↔ (∃ x, p x) ∨ (∃ x, q x) := Iff.intro (fun ⟨a, (h1 : p a ∨ q a)⟩ => Or.elim h1 (fun hpa : p a => Or.inl ⟨a, hpa⟩) (fun hqa : q a => Or.inr ⟨a, hqa⟩)) (fun h : (∃ x, p x) ∨ (∃ x, q x) => Or.elim h (fun ⟨a, hpa⟩ => ⟨a, (Or.inl hpa)⟩) (fun ⟨a, hqa⟩ => ⟨a, (Or.inr hqa)⟩)) example : (∃ x, p x → r) ↔ (∀ x, p x) → r := Iff.intro (fun ⟨b, (hb : p b → r)⟩ => fun h2 : ∀ x, p x => show r from hb (h2 b)) (fun h1 : (∀ x, p x) → r => show ∃ x, p x → r from byCases (fun hap : ∀ x, p x => ⟨a, λ h' => h1 hap⟩) (fun hnap : ¬ ∀ x, p x => byContradiction (fun hnex : ¬ ∃ x, p x → r => have hap : ∀ x, p x := fun x => byContradiction (fun hnp : ¬ p x => have hex : ∃ x, p x → r := ⟨x, (fun hp => absurd hp hnp)⟩ show False from hnex hex) show False from hnap hap))) ``` # More on the Proof Language %%% tag := "more-on-the-proof-language" %%% We have seen that keywords like {kw}`fun`, {kw}`have`, and {kw}`show` make it possible to write formal proof terms that mirror the structure of informal mathematical proofs. 
In this section, we discuss some additional features of the proof language that are often convenient. To start with, we can use anonymous {kw}`have` expressions to introduce an auxiliary goal without having to label it. We can refer to the last expression introduced in this way using the keyword {lit}`this`: ```lean variable (f : Nat → Nat) variable (h : ∀ x : Nat, f x ≤ f (x + 1)) example : f 0 ≤ f 3 := have : f 0 ≤ f 1 := h 0 have : f 0 ≤ f 2 := Nat.le_trans this (h 1) show f 0 ≤ f 3 from Nat.le_trans this (h 2) ``` Often proofs move from one fact to the next, so this can be effective in eliminating the clutter of lots of labels. When the goal can be inferred, we can also ask Lean instead to fill in the proof by writing {kw}`by assumption`: ```lean variable (f : Nat → Nat) variable (h : ∀ x : Nat, f x ≤ f (x + 1)) ------ example : f 0 ≤ f 3 := have : f 0 ≤ f 1 := h 0 have : f 0 ≤ f 2 := Nat.le_trans (by assumption) (h 1) show f 0 ≤ f 3 from Nat.le_trans (by assumption) (h 2) ``` This tells Lean to use the {leanRef}`assumption` tactic, which, in turn, proves the goal by finding a suitable hypothesis in the local context. We will learn more about the {leanRef}`assumption` tactic in the next chapter. :::setup ``` variable {p : Prop} (prf : p) ``` We can also ask Lean to fill in the proof by writing {lean}`‹p›`, where {lean}`p` is the proposition whose proof we want Lean to find in the context. You can type these corner quotes using {kbd}`\f<` and {kbd}`\f>`, respectively. The letter “f” is for “French,” since the Unicode symbols can also be used as French quotation marks. In fact, the notation is defined in Lean as follows: ::: ```lean notation "‹" p "›" => show p by assumption ``` This approach is more robust than using {leanRef}`by assumption`, because the type of the assumption that needs to be inferred is given explicitly. It also makes proofs more readable. 
Here is a more elaborate example: ```lean variable (f : Nat → Nat) variable (h : ∀ x : Nat, f x ≤ f (x + 1)) example : f 0 ≥ f 1 → f 1 ≥ f 2 → f 0 = f 2 := fun _ : f 0 ≥ f 1 => fun _ : f 1 ≥ f 2 => have : f 0 ≥ f 2 := Nat.le_trans ‹f 1 ≥ f 2› ‹f 0 ≥ f 1› have : f 0 ≤ f 2 := Nat.le_trans (h 0) (h 1) show f 0 = f 2 from Nat.le_antisymm this ‹f 0 ≥ f 2› ``` Keep in mind that you can use the French quotation marks in this way to refer to _anything_ in the context, not just things that were introduced anonymously. Its use is also not limited to propositions, though using it for data is somewhat odd: ```lean example (n : Nat) : Nat := ‹Nat› ``` Later, we show how you can extend the proof language using the Lean macro system. # Exercises %%% tag := none %%% 1. Prove these equivalences: ```lean variable (α : Type) (p q : α → Prop) example : (∀ x, p x ∧ q x) ↔ (∀ x, p x) ∧ (∀ x, q x) := sorry example : (∀ x, p x → q x) → (∀ x, p x) → (∀ x, q x) := sorry example : (∀ x, p x) ∨ (∀ x, q x) → ∀ x, p x ∨ q x := sorry ``` You should also try to understand why the reverse implication is not derivable in the last example. 2. It is often possible to bring a component of a formula outside a universal quantifier, when it does not depend on the quantified variable. Try proving these (one direction of the second of these requires classical logic): ```lean variable (α : Type) (p q : α → Prop) variable (r : Prop) example : α → ((∀ x : α, r) ↔ r) := sorry example : (∀ x, p x ∨ r) ↔ (∀ x, p x) ∨ r := sorry example : (∀ x, r → p x) ↔ (r → ∀ x, p x) := sorry ``` 3. Consider the “barber paradox,” that is, the claim that in a certain town there is a (male) barber that shaves all and only the men who do not shave themselves. Prove that this is a contradiction: ```lean variable (men : Type) (barber : men) variable (shaves : men → men → Prop) example (h : ∀ x : men, shaves barber x ↔ ¬ shaves x x) : False := sorry ``` 4. 
::::setup ``` variable {n : Nat} ``` :::leanFirst Remember that, without any parameters, an expression of type {lean}`Prop` is just an assertion. Fill in the definitions of {leanRef}`prime` and {leanRef}`Fermat_prime` below, and construct each of the given assertions. For example, you can say that there are infinitely many primes by asserting that for every natural number {lean}`n`, there is a prime number greater than {lean}`n`. Goldbach's weak conjecture states that every odd number greater than 5 is the sum of three primes. Look up the definition of a Fermat prime or any of the other statements, if necessary. ```lean def even (n : Nat) : Prop := sorry def prime (n : Nat) : Prop := sorry def infinitely_many_primes : Prop := sorry def Fermat_prime (n : Nat) : Prop := sorry def infinitely_many_Fermat_primes : Prop := sorry def goldbach_conjecture : Prop := sorry def Goldbach's_weak_conjecture : Prop := sorry def Fermat's_last_theorem : Prop := sorry ``` ::: :::: 5. Prove as many of the identities listed in the Existential Quantifier section as you can.
theorem_proving_in_lean4/book/TPiL/Conv.lean
import VersoManual import TPiL.Examples open Verso.Genre open Verso.Genre.Manual hiding tactic open TPiL #doc (Manual) "The Conversion Tactic Mode" => %%% tag := "conv" %%% Inside a tactic block, one can use the keyword {tactic}`conv` to enter _conversion mode_. This mode allows you to travel inside assumptions and goals, even inside function abstractions and dependent arrows, to apply rewriting or simplifying steps. # Basic navigation and rewriting %%% tag := "basic-navigation-and-rewriting" %%% :::leanFirst As a first example, let us prove example {leanRef}`(a b c : Nat) : a * (b * c) = a * (c * b)` (examples in this file are somewhat artificial since other tactics could finish them immediately). The naive first attempt is to enter tactic mode and try {leanRef}`rw [Nat.mul_comm]`. But this transforms the goal into {leanRef}`b * c * a = a * (c * b)`, after commuting the very first multiplication appearing in the term. There are several ways to fix this issue, and one way is to use a more precise tool: the conversion mode. The following code block shows the current target after each line. ```lean (showProofStates := "oops conv1 conv2 conv3 conv4") #guard_msgs (drop all) in example (a b c : Nat) : a * (b * c) = a * (c * b) := by rw [Nat.mul_comm] -- ^ PROOF_STATE: oops example (a b c : Nat) : a * (b * c) = a * (c * b) := by conv => -- ^ PROOF_STATE: conv1 lhs -- ^ PROOF_STATE: conv2 congr -- ^ PROOF_STATE: conv3 rfl -- ^ PROOF_STATE: conv4 rw [Nat.mul_comm] ``` ::: The above snippet shows three navigation commands: - {leanRef}`lhs` navigates to the left-hand side of a relation (equality, in this case). There is also a {tactic}`rhs` to navigate to the right-hand side. - {leanRef}`congr` creates as many targets as there are (nondependent and explicit) arguments to the current head function (here the head function is multiplication). - {leanRef}`rfl` closes the target using reflexivity. Once arrived at the relevant target, we can use {leanRef}`rw` as in normal tactic mode. 
:::leanFirst The second main reason to use conversion mode is to rewrite under binders. Suppose we want to prove example {leanRef}`(fun x : Nat => 0 + x) = (fun x => x)`. The naive first attempt is to enter tactic mode and try {leanRef}`rw [Nat.zero_add]`. But this fails with a frustrating ``` error: tactic 'rewrite' failed, did not find instance of the pattern in the target expression 0 + ?n ⊢ (fun x => 0 + x) = fun x => x ``` The solution is: ```lean example : (fun x : Nat => 0 + x) = (fun x => x) := by conv => lhs intro x rw [Nat.zero_add] ``` ::: where {leanRef}`intro x` is the navigation command entering inside the {kw}`fun` binder. Note that this example is somewhat artificial, one could also do: ```lean example : (fun x : Nat => 0 + x) = (fun x => x) := by funext x; rw [Nat.zero_add] ``` or just ```lean example : (fun x : Nat => 0 + x) = (fun x => x) := by simp ``` {leanRef}`conv` can also rewrite a hypothesis {lit}`h` from the local context, using {kw}`conv at`{lit}` h`. # Pattern matching %%% tag := "pattern-matching-conv" %%% Navigation using the above commands can be tedious. One can shortcut it using pattern matching as follows: ```lean example (a b c : Nat) : a * (b * c) = a * (c * b) := by conv in b * c => rw [Nat.mul_comm] ``` which is just syntax sugar for ```lean example (a b c : Nat) : a * (b * c) = a * (c * b) := by conv => pattern b * c rw [Nat.mul_comm] ``` Of course, wildcards are allowed: ```lean example (a b c : Nat) : a * (b * c) = a * (c * b) := by conv in _ * c => rw [Nat.mul_comm] ``` # Structuring conversion tactics %%% tag := "structuring-conversion-tactics" %%% Curly brackets and {lit}`.` can also be used in {leanRef}`conv` mode to structure tactics: ```lean example (a b c : Nat) : (0 + a) * (b * c) = a * (c * b) := by conv => lhs congr . rw [Nat.zero_add] . 
rw [Nat.mul_comm] ``` # Other tactics inside conversion mode %%% tag := "other-tactics-inside-conversion-mode" %%% - :::leanFirst {leanRef}`arg`{lit}` i` enter the {lit}`i`-th nondependent explicit argument of an application. ```lean (showProofStates := "arg2 arg3") example (a b c : Nat) : a * (b * c) = a * (c * b) := by conv => -- ^ PROOF_STATE: arg1 lhs -- ^ PROOF_STATE: arg2 arg 2 -- ^ PROOF_STATE: arg3 rw [Nat.mul_comm] ``` ::: - {tactic}`args` is an alternative name for {leanRef}`congr`. - {leanRef}`simp` applies the simplifier to the current goal. It supports the same options available in regular tactic mode. ```lean def f (x : Nat) := if x > 0 then x + 1 else x + 2 example (g : Nat → Nat) (h₁ : g x = x + 1) (h₂ : x > 0) : g x = f x := by conv => rhs simp [f, h₂] exact h₁ ``` - {kw}`enter`{lit}` [1, x, 2, y]` iterate {leanRef}`arg` and {leanRef}`intro` with the given arguments. - {tactic}`done` fail if there are unsolved goals. - {tactic}`trace_state` display the current tactic state. - {tactic}`whnf` put term in weak head normal form. - {kw}`tactic`{lit}` => <tactic sequence>` go back to regular tactic mode. This is useful for discharging goals not supported by {leanRef}`conv` mode, and applying custom congruence and extensionality lemmas. ```lean (showProofStates := "convTac1 convTac2 convTac4") example (g : Nat → Nat → Nat) (h₁ : ∀ x, x ≠ 0 → g x x = 1) (h₂ : x ≠ 0) : g x x + x = 1 + x := by conv => lhs -- ^ PROOF_STATE: convTac1 arg 1 -- ^ PROOF_STATE: convTac2 rw [h₁] . skip . tactic => -- ^ PROOF_STATE: convTac4 exact h₂ ``` - {kw}`apply`{lit}` <term>` is syntax sugar for {kw}`tactic`{lit}` => apply <term>`. ```lean example (g : Nat → Nat → Nat) (h₁ : ∀ x, x ≠ 0 → g x x = 1) (h₂ : x ≠ 0) : g x x + x = 1 + x := by conv => lhs arg 1 rw [h₁] . skip . apply h₂ ```
theorem_proving_in_lean4/book/TPiL/InductiveTypes.lean
import VersoManual import TPiL.Examples open Verso.Genre open Verso.Genre.Manual hiding tactic open TPiL #doc (Manual) "Inductive Types" => %%% tag := "inductive-types" %%% :::setup ``` variable {α : Sort u} {β : Sort v} ``` We have seen that Lean's formal foundation includes basic types, {lean}`Prop`, {lean}`Type 0`, {lean}`Type 1`, {lean}`Type 2`, ..., and allows for the formation of dependent function types, {lean}`(x : α) → β`. In the examples, we have also made use of additional types like {lean}`Bool`, {lean}`Nat`, and {lean}`Int`, and type constructors, like {lean}`List`, and product, {lit}`×`. In fact, in Lean's library, every concrete type other than the universes and every type constructor other than dependent arrows is an instance of a general family of type constructions known as _inductive types_. It is remarkable that it is possible to construct a substantial edifice of mathematics based on nothing more than the type universes, dependent arrow types, and inductive types; everything else follows from those. ::: Intuitively, an inductive type is built up from a specified list of constructors. In Lean, the syntax for specifying such a type is as follows: :::setup ``` variable {α β ω : Type} inductive Foo where | constructor₁ : α → Foo | constructor₂ : β → Foo | constructorₙ : ω → Foo ``` ``` inductive Foo where | constructor₁ : ... → Foo | constructor₂ : ... → Foo ... | constructorₙ : ... → Foo ``` The intuition is that each constructor specifies a way of building new objects of {lean}`Foo`, possibly from previously constructed values. The type {lean}`Foo` consists of nothing more than the objects that are constructed in this way. We will see below that the arguments of the constructors can include objects of type {lean}`Foo`, subject to a certain “positivity” constraint, which guarantees that elements of {lean}`Foo` are built from the bottom up. 
Roughly speaking, each {lit}`...` can be any arrow type constructed from {lean}`Foo` and previously defined types, in which {lean}`Foo` appears, if at all, only as the “target” of the dependent arrow type. ::: We will provide a number of examples of inductive types. We will also consider slight generalizations of the scheme above, to mutually defined inductive types, and so-called _inductive families_. As with the logical connectives, every inductive type comes with introduction rules, which show how to construct an element of the type, and elimination rules, which show how to “use” an element of the type in another construction. The analogy to the logical connectives should not come as a surprise; as we will see below, they, too, are examples of inductive type constructions. You have already seen the introduction rules for an inductive type: they are just the constructors that are specified in the definition of the type. The elimination rules provide for a principle of recursion on the type, which includes, as a special case, a principle of induction as well. In the next chapter, we will describe Lean's function definition package, which provides even more convenient ways to define functions on inductive types and carry out inductive proofs. But because the notion of an inductive type is so fundamental, we feel it is important to start with a low-level, hands-on understanding. We will start with some basic examples of inductive types, and work our way up to more elaborate and complex examples. # Enumerated Types %%% tag := "enumerated-types" %%% The simplest kind of inductive type is a type with a finite, enumerated list of elements. ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday ``` The {kw}`inductive` command creates a new type, {leanRef}`Weekday`. The constructors all live in the {lit}`Weekday` namespace. 
```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday ------ #check Weekday.sunday #check Weekday.monday open Weekday #check sunday #check monday ``` You can omit {leanRef}`: Weekday` when declaring the {leanRef}`Weekday` inductive type. ```lean inductive Weekday where | sunday | monday | tuesday | wednesday | thursday | friday | saturday ``` :::setup ``` inductive Weekday where | sunday | monday | tuesday | wednesday | thursday | friday | saturday ``` Think of {leanRef}`sunday`, {leanRef}`monday`, ... , {leanRef}`saturday` as being distinct elements of {leanRef}`Weekday`, with no other distinguishing properties. The elimination principle, {name}`Weekday.rec`, is defined along with the type {leanRef}`Weekday` and its constructors. It is also known as a _recursor_, and it is what makes the type “inductive”: it allows us to define a function on {leanRef}`Weekday` by assigning values corresponding to each constructor. The intuition is that an inductive type is exhaustively generated by the constructors, and has no elements beyond those they construct. 
```signature Weekday.rec.{u} {motive : Weekday → Sort u} (sunday : motive Weekday.sunday) (monday : motive Weekday.monday) (tuesday : motive Weekday.tuesday) (wednesday : motive Weekday.wednesday) (thursday : motive Weekday.thursday) (friday : motive Weekday.friday) (saturday : motive Weekday.saturday) (t : Weekday) : motive t ``` ::: :::leanFirst We will use the {kw}`match` expression to define a function from {leanRef}`Weekday` to the natural numbers: ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday ------ open Weekday def numberOfDay (d : Weekday) : Nat := match d with | sunday => 1 | monday => 2 | tuesday => 3 | wednesday => 4 | thursday => 5 | friday => 6 | saturday => 7 #eval numberOfDay Weekday.sunday -- 1 #eval numberOfDay Weekday.monday -- 2 #eval numberOfDay Weekday.tuesday -- 3 ``` When using Lean's logic, the {kw}`match` expression is compiled using the _recursor_ {leanRef}`Weekday.rec` generated when you declare the inductive type. This ensures that the resulting term is well-defined in the type theory. For compiled code, {kw}`match` is compiled as in other functional programming languages. ::: ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday ------ open Weekday def numberOfDay (d : Weekday) : Nat := match d with | sunday => 1 | monday => 2 | tuesday => 3 | wednesday => 4 | thursday => 5 | friday => 6 | saturday => 7 set_option pp.all true #print numberOfDay #print numberOfDay.match_1 #print Weekday.casesOn #check @Weekday.rec ``` :::leanFirst When declaring an inductive datatype, you can use {leanRef}`deriving Repr` to instruct Lean to generate a function that converts {leanRef}`Weekday` objects into text. This function is used by the {kw}`#eval` command to display {leanRef}`Weekday` objects. 
If no {lean}`Repr` exists, {kw}`#eval` attempts to derive one on the spot. ```lean inductive Weekday where | sunday | monday | tuesday | wednesday | thursday | friday | saturday deriving Repr open Weekday #eval tuesday -- Weekday.tuesday ``` ::: It is often useful to group definitions and theorems related to a structure in a namespace with the same name. For example, we can put the {leanRef}`numberOfDay` function in the {lit}`Weekday` namespace. We are then allowed to use the shorter name when we open the namespace. :::leanFirst We can define functions from {leanRef}`Weekday` to {leanRef}`Weekday`: ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday deriving Repr ------ namespace Weekday def next (d : Weekday) : Weekday := match d with | sunday => monday | monday => tuesday | tuesday => wednesday | wednesday => thursday | thursday => friday | friday => saturday | saturday => sunday def previous (d : Weekday) : Weekday := match d with | sunday => saturday | monday => sunday | tuesday => monday | wednesday => tuesday | thursday => wednesday | friday => thursday | saturday => friday #eval next (next tuesday) -- Weekday.thursday #eval next (previous tuesday) -- Weekday.tuesday example : next (previous tuesday) = tuesday := rfl end Weekday ``` ::: :::leanFirst How can we prove the general theorem that {leanRef}`next (previous d) = d` for any Weekday {leanRef}`d`? 
You can use {kw}`match` to provide a proof of the claim for each constructor: ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday deriving Repr namespace Weekday def next (d : Weekday) : Weekday := match d with | sunday => monday | monday => tuesday | tuesday => wednesday | wednesday => thursday | thursday => friday | friday => saturday | saturday => sunday def previous (d : Weekday) : Weekday := match d with | sunday => saturday | monday => sunday | tuesday => monday | wednesday => tuesday | thursday => wednesday | friday => thursday | saturday => friday ------ theorem next_previous (d : Weekday) : next (previous d) = d := match d with | sunday => rfl | monday => rfl | tuesday => rfl | wednesday => rfl | thursday => rfl | friday => rfl | saturday => rfl ``` ::: Using a tactic proof, we can be even more concise: ```lean inductive Weekday where | sunday : Weekday | monday : Weekday | tuesday : Weekday | wednesday : Weekday | thursday : Weekday | friday : Weekday | saturday : Weekday deriving Repr namespace Weekday def next (d : Weekday) : Weekday := match d with | sunday => monday | monday => tuesday | tuesday => wednesday | wednesday => thursday | thursday => friday | friday => saturday | saturday => sunday def previous (d : Weekday) : Weekday := match d with | sunday => saturday | monday => sunday | tuesday => monday | wednesday => tuesday | thursday => wednesday | friday => thursday | saturday => friday ------ theorem next_previous (d : Weekday) : next (previous d) = d := by cases d <;> rfl ``` {ref "tactics-for-inductive-types"}[Tactics for Inductive Types] below will introduce additional tactics that are specifically designed to make use of inductive types. Notice that, under the {tech}[propositions-as-types] correspondence, we can use {kw}`match` to prove theorems as well as define functions. 
In other words, under the {tech}[propositions-as-types] correspondence, the proof by cases is a kind of definition by cases, where what is being “defined” is a proof instead of a piece of data. The {lean}`Bool` type in the Lean library is an instance of enumerated type. ```lean namespace Hidden ------ inductive Bool where | false : Bool | true : Bool ------ end Hidden ``` (To run these examples, we put them in a namespace called {lit}`Hidden`, so that a name like {leanRef}`Bool` does not conflict with the {lean}`Bool` in the standard library. This is necessary because these types are part of the Lean “prelude” that is automatically imported when the system is started.) As an exercise, you should think about what the introduction and elimination rules for these types do. As a further exercise, we suggest defining boolean operations {lean}`and`, {lean}`or`, {lean}`not` on the {leanRef}`Bool` type, and verifying common identities. Note that you can define a binary operation like {leanRef}`and` using {kw}`match`: ```lean namespace Hidden ------ def and (a b : Bool) : Bool := match a with | true => b | false => false ------- end Hidden ``` Similarly, most identities can be proved by introducing suitable {kw}`match`, and then using {lean}`rfl`. # Constructors with Arguments %%% tag := "constructors-with-arguments" %%% :::setup ``` variable (α : Type u) (β : Type v) (a : α) (b : β) ``` Enumerated types are a very special case of inductive types, in which the constructors take no arguments at all. In general, a “construction” can depend on data, which is then represented in the constructed argument. Consider the definitions of the product type and sum type in the library: ```lean namespace Hidden ------ inductive Prod (α : Type u) (β : Type v) | mk : α → β → Prod α β inductive Sum (α : Type u) (β : Type v) where | inl : α → Sum α β | inr : β → Sum α β ------- end Hidden ``` Consider what is going on in these examples. 
The product type has one constructor, {lean}`Prod.mk`, which takes two arguments. To define a function on {leanRef}`Prod α β`, we can assume the input is of the form {lean}`Prod.mk a b`, and we have to specify the output, in terms of {leanRef}`a` and {leanRef}`b`. We can use this to define the two projections for {leanRef}`Prod`. Remember that the standard library defines notation {lean}`α × β` for {leanRef}`Prod α β` and {lean}`(a, b)` for {lean}`Prod.mk a b`. ```lean namespace Hidden inductive Prod (α : Type u) (β : Type v) | mk : α → β → Prod α β ------ def fst {α : Type u} {β : Type v} (p : Prod α β) : α := match p with | Prod.mk a b => a def snd {α : Type u} {β : Type v} (p : Prod α β) : β := match p with | Prod.mk a b => b -------- end Hidden ``` The function {leanRef}`fst` takes a pair, {leanRef}`p`. The {kw}`match` interprets {leanRef}`p` as a pair, {leanRef}`Prod.mk a b`. Recall also from {ref "dependent-type-theory"}[Dependent Type Theory] that to give these definitions the greatest generality possible, we allow the types {leanRef}`α` and {leanRef}`β` to belong to any universe. ::: :::setup ``` universe u_2 u_3 u_1 variable (b : Bool) {α : Type u} {t1 t2 : α} ``` Here is another example where we use the recursor {lean (type := "{α : Type u_2} → {β : Type u_3} → {motive : α × β → Sort u_1} → (t : α × β) → ((fst : α) → (snd : β) → motive (fst, snd)) → motive t")}`Prod.casesOn` instead of {kw}`match`. ```lean def prod_example (p : Bool × Nat) : Nat := Prod.casesOn (motive := fun _ => Nat) p (fun b n => cond b (2 * n) (2 * n + 1)) #eval prod_example (true, 3) #eval prod_example (false, 3) ``` The argument {leanRef}`motive` is used to specify the type of the object you want to construct, and it is a function because it may depend on the pair. The {leanRef}`cond` function is a boolean conditional: {lean}`cond b t1 t2` returns {lean}`t1` if {lean}`b` is true, and {lean}`t2` otherwise. 
The function {leanRef}`prod_example` takes a pair consisting of a boolean, {leanRef}`b`, and a number, {leanRef}`n`, and returns either {leanRef}`2 * n` or {leanRef}`2 * n + 1` according to whether {leanRef}`b` is true or false. ::: :::setup ``` open Sum variable {α : Type u} {β : Type v} (a : α) (b : β) ``` In contrast, the sum type has _two_ constructors, {lean}`inl` and {lean}`inr` (for “insert left” and “insert right”), each of which takes _one_ (explicit) argument. To define a function on {lean}`Sum α β`, we have to handle two cases: either the input is of the form {lean}`inl a`, in which case we have to specify an output value in terms of {leanRef}`a`, or the input is of the form {lean}`inr b`, in which case we have to specify an output value in terms of {leanRef}`b`. ```lean def sum_example (s : Sum Nat Nat) : Nat := Sum.casesOn (motive := fun _ => Nat) s (fun n => 2 * n) (fun n => 2 * n + 1) #eval sum_example (Sum.inl 3) #eval sum_example (Sum.inr 3) ``` ::: :::setup ``` open Sum variable (n : Nat) ``` This example is similar to the previous one, but now an input to {leanRef}`sum_example` is implicitly either of the form {lean}`inl n` or {lean}`inr n`. In the first case, the function returns {lean}`2 * n`, and the second case, it returns {lean}`2 * n + 1`. ::: :::setup ``` variable {α β : Type} {a : α} {b : β} open Sum ``` Notice that the product type depends on parameters {lean}`α β : Type` which are arguments to the constructors as well as {lean}`Prod`. Lean detects when these arguments can be inferred from later arguments to a constructor or the return type, and makes them implicit in that case. In {ref "defining-the-natural-numbers"}[Defining the Natural Numbers] we will see what happens when the constructor of an inductive type takes arguments from the inductive type itself. What characterizes the examples we consider in this section is that each constructor relies only on previously specified types. 
Notice that a type with multiple constructors is disjunctive: an element of {lean}`Sum α β` is either of the form {lean}`inl a` _or_ of the form {lean}`inr b`.
A constructor with multiple arguments introduces conjunctive information: from an element {lean}`Prod.mk a b` of {lean}`Prod α β` we can extract {leanRef}`a` _and_ {leanRef}`b`.
An arbitrary inductive type can include both features, by having any number of constructors, each of which takes any number of arguments.
:::

As with function definitions, Lean's inductive definition syntax will let you put named arguments to the constructors before the colon:

```lean
namespace Hidden
------
inductive Prod (α : Type u) (β : Type v) where
  | mk (fst : α) (snd : β) : Prod α β

inductive Sum (α : Type u) (β : Type v) where
  | inl (a : α) : Sum α β
  | inr (b : β) : Sum α β
-------
end Hidden
```

The results of these definitions are essentially the same as the ones given earlier in this section.

A type, like {leanRef}`Prod`, that has only one constructor is purely conjunctive: the constructor simply packs the list of arguments into a single piece of data, essentially a tuple where the type of subsequent arguments can depend on the type of the initial argument. We can also think of such a type as a “record” or a “structure”. In Lean, the keyword {kw}`structure` can be used to define such an inductive type as well as its projections, at the same time.

```lean
namespace Hidden
------
structure Prod (α : Type u) (β : Type v) where
  mk ::
  fst : α
  snd : β
-------
end Hidden
```

This example simultaneously introduces the inductive type, {leanRef}`Prod`, its constructor, {leanRef}`mk`, the usual eliminators ({lit}`rec` and {lit}`recOn`), as well as the projections, {leanRef}`fst` and {leanRef}`snd`, as defined above.

If you do not name the constructor, Lean uses {lit}`mk` as a default.
For example, the following defines a record to store a color as a triple of RGB values: ```lean structure Color where red : Nat green : Nat blue : Nat deriving Repr def yellow := Color.mk 255 255 0 #eval Color.red yellow ``` The definition of {leanRef}`yellow` forms the record with the three values shown, and the projection {leanRef}`Color.red` returns the red component. The {kw}`structure` command is especially useful for defining algebraic structures, and Lean provides substantial infrastructure to support working with them. Here, for example, is the definition of a semigroup: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc : ∀ a b c, mul (mul a b) c = mul a (mul b c) ``` We will see more examples in the chapter on {ref "structures-and-records"}[structures and records]. :::leanFirst We have already discussed the dependent product type {leanRef}`Sigma`: ```lean namespace Hidden ------ inductive Sigma {α : Type u} (β : α → Type v) where | mk : (a : α) → β a → Sigma β ------- end Hidden ``` ::: Two more examples of inductive types in the library are the following: ```lean namespace Hidden ------ inductive Option (α : Type u) where | none : Option α | some : α → Option α inductive Inhabited (α : Type u) where | mk : α → Inhabited α ------- end Hidden ``` :::setup ``` variable {α : Type u} {β : Type v} {γ : Type u'} (b : β) (f : α → Option β) (a : α) ``` In the semantics of dependent type theory, there is no built-in notion of a partial function. Every element of a function type {lean}`α → β` or a dependent function type {lean}`(a : α) → β` is assumed to have a value at every input. The {lean}`Option` type provides a way of representing partial functions. An element of {lean}`Option β` is either {lean}`none` or of the form {lean}`some b`, for some value {lean}`b : β`. 
Thus we can think of an element {lean}`f` of the type {lean}`α → Option β` as being a partial function from {lean}`α` to {lean}`β`: for every {lean}`a : α`, {lean}`f a` either returns {lean (type := "Option β")}`none`, indicating {lean}`f a` is “undefined”, or {lean}`some b`. An element of {lean}`Inhabited α` is simply a witness to the fact that there is an element of {lean}`α`. Later, we will see that {lean}`Inhabited` is an example of a _type class_ in Lean: Lean can be instructed that suitable base types are inhabited, and can automatically infer that other constructed types are inhabited on that basis. As exercises, we encourage you to develop a notion of composition for partial functions from {lean}`α` to {lean}`β` and {lean}`β` to {lean}`γ`, and show that it behaves as expected. We also encourage you to show that {lean}`Bool` and {lean}`Nat` are inhabited, that the product of two inhabited types is inhabited, and that the type of functions to an inhabited type is inhabited. ::: # Inductively Defined Propositions %%% tag := "inductively-defined-propositions" %%% Inductively defined types can live in any type universe, including the bottom-most one, {lean}`Prop`. In fact, this is exactly how the logical connectives are defined. ```lean namespace Hidden ------ inductive False : Prop inductive True : Prop where | intro : True inductive And (a b : Prop) : Prop where | intro : a → b → And a b inductive Or (a b : Prop) : Prop where | inl : a → Or a b | inr : b → Or a b ------- end Hidden ``` :::setup ``` variable (p : Prop) (hp : p) (α : Type u) (β : Type v) ``` You should think about how these give rise to the introduction and elimination rules that you have already seen. There are rules that govern what the eliminator of an inductive type can eliminate _to_, that is, what kinds of types can be the target of a recursor. Roughly speaking, what characterizes inductive types in {lean}`Prop` is that one can only eliminate to other types in {lean}`Prop`. 
This is consistent with the understanding that if {lean}`p : Prop`, an element {lean}`hp : p` carries no data. There is a small exception to this rule, however, which we will discuss below, in {ref "inductive-families"}[Inductive Families]. Even the existential quantifier is inductively defined: ```lean namespace Hidden ------ inductive Exists {α : Sort u} (p : α → Prop) : Prop where | intro (w : α) (h : p w) : Exists p ------- end Hidden ``` Keep in mind that the notation {lean}`∃ x : α, p` is syntactic sugar for {lean}`Exists (fun x : α => p)`. The definitions of {lean}`False`, {lean}`True`, {lean}`And`, and {lean}`Or` are perfectly analogous to the definitions of {lean}`Empty`, {lean}`Unit`, {lean}`Prod`, and {lean}`Sum`. The difference is that the first group yields elements of {lean}`Prop`, and the second yields elements of {lean}`Type u` for some {leanRef}`u`. In a similar way, {leanRef}`∃ x : α, p` is a {lean}`Prop`-valued variant of {lean}`Σ x : α, β`. ::: ::::setup ``` variable (α : Type u) (β : Type v) (p : Prop) ``` This is a good place to mention another inductive type, denoted {lean}`{x : α // p}`, which is sort of a hybrid between {lean}`∃ x : α, p` and {lean}`Σ x : α, β`. ```lean namespace Hidden ------ inductive Subtype {α : Type u} (p : α → Prop) where | mk : (x : α) → p x → Subtype p ------- end Hidden ``` :::: ::::setup ``` variable {α : Type u} {p : α → Prop} ``` :::leanFirst In fact, in Lean, {leanRef}`Subtype` is defined using the structure command: ```lean namespace Hidden ------ structure Subtype {α : Sort u} (p : α → Prop) where val : α property : p val ------- end Hidden ``` The notation {lean}`{x : α // p x}` is syntactic sugar for {lean}`Subtype (fun x : α => p x)`. It is modeled after subset notation in set theory: the idea is that {leanRef}`{x : α // p x}` denotes the collection of elements of {leanRef}`α` that have property {leanRef}`p`. 
::: :::: # Defining the Natural Numbers %%% tag := "defining-the-natural-numbers" %%% The inductively defined types we have seen so far are “flat”: constructors wrap data and insert it into a type, and the corresponding recursor unpacks the data and acts on it. Things get much more interesting when the constructors act on elements of the very type being defined. A canonical example is the type {lean}`Nat` of natural numbers: ```lean namespace Hidden ------ inductive Nat where | zero : Nat | succ : Nat → Nat ------- end Hidden ``` :::setup ``` open Nat variable {motive : Nat → Sort u} {f : (n : Nat) → motive n} {n : Nat} ``` There are two constructors. We start with {lean}`zero : Nat`; it takes no arguments, so we have it from the start. In contrast, the constructor {lean}`succ` can only be applied to a previously constructed {lean}`Nat`. Applying it to {lean}`zero` yields {lean}`succ zero : Nat`. Applying it again yields {lean}`succ (succ zero) : Nat`, and so on. Intuitively, {lean}`Nat` is the “smallest” type with these constructors, meaning that it is exhaustively (and freely) generated by starting with {lean}`zero` and applying {lean}`succ` repeatedly. As before, the recursor for {lean}`Nat` is designed to define a dependent function {lean}`f` from {lean}`Nat` to any domain, that is, an element {lean}`f` of {lean}`(n : Nat) → motive n` for some {lean}`motive : Nat → Sort u`. It has to handle two cases: the case where the input is {lean}`zero`, and the case where the input is of the form {lean}`succ n` for some {lean}`n : Nat`. In the first case, we simply specify a target value with the appropriate type, as before. In the second case, however, the recursor can assume that a value of {lean}`f` at {lean}`n` has already been computed. As a result, the next argument to the recursor specifies a value for {lean}`f (succ n)` in terms of {lean}`n` and {lean}`f n`. 
If we check the type of the recursor, you find the following: ::: ```signature Nat.rec.{u} : {motive : Nat → Sort u} → (zero : motive Nat.zero) → (succ : (n : Nat) → motive n → motive (Nat.succ n)) → (t : Nat) → motive t ``` The implicit argument, {leanRef}`motive`, is the codomain of the function being defined. In type theory it is common to say {leanRef}`motive` is the _motive_ for the elimination/recursion, since it describes the kind of object we wish to construct. The next two arguments specify how to compute the zero and successor cases, as described above. They are also known as the _minor premises_. Finally, the {leanRef}`t : Nat` is the input to the function. It is also known as the _major premise_. The {name}`Nat.recOn` is similar to {name}`Nat.rec` but the major premise occurs before the minor premises. ```signature Nat.recOn.{u} : {motive : Nat → Sort u} → (t : Nat) → (zero : motive Nat.zero) → (succ : ((n : Nat) → motive n → motive (Nat.succ n))) → motive t ``` :::setup ``` def add (m n : Nat) : Nat := match n with | Nat.zero => m | Nat.succ n => Nat.succ (add m n) variable {n m : Nat} open Nat ``` Consider, for example, the addition function {lean}`add m n` on the natural numbers. Fixing {lean}`m`, we can define addition by recursion on {lean}`n`. In the base case, we set {lean}`add m zero` to {lean}`m`. In the successor step, assuming the value {lean}`add m n` is already determined, we define {lean}`add m (succ n)` to be {lean}`succ (add m n)`. ::: ```lean namespace Hidden ------ inductive Nat where | zero : Nat | succ : Nat → Nat deriving Repr def add (m n : Nat) : Nat := match n with | Nat.zero => m | Nat.succ n => Nat.succ (add m n) open Nat #eval add (succ (succ zero)) (succ zero) ------- end Hidden ``` It is useful to put such definitions into a namespace, {lean}`Nat`. We can then go on to define familiar notation in that namespace. 
The two defining equations for addition now hold definitionally: ```lean namespace Hidden inductive Nat where | zero : Nat | succ : Nat → Nat deriving Repr ------ namespace Nat def add (m n : Nat) : Nat := match n with | Nat.zero => m | Nat.succ n => Nat.succ (add m n) instance : Add Nat where add := add theorem add_zero (m : Nat) : m + zero = m := rfl theorem add_succ (m n : Nat) : m + succ n = succ (m + n) := rfl end Nat ------- end Hidden ``` We will explain how the {kw}`instance` command works in the {ref "type-classes"}[Type Classes] chapter. In the examples below, we will use Lean's version of the natural numbers. ::::leanFirst :::setup ``` variable {n : Nat} {motive : Nat → Sort u} {ih : motive n} ``` Proving a fact like {lean}`0 + n = n`, however, requires a proof by induction. As observed above, the induction principle is just a special case of the recursion principle, when the codomain {lean}`motive n` is an element of {lean}`Prop`. It represents the familiar pattern of an inductive proof: to prove {lean}`∀ n, motive n`, first prove {lean}`motive 0`, and then, for arbitrary {lean}`n`, assume {lean}`ih : motive n` and prove {lean}`motive (n + 1)`. ::: ```lean namespace Hidden ------ open Nat theorem zero_add (n : Nat) : 0 + n = n := Nat.recOn (motive := fun x => 0 + x = x) n (show 0 + 0 = 0 from rfl) (fun (n : Nat) (ih : 0 + n = n) => show 0 + (n + 1) = n + 1 from calc 0 + (n + 1) _ = (0 + n) + 1 := rfl _ = n + 1 := by rw [ih]) ------- end Hidden ``` :::: Notice that, once again, when {name}`Nat.recOn` is used in the context of a proof, it is really the induction principle in disguise. The {tactic}`rw` and {tactic}`simp` tactics tend to be very effective in proofs like these. 
In this case, each can be used to reduce the proof to: ```lean namespace Hidden ------ open Nat theorem zero_add (n : Nat) : 0 + n = n := Nat.recOn (motive := fun x => 0 + x = x) n rfl (fun n ih => by simp [ih]) ------- end Hidden ``` :::setup ``` variable (m n k : Nat) ``` As another example, let us prove the associativity of addition, {lean}`∀ m n k, m + n + k = m + (n + k)`. (The notation {leanRef}`+`, as we have defined it, associates to the left, so {leanRef}`m + n + k` is really {lean}`(m + n) + k`.) The hardest part is figuring out which variable to do the induction on. Since addition is defined by recursion on the second argument, {leanRef (in := "n k,")}`k` is a good guess, and once we make that choice the proof almost writes itself: ::: ```lean namespace Hidden ------ open Nat theorem add_assoc (m n k : Nat) : m + n + k = m + (n + k) := Nat.recOn (motive := fun k => m + n + k = m + (n + k)) k (show m + n + 0 = m + (n + 0) from rfl) (fun k (ih : m + n + k = m + (n + k)) => show m + n + (k + 1) = m + (n + (k + 1)) from calc m + n + (k + 1) _ = (m + n + k) + 1 := rfl _ = (m + (n + k)) + 1 := by rw [ih] _ = m + ((n + k) + 1) := rfl _ = m + (n + (k + 1)) := rfl) ------- end Hidden ``` Once again, you can reduce the proof to: ```lean open Nat theorem add_assoc (m n k : Nat) : m + n + k = m + (n + k) := Nat.recOn (motive := fun k => m + n + k = m + (n + k)) k rfl (fun k ih => by simp [add_succ (m + n) k, ih]; rfl) ``` Suppose we try to prove the commutativity of addition. 
Choosing induction on the second argument, we might begin as follows: ```lean open Nat theorem add_comm (m n : Nat) : m + n = n + m := Nat.recOn (motive := fun x => m + x = x + m) n (show m + 0 = 0 + m by rw [Nat.zero_add, Nat.add_zero]) (fun (n : Nat) (ih : m + n = n + m) => show m + succ n = succ n + m from calc m + succ n _ = succ (m + n) := rfl _ = succ (n + m) := by rw [ih] _ = succ n + m := sorry) ``` At this point, we see that we need another supporting fact, namely, that {leanRef}`succ (n + m)`{lit}` = `{leanRef}`succ n + m`. You can prove this by induction on {leanRef}`m`: ```lean open Nat theorem succ_add (n m : Nat) : succ n + m = succ (n + m) := Nat.recOn (motive := fun x => succ n + x = succ (n + x)) m (show succ n + 0 = succ (n + 0) from rfl) (fun (m : Nat) (ih : succ n + m = succ (n + m)) => show succ n + succ m = succ (n + succ m) from calc succ n + succ m _ = succ (succ n + m) := rfl _ = succ (succ (n + m)) := by rw [ih] _ = succ (n + succ m) := rfl) ``` You can then replace the {leanRef}`sorry` in the previous proof with {leanRef}`succ_add`. 
Yet again, the proofs can be compressed: ```lean namespace Hidden inductive Nat where | zero : Nat | succ : Nat → Nat deriving Repr def add (m n : Nat) : Nat := match n with | Nat.zero => m | Nat.succ n => Nat.succ (add m n) instance : Add Nat where add := add namespace Nat theorem add_zero (m : Nat) : m + zero = m := rfl theorem add_succ (m n : Nat) : m + succ n = succ (m + n) := rfl theorem zero_add (n : Nat) : zero + n = n := Nat.recOn (motive := fun x => zero + x = x) n rfl (fun n ih => by simpa [add_zero, add_succ]) end Nat ------ open Nat theorem succ_add (n m : Nat) : succ n + m = succ (n + m) := Nat.recOn (motive := fun x => succ n + x = succ (n + x)) m rfl (fun m ih => by simpa [add_succ (succ n)]) theorem add_comm (m n : Nat) : m + n = n + m := Nat.recOn (motive := fun x => m + x = x + m) n (by simp [add_zero, zero_add]) (fun m ih => by simp_all [succ_add, add_succ]) ------- end Hidden ``` # Other Recursive Data Types %%% tag := "other-recursive-data-types" %%% :::leanFirst Let us consider some more examples of inductively defined types. For any type, {leanRef}`α`, the type {leanRef}`List α` of lists of elements of {leanRef}`α` is defined in the library. ```lean namespace Hidden ------ inductive List (α : Type u) where | nil : List α | cons (h : α) (t : List α) : List α namespace List def append (as bs : List α) : List α := match as with | nil => bs | cons a as => cons a (append as bs) theorem nil_append (as : List α) : append nil as = as := rfl theorem cons_append (a : α) (as bs : List α) : append (cons a as) bs = cons a (append as bs) := rfl end List ------- end Hidden ``` A list of elements of type {leanRef}`α` is either the empty list, {leanRef}`nil`, or an element {leanRef}`h : α` followed by a list {leanRef}`t : List α`. 
The first element, {leanRef}`h`, is commonly known as the “head” of the list, and the remainder, {leanRef}`t`, is known as the “tail.” ::: As an exercise, prove the following: ```lean namespace Hidden inductive List (α : Type u) where | nil : List α | cons : α → List α → List α namespace List def append (as bs : List α) : List α := match as with | nil => bs | cons a as => cons a (append as bs) theorem nil_append (as : List α) : append nil as = as := rfl theorem cons_append (a : α) (as bs : List α) : append (cons a as) bs = cons a (append as bs) := rfl ------ theorem append_nil (as : List α) : append as nil = as := sorry theorem append_assoc (as bs cs : List α) : append (append as bs) cs = append as (append bs cs) := sorry ------- end List end Hidden ``` :::setup ``` universe u def length : {α : Type u} → List α → Nat := List.length def append : {α : Type u} → List α → List α → List α := List.append variable (as bs : List α) ``` Try also defining the function {lean}`length : {α : Type u} → List α → Nat` that returns the length of a list, and prove that it behaves as expected (for example, {lean}`length (append as bs) = length as + length bs`). ::: For another example, we can define the type of binary trees: ```lean inductive BinaryTree where | leaf : BinaryTree | node : BinaryTree → BinaryTree → BinaryTree ``` In fact, we can even define the type of countably branching trees: ```lean inductive CBTree where | leaf : CBTree | sup : (Nat → CBTree) → CBTree namespace CBTree def succ (t : CBTree) : CBTree := sup (fun _ => t) def toCBTree : Nat → CBTree | 0 => leaf | n+1 => succ (toCBTree n) def omega : CBTree := sup toCBTree end CBTree ``` # Tactics for Inductive Types %%% tag := "tactics-for-inductive-types" %%% Given the fundamental importance of inductive types in Lean, it should not be surprising that there are a number of tactics designed to work with them effectively. We describe some of them here. 
:::setup ``` variable {x : InductiveType} ``` The {tactic}`cases` tactic works on elements of an inductively defined type, and does what the name suggests: it decomposes the element according to each of the possible constructors. In its most basic form, it is applied to an element {lean}`x` in the local context. It then reduces the goal to cases in which {lean}`x` is replaced by each of the constructions. ::: ```lean example (p : Nat → Prop) (hz : p 0) (hs : ∀ n, p (Nat.succ n)) : ∀ n, p n := by intro n cases n . exact hz --^ PROOF_STATE: A . apply hs --^ PROOF_STATE: B ``` In the first branch, the proof state is: ```proofState A case zero p : Nat → Prop hz : p 0 hs : ∀ (n : Nat), p n.succ ⊢ p 0 ``` In the second branch, it is: ```proofState B case succ p : Nat → Prop hz : p 0 hs : ∀ (n : Nat), p n.succ n✝ : Nat ⊢ p (n✝ + 1) ``` :::leanFirst There are extra bells and whistles. For one thing, {leanRef}`cases` allows you to choose the names for each alternative using a {leanRef}`with` clause. In the next example, for example, we choose the name {leanRef}`m` for the argument to {leanRef}`succ`, so that the second case refers to {leanRef}`succ m`. More importantly, the cases tactic will detect any items in the local context that depend on the target variable. It reverts these elements, does the split, and reintroduces them. In the example below, notice that the hypothesis {leanRef}`h : n ≠ 0` becomes {leanRef}`h : 0 ≠ 0` in the first branch, and {leanRef}`h : m + 1 ≠ 0` in the second. ```lean (showProofStates := "C D") open Nat example (n : Nat) (h : n ≠ 0) : succ (pred n) = n := by cases n with | zero => -- ^ PROOF_STATE: C apply absurd rfl h | succ m => -- ^ PROOF_STATE: D rfl ``` ::: Notice that {leanRef}`cases` can be used to produce data as well as prove propositions. 
```lean def f (n : Nat) : Nat := by cases n; exact 3; exact 7 example : f 0 = 3 := rfl example : f 5 = 7 := rfl ``` Once again, cases will revert, split, and then reintroduce dependencies in the context. ```lean def Tuple (α : Type) (n : Nat) := { as : List α // as.length = n } def f {n : Nat} (t : Tuple α n) : Nat := by cases n; exact 3; exact 7 def myTuple : Tuple Nat 3 := ⟨[0, 1, 2], rfl⟩ example : f myTuple = 7 := rfl ``` Here is an example of multiple constructors with arguments. ```lean inductive Foo where | bar1 : Nat → Nat → Foo | bar2 : Nat → Nat → Nat → Foo def silly (x : Foo) : Nat := by cases x with | bar1 a b => exact b | bar2 c d e => exact e ``` The alternatives for each constructor don't need to be solved in the order the constructors were declared. ```lean inductive Foo where | bar1 : Nat → Nat → Foo | bar2 : Nat → Nat → Nat → Foo ------ def silly (x : Foo) : Nat := by cases x with | bar2 c d e => exact e | bar1 a b => exact b ``` :::leanFirst The syntax of the {leanRef}`with` clause is convenient for writing structured proofs. Lean also provides a complementary {leanRef}`case` tactic, which allows you to focus on a goal and assign variable names. ```lean inductive Foo where | bar1 : Nat → Nat → Foo | bar2 : Nat → Nat → Nat → Foo ------ def silly (x : Foo) : Nat := by cases x case bar1 a b => exact b case bar2 c d e => exact e ``` ::: The {leanRef}`case` tactic is clever, in that it will match the constructor to the appropriate goal. For example, we can fill the goals above in the opposite order: ```lean inductive Foo where | bar1 : Nat → Nat → Foo | bar2 : Nat → Nat → Nat → Foo ------ def silly (x : Foo) : Nat := by cases x case bar2 c d e => exact e case bar1 a b => exact b ``` You can also use {leanRef}`cases` with an arbitrary expression. Assuming that expression occurs in the goal, the cases tactic will generalize over the expression, introduce the resulting universally quantified variable, and case on that. 
```lean open Nat example (p : Nat → Prop) (hz : p 0) (hs : ∀ n, p (succ n)) (m k : Nat) : p (m + 3 * k) := by cases m + 3 * k exact hz -- goal is p 0 apply hs -- goal is a : Nat ⊢ p (succ a) ``` Think of this as saying “split on cases as to whether {leanRef}`m + 3 * k` is zero or the successor of some number.” The result is functionally equivalent to the following: ```lean (showProofStates := "Z S") open Nat example (p : Nat → Prop) (hz : p 0) (hs : ∀ n, p (succ n)) (m k : Nat) : p (m + 3 * k) := by generalize m + 3 * k = n cases n -- ^ PROOF_STATE: Z exact hz -- ^ PROOF_STATE: S apply hs ``` Notice that the expression {leanRef}`m + 3 * k` is erased by {leanRef}`generalize`; all that matters is whether it is of the form {leanRef}`0` or {leanRef}`n✝ + 1`. This form of {leanRef}`cases` will _not_ revert any hypotheses that also mention the expression in the equation (in this case, {leanRef}`m + 3 * k`). If such a term appears in a hypothesis and you want to generalize over that as well, you need to {tactic}`revert` it explicitly. If the expression you case on does not appear in the goal, the {tactic}`cases` tactic uses {tactic}`have` to put the type of the expression into the context. Here is an example: ```lean example (p : Prop) (m n : Nat) (h₁ : m < n → p) (h₂ : m ≥ n → p) : p := by cases Nat.lt_or_ge m n case inl hlt => exact h₁ hlt -- ^ PROOF_STATE: one case inr hge => exact h₂ hge -- ^ PROOF_STATE: two ``` The theorem {leanRef}`Nat.lt_or_ge m n` says {leanRef}`m < n`{lit}` ∨ `{leanRef}`m ≥ n`, and it is natural to think of the proof above as splitting on these two cases. In the first branch, we have the hypothesis {leanRef}`hlt : m < n`, and in the second we have the hypothesis {leanRef}`hge : m ≥ n`. 
The proof above is functionally equivalent to the following: ```lean example (p : Prop) (m n : Nat) (h₁ : m < n → p) (h₂ : m ≥ n → p) : p := by have h : m < n ∨ m ≥ n := Nat.lt_or_ge m n cases h case inl hlt => exact h₁ hlt case inr hge => exact h₂ hge ``` After the first two lines, we have {leanRef}`h : m < n ∨ m ≥ n` as a hypothesis, and we simply do cases on that. :::leanFirst Here is another example, where we use the decidability of equality on the natural numbers to split on the cases {leanRef}`m = n` and {leanRef}`m ≠ n`. ```lean #check Nat.sub_self example (m n : Nat) : m - n = 0 ∨ m ≠ n := by cases Decidable.em (m = n) with | inl heq => rw [heq]; apply Or.inl; exact Nat.sub_self n | inr hne => apply Or.inr; exact hne ``` ::: Remember that if you {kw}`open `{lit}`Classical`, you can use the law of the excluded middle for any proposition at all. But using type class inference (see {ref "type-classes"}[Type Classes]), Lean can actually find the relevant decision procedure, which means that you can use the case split in a computable function. :::leanFirst Just as the {leanRef}`cases` tactic can be used to carry out proof by cases, the {leanRef}`induction` tactic can be used to carry out proofs by induction. The syntax is similar to that of {leanRef}`cases`, except that the argument can only be a term in the local context. Here is an example: ```lean namespace Hidden ------ theorem zero_add (n : Nat) : 0 + n = n := by induction n with | zero => rfl | succ n ih => rw [Nat.add_succ, ih] ------- end Hidden ``` ::: :::leanFirst As with {leanRef}`cases`, we can use the {leanRef}`case` tactic instead of {leanRef}`with`. 
```lean namespace Hidden ------ theorem zero_add (n : Nat) : 0 + n = n := by induction n case zero => rfl case succ n ih => rw [Nat.add_succ, ih] ------- end Hidden ``` ::: Here are some additional examples: :::TODO FIXME ::: ```lean namespace Hidden inductive Nat where | zero | succ : Nat → Nat def Nat.toNat : Nat → _root_.Nat | .zero => .zero | .succ n => .succ n.toNat def Nat.ofNat : _root_.Nat → Nat | .zero => .zero | .succ n => .succ (.ofNat n) def add (m n : Nat) : Nat := match n with | Nat.zero => m | Nat.succ n => Nat.succ (add m n) instance : Add Nat where add := add instance : OfNat Nat n where ofNat := .ofNat n @[simp] theorem zero_zero : (.zero : Nat) = 0 := rfl theorem add_zero (n : Nat) : n + 0 = n := rfl theorem add_succ (n k : Nat) : n + k.succ = (n + k).succ := rfl ------ open Nat theorem zero_add (n : Nat) : 0 + n = n := by induction n <;> simp [*, add_zero, add_succ] theorem succ_add (m n : Nat) : succ m + n = succ (m + n) := by induction n <;> simp [*, add_zero, add_succ] theorem add_comm (m n : Nat) : m + n = n + m := by induction n <;> simp [*, add_zero, add_succ, succ_add, zero_add] theorem add_assoc (m n k : Nat) : m + n + k = m + (n + k) := by induction k <;> simp [*, add_zero, add_succ] ------- end Hidden ``` The {leanRef}`induction` tactic also supports user-defined induction principles with multiple targets (aka major premises). This example uses {name}`Nat.mod.inductionOn`, which has the following signature: ```signature Nat.mod.inductionOn {motive : Nat → Nat → Sort u} (x y : Nat) (ind : ∀ x y, 0 < y ∧ y ≤ x → motive (x - y) y → motive x y) (base : ∀ x y, ¬(0 < y ∧ y ≤ x) → motive x y) : motive x y ``` ```lean example (x : Nat) {y : Nat} (h : y > 0) : x % y < y := by induction x, y using Nat.mod.inductionOn with | ind x y h₁ ih => rw [Nat.mod_eq_sub_mod h₁.2] exact ih h | base x y h₁ => have : ¬ 0 < y ∨ ¬ y ≤ x := Iff.mp (Decidable.not_and_iff_or_not ..) 
h₁ match this with | Or.inl h₁ => exact absurd h h₁ | Or.inr h₁ => have hgt : y > x := Nat.gt_of_not_le h₁ rw [← Nat.mod_eq_of_lt hgt] at hgt assumption ``` You can use the {kw}`match` notation in tactics too: ```lean example : p ∨ q → q ∨ p := by intro h match h with | Or.inl _ => apply Or.inr; assumption | Or.inr h2 => apply Or.inl; exact h2 ``` :::leanFirst As a convenience, pattern-matching has been integrated into tactics such as {leanRef}`intro` and {leanRef}`funext`. ```lean example : s ∧ q ∧ r → p ∧ r → q ∧ p := by intro ⟨_, ⟨hq, _⟩⟩ ⟨hp, _⟩ exact ⟨hq, hp⟩ example : (fun (x : Nat × Nat) (y : Nat × Nat) => x.1 + y.2) = (fun (x : Nat × Nat) (z : Nat × Nat) => z.2 + x.1) := by funext (a, b) (c, d) show a + d = d + a rw [Nat.add_comm] ``` ::: :::leanFirst We close this section with one last tactic that is designed to facilitate working with inductive types, namely, the {leanRef}`injection` tactic. By design, the elements of an inductive type are freely generated, which is to say, the constructors are injective and have disjoint ranges. The {leanRef}`injection` tactic is designed to make use of this fact: ```lean open Nat example (m n k : Nat) (h : succ (succ m) = succ (succ n)) : n + k = m + k := by injection h with h' injection h' with h'' rw [h''] ``` ::: The first instance of the tactic adds {leanRef}`h' : m.succ = n.succ` to the context, and the second adds {leanRef}`h'' : m = n`. The {leanRef}`injection` tactic also detects contradictions that arise when different constructors are set equal to one another, and uses them to close the goal. ```lean open Nat example (m n : Nat) (h : succ m = 0) : n = n + 7 := by injection h example (m n : Nat) (h : succ m = 0) : n = n + 7 := by contradiction example (h : 7 = 4) : False := by contradiction ``` As the second example shows, the {leanRef}`contradiction` tactic also detects contradictions of this form. 
# Inductive Families %%% tag := "inductive-families" %%% We are almost done describing the full range of inductive definitions accepted by Lean. So far, you have seen that Lean allows you to introduce inductive types with any number of recursive constructors. In fact, a single inductive definition can introduce an indexed _family_ of inductive types, in a manner we now describe. An inductive family is an indexed family of types defined by a simultaneous induction of the following form: ``` inductive foo : ... → Sort u where | constructor₁ : ... → foo ... | constructor₂ : ... → foo ... ... | constructorₙ : ... → foo ... ``` ::::setup ``` universe u ``` :::leanFirst In contrast to an ordinary inductive definition, which constructs an element of some {leanRef}`Sort u`, the more general version constructs a function {lit}`... → `{lean}`Sort u`, where “{lit}`...`” denotes a sequence of argument types, also known as _indices_. Each constructor then constructs an element of some member of the family. One example is the definition of {leanRef}`Vect α n`, the type of vectors of elements of {leanRef}`α` of length {leanRef}`n`: ```lean inductive Vect (α : Type u) : Nat → Type u where | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n + 1) ``` ::: :::: Notice that the {leanRef}`cons` constructor takes an element of {leanRef}`Vect α n` and returns an element of {leanRef}`Vect α (n + 1)`, thereby using an element of one member of the family to build an element of another. A more exotic example is given by the definition of the equality type in Lean: ```lean namespace Hidden ------ inductive Eq {α : Sort u} (a : α) : α → Prop where | refl : Eq a a ------- end Hidden ``` :::setup ``` variable (α : Sort u) (a : α) (x : α) ``` For each fixed {leanRef}`α : Sort u` and {leanRef}`a : α`, this definition constructs a family of types {lean}`Eq a x`, indexed by {lean}`x : α`. 
Notably, however, there is only one constructor, {leanRef}`refl`, which is an element of {leanRef}`Eq a a`. Intuitively, the only way to construct a proof of {lean}`Eq a x` is to use reflexivity, in the case where {lean}`x` is {lean}`a`. Note that {lean}`Eq a a` is the only inhabited type in the family of types {lean}`Eq a x`. The elimination principle generated by Lean is as follows: ::: ```lean set_option pp.proofs true -------- universe u v #check (@Eq.rec : {α : Sort u} → {a : α} → {motive : (x : α) → a = x → Sort v} → motive a rfl → {b : α} → (h : a = b) → motive b h) ``` It is a remarkable fact that all the basic axioms for equality follow from the constructor, {leanRef}`refl`, and the eliminator, {leanRef}`Eq.rec`. The definition of equality is atypical, however; see the discussion in {ref "axiomatic-details"}[Axiomatic Details]. The recursor {leanRef}`Eq.rec` is also used to define substitution: ```lean namespace Hidden ------ theorem subst {α : Type u} {a b : α} {p : α → Prop} (h₁ : Eq a b) (h₂ : p a) : p b := Eq.rec (motive := fun x _ => p x) h₂ h₁ ------- end Hidden ``` You can also define {leanRef}`subst` using {kw}`match`. ```lean namespace Hidden ------ theorem subst {α : Type u} {a b : α} {p : α → Prop} (h₁ : Eq a b) (h₂ : p a) : p b := match h₁ with | rfl => h₂ ------- end Hidden ``` Actually, Lean compiles the {kw}`match` expressions using a definition based on generated helpers such as {name}`Eq.casesOn` and {name}`Eq.ndrec`, which are themselves defined using {leanRef}`Eq.rec`. ```lean namespace Hidden ------ theorem subst {α : Type u} {a b : α} {p : α → Prop} (h₁ : a = b) (h₂ : p a) : p b := match h₁ with | rfl => h₂ set_option pp.all true #print subst #print subst.match_1_1 #print Eq.casesOn #print Eq.ndrec ------- end Hidden ``` Using the recursor or {kw}`match` with {leanRef}`h₁ : a = b`, we may assume {leanRef}`a` and {leanRef}`b` are the same, in which case, {leanRef}`p b` and {leanRef}`p a` are the same. 
:::leanFirst It is not hard to prove that {leanRef}`Eq` is symmetric and transitive. In the following example, we prove {leanRef}`symm` and leave as exercises the theorems {leanRef}`trans` and {leanRef}`congr` (congruence). ```lean namespace Hidden ------ variable {α β : Type u} {a b c : α} theorem symm (h : Eq a b) : Eq b a := match h with | rfl => rfl theorem trans (h₁ : Eq a b) (h₂ : Eq b c) : Eq a c := sorry theorem congr (f : α → β) (h : Eq a b) : Eq (f a) (f b) := sorry ------- end Hidden ``` ::: In the type theory literature, there are further generalizations of inductive definitions, for example, the principles of _induction-recursion_ and _induction-induction_. These are not supported by Lean. # Axiomatic Details %%% tag := "axiomatic-details" %%% We have described inductive types and their syntax through examples. This section provides additional information for those interested in the axiomatic foundations. We have seen that the constructor to an inductive type takes _parameters_—intuitively, the arguments that remain fixed throughout the inductive construction—and _indices_, the arguments parameterizing the family of types that is simultaneously under construction. Each constructor should have a type, where the argument types are built up from previously defined types, the parameter and index types, and the inductive family currently being defined. The requirement is that if the latter is present at all, it occurs only _strictly positively_. This means simply that any argument to the constructor in which it occurs is a dependent arrow type in which the inductive type under definition occurs only as the resulting type, where the indices are given in terms of constants and previous arguments. Since an inductive type lives in {leanRef}`Sort u` for some {leanRef}`u`, it is reasonable to ask _which_ universe levels {leanRef}`u` can be instantiated to. 
Each constructor {lit}`c` in the definition of a family {lit}`C` of inductive types is of the form ``` c : (a : α) → (b : β[a]) → C a p[a,b] ``` where {lit}`a` is a sequence of data type parameters, {lit}`b` is the sequence of arguments to the constructors, and {lit}`p[a, b]` are the indices, which determine which element of the inductive family the construction inhabits. (Note that this description is somewhat misleading, in that the arguments to the constructor can appear in any order as long as the dependencies make sense.) The constraints on the universe level of {lit}`C` fall into two cases, depending on whether or not the inductive type is specified to land in {lean}`Prop` (that is, {lean}`Sort 0`). Let us first consider the case where the inductive type is _not_ specified to land in {lean}`Prop`. Then the universe level {leanRef}`u` is constrained to satisfy the following: > For each constructor {lit}`c` as above, and each {lit}`βk[a]` in the sequence {lit}`β[a]`, if {lit}`βk[a] : Sort v`, we have {leanRef}`u` ≥ {leanRef}`v`. In other words, the universe level {leanRef}`u` is required to be at least as large as the universe level of each type that represents an argument to a constructor. When the inductive type is specified to land in {lean}`Prop`, there are no constraints on the universe levels of the constructor arguments. But these universe levels do have a bearing on the elimination rule. Generally speaking, for an inductive type in {lean}`Prop`, the motive of the elimination rule is required to be in {lean}`Prop`. There is an exception to this last rule: we are allowed to eliminate from an inductively defined {lean}`Prop` to an arbitrary {leanRef}`Sort` when there is only one constructor and each constructor argument is either in {lean}`Prop` or an index. The intuition is that in this case the elimination does not make use of any information that is not already given by the mere fact that the type of argument is inhabited. 
This special case is known as _singleton elimination_. We have already seen singleton elimination at play in applications of {name}`Eq.rec`, the eliminator for the inductively defined equality type. We can use an element {leanRef}`h : Eq a b` to cast an element {leanRef}`h₂ : p a` to {leanRef}`p b` even when {leanRef}`p a` and {leanRef}`p b` are arbitrary types, because the cast does not produce new data; it only reinterprets the data we already have. Singleton elimination is also used with heterogeneous equality and well-founded recursion, which will be discussed in the chapter on {ref "well-founded-recursion-and-induction"}[induction and recursion]. # Mutual and Nested Inductive Types %%% tag := "mutual-and-nested-inductive-types" %%% We now consider two generalizations of inductive types that are often useful, which Lean supports by “compiling” them down to the more primitive kinds of inductive types described above. In other words, Lean parses the more general definitions, defines auxiliary inductive types based on them, and then uses the auxiliary types to define the ones we really want. Lean's equation compiler, described in the next chapter, is needed to make use of these types effectively. Nonetheless, it makes sense to describe the declarations here, because they are straightforward variations on ordinary inductive definitions. First, Lean supports _mutually defined_ inductive types. The idea is that we can define two (or more) inductive types at the same time, where each one refers to the other(s). ```lean mutual inductive Even : Nat → Prop where | even_zero : Even 0 | even_succ : (n : Nat) → Odd n → Even (n + 1) inductive Odd : Nat → Prop where | odd_succ : (n : Nat) → Even n → Odd (n + 1) end ``` In this example, two types are defined simultaneously: a natural number {leanRef}`n` is {leanRef}`Even` if it is {lean}`0` or one more than an {leanRef}`Odd` number, and {leanRef}`Odd` if it is one more than an {leanRef}`Even` number. 
In the exercises below, you are asked to spell out the details. :::leanFirst A mutual inductive definition can also be used to define the notion of a finite tree with nodes labelled by elements of {leanRef (in:="Tree (α")}`α`: ```lean mutual inductive Tree (α : Type u) where | node : α → TreeList α → Tree α inductive TreeList (α : Type u) where | nil : TreeList α | cons : Tree α → TreeList α → TreeList α end ``` ::: With this definition, one can construct an element of {leanRef}`Tree α` by giving an element of {leanRef}`α` together with a list of subtrees, possibly empty. The list of subtrees is represented by the type {leanRef}`TreeList α`, which is defined to be either the empty list, {leanRef}`nil`, or the {leanRef}`cons` of a tree and an element of {leanRef}`TreeList α`. :::leanFirst This definition is inconvenient to work with, however. It would be much nicer if the list of subtrees were given by the type {leanRef}`List (Tree α)`, especially since Lean's library contains a number of functions and theorems for working with lists. One can show that the type {leanRef}`TreeList α` is _isomorphic_ to {leanRef}`List (Tree α)`, but translating results back and forth along this isomorphism is tedious. In fact, Lean allows us to define the inductive type we really want: ```lean inductive Tree (α : Type u) where | mk : α → List (Tree α) → Tree α ``` ::: This is known as a _nested_ inductive type. It falls outside the strict specification of an inductive type given in the last section because {leanRef}`Tree` does not occur strictly positively among the arguments to {leanRef}`mk`, but, rather, nested inside the {leanRef}`List` type constructor. Lean then automatically builds the isomorphism between {leanRef}`TreeList α` and {leanRef}`List (Tree α)` in its kernel, and defines the constructors for {leanRef}`Tree` in terms of the isomorphism. 
# Exercises %%% tag := none %%% ```setup open Nat variable {n m : Nat} def length : List α → Nat | [] => 0 | _ :: xs => length xs + 1 def reverse : List α → List α := go [] where go (acc : List α) : List α → List α | [] => acc | x :: xs => go (x :: acc) xs variable {xs ys : List α} inductive Term where | const (n : Nat) | var (n : Nat) | plus (s t : Term) | times (s t : Term) open Term variable {s t : Term} ``` 1. Try defining other operations on the natural numbers, such as multiplication, the predecessor function (with {lean}`pred 0 = 0`), truncated subtraction (with {lean}`n - m = 0` when {lean}`m` is greater than or equal to {lean}`n`), and exponentiation. Then try proving some of their basic properties, building on the theorems we have already proved. Since many of these are already defined in Lean's core library, you should work within a namespace named {lit}`Hidden`, or something like that, in order to avoid name clashes. 2. Define some operations on lists, like a {lean}`length` function or the {lean}`reverse` function. Prove some properties, such as the following: a. {lean}`length (xs ++ ys) = length xs + length ys` b. {lean}`length (reverse xs) = length xs` c. {lean}`reverse (reverse xs) = xs` 3. Define an inductive data type consisting of terms built up from the following constructors: - {lean}`const n`, a constant denoting the natural number {lean}`n` - {lean}`var n`, a variable, numbered {lean}`n` - {lean}`plus s t`, denoting the sum of {leanRef}`s` and {leanRef}`t` - {lean}`times s t`, denoting the product of {leanRef}`s` and {leanRef}`t` Recursively define a function that evaluates any such term with respect to an assignment of values to the variables. 4. Similarly, define the type of propositional formulas, as well as functions on the type of such formulas: an evaluation function, functions that measure the complexity of a formula, and a function that substitutes another formula for a given variable.
theorem_proving_in_lean4/book/TPiL/Tactics.lean
import VersoManual import TPiL.Examples open Verso.Genre open Manual hiding tactic open TPiL #doc (Manual) "Tactics" => In this chapter, we describe an alternative approach to constructing proofs, using _tactics_. A proof term is a representation of a mathematical proof; tactics are commands, or instructions, that describe how to build such a proof. Informally, you might begin a mathematical proof by saying “to prove the forward direction, unfold the definition, apply the previous lemma, and simplify.” Just as these are instructions that tell the reader how to find the relevant proof, tactics are instructions that tell Lean how to construct a proof term. They naturally support an incremental style of writing proofs, in which you decompose a proof and work on goals one step at a time. We will describe proofs that consist of sequences of tactics as “tactic-style” proofs, to contrast with the ways of writing proof terms we have seen so far, which we will call “term-style” proofs. Each style has its own advantages and disadvantages. For example, tactic-style proofs can be harder to read, because they require the reader to predict or guess the results of each instruction. But they can also be shorter and easier to write. Moreover, tactics offer a gateway to using Lean's automation, since automated procedures are themselves tactics. # Entering Tactic Mode %%% tag := "entering-tactic-mode" %%% :::leanFirst Conceptually, stating a theorem or introducing a {kw}`have` statement creates a goal, namely, the goal of constructing a term with the expected type. 
For example, the following creates the goal of constructing a term of type {leanRef}`p ∧ q ∧ p`, in a context with constants {leanRef}`p q : Prop`, {leanRef}`hp : p` and {leanRef}`hq : q`: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by -- PROOF_STATE: X ^ sorry ``` ::: You can write this goal as follows: ```proofState X p : Prop q : Prop hp : p hq : q ⊢ p ∧ q ∧ p ``` Indeed, if you replace the “sorry” by an underscore in the example above, Lean will report that it is exactly this goal that has been left unsolved. Ordinarily, you meet such a goal by writing an explicit term. But wherever a term is expected, Lean allows us to insert instead a {lit}`by <tactics>` block, where {lit}`<tactics>` is a sequence of commands, separated by semicolons or line breaks. You can prove the theorem above in that way: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro exact hp apply And.intro exact hq exact hp ``` We often put the {leanRef}`by` keyword on the preceding line, and write the example above as: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro -- ^ PROOF_STATE: intro exact hp apply And.intro exact hq exact hp ``` The {leanRef}`apply` tactic applies an expression, viewed as denoting a function with zero or more arguments. It unifies the conclusion with the expression in the current goal, and creates new goals for the remaining arguments, provided that no later arguments depend on them. In the example above, the command {leanRef}`apply And.intro` yields two subgoals: ```proofState intro case left p : Prop q : Prop hp : p hq : q ⊢ p case right p : Prop q : Prop hp : p hq : q ⊢ q ∧ p ``` The first goal is met with the command {leanRef}`exact hp`. The {leanRef}`exact` command is just a variant of {leanRef}`apply` which signals that the expression given should fill the goal exactly. It is good form to use it in a tactic proof, since its failure signals that something has gone wrong. 
It is also more robust than {leanRef}`apply`, since the elaborator takes the expected type, given by the target of the goal, into account when processing the expression that is being applied. In this case, however, {leanRef}`apply` would work just as well. You can see the resulting proof term with the {kw}`#print` command: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro exact hp apply And.intro exact hq exact hp ------ #print test ``` ::: TODO Check these. Vim? ::: You can write a tactic script incrementally. In VS Code, you can open a window to display messages by pressing {kbd}[`Ctrl` `Shift` `Enter`], and that window will then show you the current goal whenever the cursor is in a tactic block. If the proof is incomplete, the token {kw}`by` is decorated with a red squiggly line, and the error message contains the remaining goals. Tactic commands can take compound expressions, not just single identifiers. The following is a shorter version of the preceding proof: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro hp exact And.intro hq hp ``` Unsurprisingly, it produces exactly the same proof term: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro hp exact And.intro hq hp ------ #print test ``` Multiple tactic applications can be written in a single line by concatenating with a semicolon. ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro hp; exact And.intro hq hp ``` Tactics that may produce multiple subgoals often tag them. For example, the tactic {leanRef}`apply And.intro` tagged the first subgoal as {goal intro}`left`, and the second as {goal intro}`right`. In the case of the {leanRef}`apply` tactic, the tags are inferred from the parameters' names used in the {leanRef}`And.intro` declaration. You can structure your tactics using the notation {kw}`case`{lit}` <tag> => <tactics>`. 
The following is a structured version of our first tactic proof in this chapter. ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro case left => exact hp case right => apply And.intro case left => exact hq case right => exact hp ``` :::leanFirst You can solve the subgoal {goal intro2}`right` before {goal intro2}`left` using the {leanRef}`case` notation: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro -- ^ PROOF_STATE: intro2 case right => apply And.intro case left => exact hq -- ^ PROOF_STATE: leftBranch case right => exact hp case left => exact hp ``` ::: Note that Lean hides the other goals inside the {leanRef}`case` block. After {leanRef}`case left =>`, the proof state is: ```proofState leftBranch p : Prop q : Prop hp : p hq : q ⊢ q ``` We say that {leanRef}`case` is “focusing” on the selected goal. Moreover, Lean flags an error if the selected goal is not fully solved at the end of the {leanRef}`case` block. For simple subgoals, it may not be worth selecting a subgoal using its tag, but you may still want to structure the proof. Lean also provides the “bullet” notation {lit}`. <tactics>` (or {lit}`· <tactics>`) for structuring proofs: ```lean theorem test (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by apply And.intro . exact hp . apply And.intro . exact hq . exact hp ``` # Basic Tactics %%% tag := "basic-tactics" %%% :::leanFirst In addition to {leanRef}`apply` and {leanRef}`exact`, another useful tactic is {leanRef}`intro`, which introduces a hypothesis. What follows is an example of an identity from propositional logic that we proved in a previous chapter, now proved using tactics. ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . intro h apply Or.elim (And.right h) . intro hq apply Or.inl apply And.intro . exact And.left h . exact hq . intro hr apply Or.inr apply And.intro . exact And.left h . exact hr . intro h apply Or.elim h . 
intro hpq apply And.intro . exact And.left hpq . apply Or.inl exact And.right hpq . intro hpr apply And.intro . exact And.left hpr . apply Or.inr exact And.right hpr ``` ::: The {leanRef}`intro` command can more generally be used to introduce a variable of any type: ```lean example (α : Type) : α → α := by intro a exact a example (α : Type) : ∀ x : α, x = x := by intro x exact Eq.refl x ``` You can use it to introduce several variables: ```lean example : ∀ a b c : Nat, a = b → a = c → c = b := by intro a b c h₁ h₂ exact Eq.trans (Eq.symm h₂) h₁ ``` :::setup ``` variable {α : Sort u} {p : Prop} {e : p} ``` As the {leanRef}`apply` tactic is a command for constructing function applications interactively, the {leanRef}`intro` tactic is a command for constructing function abstractions interactively (i.e., terms of the form {lean (type := "∀ (x : α), p")}`fun x => e`). As with lambda abstraction notation, the {leanRef}`intro` tactic allows us to use an implicit {kw}`match`. ::: ```lean example (p q : α → Prop) : (∃ x, p x ∧ q x) → ∃ x, q x ∧ p x := by intro ⟨w, hpw, hqw⟩ exact ⟨w, hqw, hpw⟩ ``` You can also provide multiple alternatives like in the {kw}`match` expression. ```lean example (p q : α → Prop) : (∃ x, p x ∨ q x) → ∃ x, q x ∨ p x := by intro | ⟨w, Or.inl h⟩ => exact ⟨w, Or.inr h⟩ | ⟨w, Or.inr h⟩ => exact ⟨w, Or.inl h⟩ ``` ::::leanFirst The {leanRef}`intros` tactic can be used without any arguments, in which case, it chooses names and introduces as many variables as it can. You will see an example of this in a moment. :::leanFirst The {leanRef}`assumption` tactic looks through the assumptions in context of the current goal, and if there is one matching the conclusion, it applies it. 
```lean variable (x y z w : Nat) example (h₁ : x = y) (h₂ : y = z) (h₃ : z = w) : x = w := by apply Eq.trans h₁ apply Eq.trans h₂ assumption -- applied h₃ ``` ::: It will unify metavariables in the conclusion if necessary: ```lean variable (x y z w : Nat) example (h₁ : x = y) (h₂ : y = z) (h₃ : z = w) : x = w := by apply Eq.trans assumption -- solves x = ?b with h₁ apply Eq.trans assumption -- solves y = ?h₂.b with h₂ assumption -- solves z = w with h₃ ``` The following example uses the {leanRef}`intros` command to introduce the three variables and two hypotheses automatically: ```lean example : ∀ a b c : Nat, a = b → a = c → c = b := by intros apply Eq.trans apply Eq.symm assumption assumption ``` :::: :::leanFirst Note that names automatically generated by Lean are inaccessible by default. The motivation is to ensure your tactic proofs do not rely on automatically generated names, and are consequently more robust. However, you can use the combinator {leanRef}`unhygienic` to disable this restriction. ```lean example : ∀ a b c : Nat, a = b → a = c → c = b := by unhygienic intros apply Eq.trans apply Eq.symm exact a_2 exact a_1 ``` ::: :::leanFirst You can also use the {leanRef}`rename_i` tactic to rename the most recent inaccessible names in your context. In the following example, the tactic {leanRef}`rename_i h1 _ h2` renames two of the last three hypotheses in your context. ```lean example : ∀ a b c d : Nat, a = b → a = d → a = c → c = b := by intros rename_i h1 _ h2 apply Eq.trans apply Eq.symm exact h2 exact h1 ``` ::: :::leanFirst The {leanRef}`rfl` tactic solves goals that are reflexive relations applied to definitionally equal arguments. 
Equality is reflexive: ```lean example (y : Nat) : (fun x : Nat => 0) y = 0 := by rfl ``` ::: :::leanFirst The {leanRef}`repeat` combinator can be used to apply a tactic several times: ```lean example : ∀ a b c : Nat, a = b → a = c → c = b := by intros apply Eq.trans apply Eq.symm repeat assumption ``` ::: :::leanFirst Another tactic that is sometimes useful is the {leanRef}`revert` tactic, which is, in a sense, an inverse to {leanRef}`intro`: ```lean example (x : Nat) : x = x := by revert x -- ^ PROOF_STATE: afterRevert intro y -- ^ PROOF_STATE: afterRevertIntro rfl ``` After {leanRef}`revert x`, the proof state is: ```proofState afterRevert ⊢ ∀ (x : Nat), x = x ``` After {leanRef}`intro y`, it is: ```proofState afterRevertIntro y : Nat ⊢ y = y ``` ::: Moving a hypothesis into the goal yields an implication: ```lean example (x y : Nat) (h : x = y) : y = x := by revert h -- ^ PROOF_STATE: afterRevertH intro h₁ -- ^ PROOF_STATE: afterRevertHIntro -- goal is x y : Nat, h₁ : x = y ⊢ y = x apply Eq.symm assumption ``` After {leanRef}`revert h`, the proof state is: ```proofState afterRevertH x : Nat y : Nat ⊢ x = y → y = x ``` After {leanRef}`intro h₁`, it is: ```proofState afterRevertHIntro x : Nat y : Nat h₁ : x = y ⊢ y = x ``` :::leanFirst But {leanRef}`revert` is even more clever, in that it will revert not only an element of the context but also all the subsequent elements of the context that depend on it. 
For example, reverting {leanRef (in := "revert x")}`x` in the example above brings {leanRef}`h` along with it: ```lean example (x y : Nat) (h : x = y) : y = x := by revert x -- ^ PROOF_STATE: afterRevertXH intros apply Eq.symm assumption ``` After {leanRef}`revert x`, the goal is: ```proofState afterRevertXH y : Nat ⊢ ∀ (x : Nat), x = y → y = x ``` ::: You can also revert multiple elements of the context at once: ```lean example (x y : Nat) (h : x = y) : y = x := by revert x y -- ^ PROOF_STATE: revertXY intros apply Eq.symm assumption ``` After {leanRef}`revert x y`, the goal is: ```proofState revertXY ⊢ ∀ (x y : Nat), x = y → y = x ``` :::leanFirst You can only {leanRef}`revert` an element of the local context, that is, a local variable or hypothesis. But you can replace an arbitrary expression in the goal by a fresh variable using the {leanRef}`generalize` tactic: ```lean (showProofStates := "afterGen afterRevert afterIntro") example : 3 = 3 := by generalize 3 = x -- ^ PROOF_STATE: afterGen revert x -- ^ PROOF_STATE: afterRevert intro y -- ^ PROOF_STATE: afterIntro rfl ``` In particular, after {leanRef}`generalize`, the goal is ```proofState afterGen x : Nat ⊢ x = x ``` ::: The mnemonic in the notation above is that you are generalizing the goal by setting {leanRef}`3` to an arbitrary variable {leanRef (in := "revert x")}`x`. Be careful: not every generalization preserves the validity of the goal. Here, {leanRef}`generalize` replaces a goal that could be proved using {tactic}`rfl` with one that is not provable: ```lean (showProofStates := "afterGen") example : 2 + 3 = 5 := by generalize 3 = x -- ^ PROOF_STATE: afterGen sorry ``` In this example, the {leanRef}`sorry` tactic is the analogue of the {lean}`sorry` proof term. It closes the current goal, producing the usual warning that {lean}`sorry` has been used. 
To preserve the validity of the previous goal, the {leanRef}`generalize` tactic allows us to record the fact that {leanRef}`3` has been replaced by {leanRef}`x`. All you need to do is to provide a label, and {leanRef}`generalize` uses it to store the assignment in the local context: ```lean example : 2 + 3 = 5 := by generalize h : 3 = x -- ^ PROOF_STATE: afterGen rw [← h] ``` Following {leanRef}`generalize h : 3 = x`, {leanRef}`h` is a proof that {leanRef}`3 = x`: ```proofState afterGen x : Nat h : 3 = x ⊢ 2 + x = 5 ``` Here the rewriting tactic {leanRef}`rw` uses {leanRef}`h` to replace {leanRef}`x` by {leanRef}`3` again. The {leanRef}`rw` tactic will be discussed below. # More Tactics %%% tag := "more-tactics" %%% :::leanFirst Some additional tactics are useful for constructing and destructing propositions and data. For example, when applied to a goal of the form {leanRef}`p ∨ q`, you use tactics such as {leanRef}`apply Or.inl` and {leanRef}`apply Or.inr`. Conversely, the {leanRef}`cases` tactic can be used to decompose a disjunction: ```lean example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h with | inl hp => apply Or.inr; exact hp | inr hq => apply Or.inl; exact hq ``` ::: Note that the syntax is similar to the one used in {kw}`match` expressions. 
The new subgoals can be solved in any order: ```lean example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h with | inr hq => apply Or.inl; exact hq | inl hp => apply Or.inr; exact hp ``` You can also use an (unstructured) {leanRef}`cases` without the {leanRef}`with` and a tactic for each alternative: ```lean example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h apply Or.inr assumption apply Or.inl assumption ``` The (unstructured) {leanRef}`cases` is particularly useful when you can close several subgoals using the same tactic: ```lean example (p : Prop) : p ∨ p → p := by intro h cases h repeat assumption ``` You can also use the combinator {lit}`tac1 `{tactic}`<;>`{lit}` tac2` to apply {lit}`tac2` to each subgoal produced by tactic {lit}`tac1`: ```lean example (p : Prop) : p ∨ p → p := by intro h cases h <;> assumption ``` :::leanFirst You can combine the unstructured {leanRef}`cases` tactic with the {leanRef}`case` and {leanRef}`.` notation: ```lean example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h . apply Or.inr assumption . apply Or.inl assumption example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h case inr h => apply Or.inl assumption case inl h => apply Or.inr assumption example (p q : Prop) : p ∨ q → q ∨ p := by intro h cases h case inr h => apply Or.inl assumption . 
apply Or.inr assumption ``` ::: The {leanRef}`cases` tactic can also be used to decompose a conjunction: ```lean example (p q : Prop) : p ∧ q → q ∧ p := by intro h cases h with | intro hp hq => constructor; exact hq; exact hp -- ^ PROOF_STATE: afterIntroCase ``` In this example, there is only one goal after the {leanRef}`cases` tactic is applied, with {leanRef}`h`{lit}` : `{leanRef}`p ∧ q` replaced by a pair of assumptions, {leanRef}`hp`{lit}` : `{leanRef}`p` and {leanRef}`hq`{lit}` : `{leanRef}`q`: ```proofState afterIntroCase case intro p : Prop q : Prop hp : p hq : q ⊢ q ∧ p ``` The {leanRef}`constructor` tactic applies the unique constructor for conjunction, {lean}`And.intro`. With these tactics, an example from the previous section can be rewritten as follows: ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . intro h cases h with | intro hp hqr => cases hqr . apply Or.inl; constructor <;> assumption . apply Or.inr; constructor <;> assumption . intro h cases h with | inl hpq => cases hpq with | intro hp hq => constructor; exact hp; apply Or.inl; exact hq | inr hpr => cases hpr with | intro hp hr => constructor; exact hp; apply Or.inr; exact hr ``` You will see in {ref "inductive-types"}[Inductive Types] that these tactics are quite general. The {leanRef}`cases` tactic can be used to decompose any element of an inductively defined type; {leanRef}`constructor` always applies the first applicable constructor of an inductively defined type. For example, you can use {leanRef}`cases` and {leanRef}`constructor` with an existential quantifier: ```lean example (p q : Nat → Prop) : (∃ x, p x) → ∃ x, p x ∨ q x := by intro h cases h with | intro x px => constructor; apply Or.inl; exact px ``` Here, the {leanRef}`constructor` tactic leaves the first component of the existential assertion, the value of {leanRef}`x`, implicit. It is represented by a metavariable, which should be instantiated later on. 
In the previous example, the proper value of the metavariable is determined by the tactic {leanRef}`exact px`, since {leanRef}`px` has type {leanRef}`p x`. If you want to specify a witness to the existential quantifier explicitly, you can use the {tactic}`exists` tactic instead: ```lean example (p q : Nat → Prop) : (∃ x, p x) → ∃ x, p x ∨ q x := by intro h cases h with | intro x px => exists x; apply Or.inl; exact px ``` Here is another example: ```lean example (p q : Nat → Prop) : (∃ x, p x ∧ q x) → ∃ x, q x ∧ p x := by intro h cases h with | intro x hpq => cases hpq with | intro hp hq => exists x ``` These tactics can be used on data just as well as propositions. In the next example, they are used to define functions which swap the components of the product and sum types: ```lean def swap_pair : α × β → β × α := by intro p cases p constructor <;> assumption def swap_sum : Sum α β → Sum β α := by intro p cases p . apply Sum.inr; assumption . apply Sum.inl; assumption ``` Note that up to the names we have chosen for the variables, the definitions are identical to the proofs of the analogous propositions for conjunction and disjunction. The {leanRef}`cases` tactic will also do a case distinction on a natural number: ```lean open Nat example (P : Nat → Prop) (h₀ : P 0) (h₁ : ∀ n, P (succ n)) (m : Nat) : P m := by cases m with | zero => exact h₀ | succ m' => exact h₁ m' ``` The {leanRef}`cases` tactic, and its companion, the {tactic}`induction` tactic, are discussed in greater detail in the {ref "tactics-for-inductive-types"}[Tactics for Inductive Types] section. :::leanFirst The {leanRef}`contradiction` tactic searches for a contradiction among the hypotheses of the current goal: ```lean example (p q : Prop) : p ∧ ¬ p → q := by intro h cases h contradiction ``` ::: :::leanFirst You can also use {tactic}`match` in tactic blocks. ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . 
intro h match h with | ⟨_, Or.inl _⟩ => apply Or.inl; constructor <;> assumption | ⟨_, Or.inr _⟩ => apply Or.inr; constructor <;> assumption . intro h match h with | Or.inl ⟨hp, hq⟩ => constructor; exact hp; apply Or.inl; exact hq | Or.inr ⟨hp, hr⟩ => constructor; exact hp; apply Or.inr; exact hr ``` ::: :::leanFirst You can “combine” {leanRef}`intro` with {tactic}`match` and write the previous examples as follows: ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . intro | ⟨hp, Or.inl hq⟩ => apply Or.inl; constructor <;> assumption | ⟨hp, Or.inr hr⟩ => apply Or.inr; constructor <;> assumption . intro | Or.inl ⟨hp, hq⟩ => constructor; assumption; apply Or.inl; assumption | Or.inr ⟨hp, hr⟩ => constructor; assumption; apply Or.inr; assumption ``` ::: # Structuring Tactic Proofs %%% tag := "structuring-tactic-proofs" %%% Tactics often provide an efficient way of building a proof, but long sequences of instructions can obscure the structure of the argument. In this section, we describe some means that help provide structure to a tactic-style proof, making such proofs more readable and robust. :::leanFirst One thing that is nice about Lean's proof-writing syntax is that it is possible to mix term-style and tactic-style proofs, and pass between the two freely. For example, the tactics {leanRef}`apply` and {leanRef}`exact` expect arbitrary terms, which you can write using {kw}`have`, {kw}`show`, and so on. Conversely, when writing an arbitrary Lean term, you can always invoke the tactic mode by inserting a {kw}`by` block. 
The following is a somewhat toy example: ```lean example (p q r : Prop) : p ∧ (q ∨ r) → (p ∧ q) ∨ (p ∧ r) := by intro h exact have hp : p := h.left have hqr : q ∨ r := h.right show (p ∧ q) ∨ (p ∧ r) by cases hqr with | inl hq => exact Or.inl ⟨hp, hq⟩ | inr hr => exact Or.inr ⟨hp, hr⟩ ``` ::: The following is a more natural example: ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . intro h cases h.right with | inl hq => exact Or.inl ⟨h.left, hq⟩ | inr hr => exact Or.inr ⟨h.left, hr⟩ . intro h cases h with | inl hpq => exact ⟨hpq.left, Or.inl hpq.right⟩ | inr hpr => exact ⟨hpr.left, Or.inr hpr.right⟩ ``` :::leanFirst In fact, there is a {tactic}`show` tactic, which is similar to the {kw}`show` expression in a proof term. It simply declares the type of the goal that is about to be solved, while remaining in tactic mode. ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro . intro h cases h.right with | inl hq => show (p ∧ q) ∨ (p ∧ r) exact Or.inl ⟨h.left, hq⟩ | inr hr => show (p ∧ q) ∨ (p ∧ r) exact Or.inr ⟨h.left, hr⟩ . 
intro h cases h with | inl hpq => show p ∧ (q ∨ r) exact ⟨hpq.left, Or.inl hpq.right⟩ | inr hpr => show p ∧ (q ∨ r) exact ⟨hpr.left, Or.inr hpr.right⟩ ``` ::: The {tactic}`show` tactic can actually be used to rewrite a goal to something definitionally equivalent: ```lean example (n : Nat) : n + 1 = Nat.succ n := by show Nat.succ n = Nat.succ n rfl ``` There is also a {tactic}`have` tactic, which introduces a new subgoal, just as when writing proof terms: ```lean example (p q r : Prop) : p ∧ (q ∨ r) → (p ∧ q) ∨ (p ∧ r) := by intro ⟨hp, hqr⟩ show (p ∧ q) ∨ (p ∧ r) cases hqr with | inl hq => have hpq : p ∧ q := And.intro hp hq apply Or.inl exact hpq | inr hr => have hpr : p ∧ r := And.intro hp hr apply Or.inr exact hpr ``` :::leanFirst As with proof terms, you can omit the label in the {tactic}`have` tactic, in which case, the default label {leanRef}`this` is used: ```lean example (p q r : Prop) : p ∧ (q ∨ r) → (p ∧ q) ∨ (p ∧ r) := by intro ⟨hp, hqr⟩ show (p ∧ q) ∨ (p ∧ r) cases hqr with | inl hq => have : p ∧ q := And.intro hp hq apply Or.inl exact this | inr hr => have : p ∧ r := And.intro hp hr apply Or.inr exact this ``` ::: :::leanFirst The types in a {tactic}`have` tactic can be omitted, so you can write {lit}`have hp := h.left` and {lit}`have hqr := h.right`. In fact, with this notation, you can even omit both the type and the label, in which case the new fact is introduced with the label {leanRef}`this`: ```lean example (p q r : Prop) : p ∧ (q ∨ r) → (p ∧ q) ∨ (p ∧ r) := by intro ⟨hp, hqr⟩ cases hqr with | inl hq => have := And.intro hp hq apply Or.inl; exact this | inr hr => have := And.intro hp hr apply Or.inr; exact this ``` ::: Lean also has a {tactic}`let` tactic, which is similar to the {tactic}`have` tactic, but is used to introduce local definitions instead of auxiliary facts. 
It is the tactic analogue of a {kw}`let` in a proof term: ```lean example : ∃ x, x + 2 = 8 := by let a : Nat := 3 * 2 exists a ``` As with {tactic}`have`, you can leave the type implicit by writing {lit}`let a := 3 * 2`. The difference between {tactic}`let` and {tactic}`have` is that {tactic}`let` introduces a local definition in the context, so that the definition of the local declaration can be unfolded in the proof. We have used {leanRef}`.` to create nested tactic blocks. In a nested block, Lean focuses on the first goal, and generates an error if it has not been fully solved at the end of the block. This can be helpful in indicating the separate proofs of multiple subgoals introduced by a tactic. The notation {leanRef}`.` is whitespace sensitive and relies on the indentation to detect whether the tactic block ends. Alternatively, you can define tactic blocks using curly braces and semicolons: ```lean example (p q r : Prop) : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) := by apply Iff.intro { intro h; cases h.right; { show (p ∧ q) ∨ (p ∧ r); exact Or.inl ⟨h.left, ‹q›⟩ } { show (p ∧ q) ∨ (p ∧ r); exact Or.inr ⟨h.left, ‹r›⟩ } } { intro h; cases h; { show p ∧ (q ∨ r); rename_i hpq; exact ⟨hpq.left, Or.inl hpq.right⟩ } { show p ∧ (q ∨ r); rename_i hpr; exact ⟨hpr.left, Or.inr hpr.right⟩ } } ``` It is useful to use indentation to structure proofs: every time a tactic leaves more than one subgoal, we separate the remaining subgoals by enclosing them in blocks and indenting. Thus if the application of theorem {lit}`foo` to a single goal produces four subgoals, one would expect the proof to look like this: ``` apply foo . <proof of first goal> . <proof of second goal> . <proof of third goal> . 
<proof of final goal> ``` or ``` apply foo case <tag of first goal> => <proof of first goal> case <tag of second goal> => <proof of second goal> case <tag of third goal> => <proof of third goal> case <tag of final goal> => <proof of final goal> ``` or ``` apply foo { <proof of first goal> } { <proof of second goal> } { <proof of third goal> } { <proof of final goal> } ``` # Tactic Combinators %%% tag := "tactic-combinators" %%% _Tactic combinators_ are operations that form new tactics from old ones. A sequencing combinator is already implicit in the {kw}`by` block: ```lean example (p q : Prop) (hp : p) : p ∨ q := by apply Or.inl; assumption ``` Here, {leanRef}`apply Or.inl; assumption` is functionally equivalent to a single tactic which first applies {leanRef}`apply Or.inl` and then applies {leanRef}`assumption`. In {lit}`t₁ `{tactic}`<;>`{lit}` t₂`, the {leanRef}`<;>` operator provides a _parallel_ version of the sequencing operation: {lit}`t₁` is applied to the current goal, and then {lit}`t₂` is applied to _all_ the resulting subgoals: ```lean example (p q : Prop) (hp : p) (hq : q) : p ∧ q := by constructor <;> assumption ``` This is especially useful when the resulting goals can be finished off in a uniform way, or, at least, when it is possible to make progress on all of them uniformly. The {tactic}`first`{lit}` | t₁ | t₂ | ... | tₙ` applies each {lit}`tᵢ` until one succeeds, or else fails: ```lean example (p q : Prop) (hp : p) : p ∨ q := by first | apply Or.inl; assumption | apply Or.inr; assumption example (p q : Prop) (hq : q) : p ∨ q := by first | apply Or.inl; assumption | apply Or.inr; assumption ``` In the first example, the left branch succeeds, whereas in the second one, it is the right one that succeeds. 
In the next three examples, the same compound tactic succeeds in each case: ```lean example (p q r : Prop) (hp : p) : p ∨ q ∨ r := by repeat (first | apply Or.inl; assumption | apply Or.inr | assumption) example (p q r : Prop) (hq : q) : p ∨ q ∨ r := by repeat (first | apply Or.inl; assumption | apply Or.inr | assumption) example (p q r : Prop) (hr : r) : p ∨ q ∨ r := by repeat (first | apply Or.inl; assumption | apply Or.inr | assumption) ``` The tactic tries to solve the left disjunct immediately by assumption; if that fails, it tries to focus on the right disjunct; and if that doesn't work, it invokes the assumption tactic. :::leanFirst You will have no doubt noticed by now that tactics can fail. Indeed, it is the “failure” state that causes the _first_ combinator to backtrack and try the next tactic. The {leanRef}`try` combinator builds a tactic that always succeeds, though possibly in a trivial way: {tactic}`try`{lit}` t` executes {lit}`t` and reports success, even if {lit}`t` fails. It is equivalent to {tactic}`first`{lit}` | t | `{tactic}`skip`, where {tactic}`skip` is a tactic that does nothing (and succeeds in doing so). In the next example, the second {leanRef}`constructor` succeeds on the right conjunct {leanRef}`q ∧ r` (remember that disjunction and conjunction associate to the right) but fails on the first. The {leanRef}`try` tactic ensures that the sequential composition succeeds: ```lean example (p q r : Prop) (hp : p) (hq : q) (hr : r) : p ∧ q ∧ r := by constructor <;> (try constructor) <;> assumption ``` ::: Be careful: {tactic}`repeat`{lit}` (`{tactic}`try`{lit}` t)` will loop forever, because the inner tactic never fails. In a proof, there are often multiple goals outstanding. Parallel sequencing is one way to arrange it so that a single tactic is applied to multiple goals, but there are other ways to do this. 
For example, {tactic}`all_goals`{lit}` t` applies {lit}`t` to all open goals: ```lean example (p q r : Prop) (hp : p) (hq : q) (hr : r) : p ∧ q ∧ r := by constructor all_goals (try constructor) all_goals assumption ``` In this case, the {tactic}`any_goals` tactic provides a more robust solution. It is similar to {tactic}`all_goals`, except it succeeds if its argument succeeds on at least one goal: ```lean example (p q r : Prop) (hp : p) (hq : q) (hr : r) : p ∧ q ∧ r := by constructor any_goals constructor any_goals assumption ``` The first tactic in the {kw}`by` block below repeatedly splits conjunctions: ```lean example (p q r : Prop) (hp : p) (hq : q) (hr : r) : p ∧ ((p ∧ q) ∧ r) ∧ (q ∧ r ∧ p) := by repeat (any_goals constructor) all_goals assumption ``` In fact, we can compress the full tactic down to one line: ```lean example (p q r : Prop) (hp : p) (hq : q) (hr : r) : p ∧ ((p ∧ q) ∧ r) ∧ (q ∧ r ∧ p) := by repeat (any_goals (first | constructor | assumption)) ``` The combinator {tactic}`focus`{lit}` t` ensures that {lit}`t` only affects the current goal, temporarily hiding the others from the scope. So, if {lit}`t` ordinarily only affects the current goal, {tactic}`focus`{lit}` (`{tactic}`all_goals`{lit}` t)` has the same effect as {lit}`t`. # Rewriting %%% tag := "rewriting" %%% The {tactic}`rw` tactic and the {tactic}`simp` tactic were introduced briefly in {ref "calculational-proofs"}[Calculational Proofs]. In this section and the next, we discuss them in greater detail. :::setup ``` variable (x y : α) (h : x = y) theorem add_comm : ∀ (x y : Nat), x + y = y + x := by omega ``` The {tactic}`rw` tactic provides a basic mechanism for applying substitutions to goals and hypotheses, providing a convenient and efficient way of working with equality. The most basic form of the tactic is {tactic}`rw`{lit}` [t]`, where {lit}`t` is a term whose type asserts an equality. 
For example, {lit}`t` can be a hypothesis {lean}`h : x = y` in the context; it can be a general lemma, like {lean}`add_comm : ∀ x y, x + y = y + x`, in which the rewrite tactic tries to find suitable instantiations of {lean}`x` and {lean}`y`; or it can be any compound term asserting a concrete or general equation. In the following example, we use this basic form to rewrite the goal using a hypothesis. ::: ```lean variable (k : Nat) (f : Nat → Nat) example (h₁ : f 0 = 0) (h₂ : k = 0) : f k = 0 := by rw [h₂] -- replace k with 0 rw [h₁] -- replace f 0 with 0 ``` :::setup ``` variable (t : α) ``` In the example above, the first use of {leanRef}`rw` replaces {leanRef}`k` with {leanRef}`0` in the goal {leanRef}`f k = 0`. Then, the second one replaces {leanRef}`f 0` with {leanRef}`0`. The tactic automatically closes any goal of the form {lean}`t = t`. Here is an example of rewriting using a compound expression: ::: ```lean example (x y : Nat) (p : Nat → Prop) (q : Prop) (h : q → x = y) (h' : p y) (hq : q) : p x := by rw [h hq]; assumption ``` Here, {leanRef}`h hq` establishes the equation {leanRef}`x = y`. Multiple rewrites can be combined using the notation {tactic}`rw`{lit}` [t_1, ..., t_n]`, which is just shorthand for {tactic}`rw`{lit}` [t_1]; ...; `{tactic}`rw`{lit}` [t_n]`. The previous example can be written as follows: ```lean variable (k : Nat) (f : Nat → Nat) example (h₁ : f 0 = 0) (h₂ : k = 0) : f k = 0 := by rw [h₂, h₁] ``` By default, {leanRef}`rw` uses an equation in the forward direction, matching the left-hand side with an expression, and replacing it with the right-hand side. The notation {lit}`←t` can be used to instruct the tactic to use the equality {lit}`t` in the reverse direction. ```lean variable (a b : Nat) (f : Nat → Nat) example (h₁ : a = b) (h₂ : f a = 0) : f b = 0 := by rw [←h₁, h₂] ``` In this example, the term {leanRef}`←h₁` instructs the rewriter to replace {leanRef}`b` with {leanRef}`a`. 
In the editors, you can type the backwards arrow as {kbd}`\l`. You can also use the ASCII equivalent, {lit}`<-`. Sometimes the left-hand side of an identity can match more than one subterm in the pattern, in which case the {tactic}`rw` tactic chooses the first match it finds when traversing the term. If that is not the one you want, you can use additional arguments to specify the appropriate subterm. ```lean example (a b c : Nat) : a + b + c = a + c + b := by rw [Nat.add_assoc, Nat.add_comm b, ← Nat.add_assoc] example (a b c : Nat) : a + b + c = a + c + b := by rw [Nat.add_assoc, Nat.add_assoc, Nat.add_comm b] example (a b c : Nat) : a + b + c = a + c + b := by rw [Nat.add_assoc, Nat.add_assoc, Nat.add_comm _ b] ``` :::TODO Get the intermediate proof states from `rw` into the reference ring to help these examples be better ::: In the first example above, the first step rewrites {leanRef}`a + b + c` to {leanRef}`a`{lit}` + (`{leanRef}`b + c`{lit}`)`. The next step applies commutativity to the term {leanRef}`b + c`; without specifying the argument, the tactic would instead rewrite {leanRef}`a`{lit}` + (`{leanRef}`b + c`{lit}`)` to {lit}`(`{leanRef}`b + c`{lit}`) + `{leanRef}`a`. Finally, the last step applies associativity in the reverse direction, rewriting {leanRef}`a`{lit}` + (`{leanRef}`c`{lit}` + `{leanRef}`b`{lit}`)` to {leanRef}`a + c + b`. The next two examples instead apply associativity to move the parenthesis to the right on both sides, and then switch {leanRef}`b` and {leanRef}`c`. Notice that the last example specifies that the rewrite should take place on the right-hand side by specifying the second argument to {leanRef}`Nat.add_comm`. By default, the {leanRef}`rw` tactic affects only the goal. 
The notation {tactic}`rw`{lit}` [t] `{kw}`at`{lit}` h` applies the rewrite {lit}`t` at the hypothesis {lit}`h` instead of the goal: ```lean example (f : Nat → Nat) (a : Nat) (h : a + 0 = 0) : f a = f 0 := by rw [Nat.add_zero] at h rw [h] ``` The first step, {leanRef}`rw [Nat.add_zero] at h`, rewrites the hypothesis {leanRef}`a + 0 = 0` to {leanRef}`a = 0`. Then the new hypothesis {leanRef}`a = 0` is used to rewrite the goal to {leanRef}`f 0`{lit}` = `{leanRef}`f 0`. :::leanFirst The {leanRef}`rw` tactic is not restricted to propositions. In the following example, we use {tactic}`rw`{lit}` [h] `{kw}`at`{lit}` t` to rewrite the hypothesis {leanRef}`t : Tuple α n` to {leanRef}`t : Tuple α`{lit}` 0`. ```lean def Tuple (α : Type) (n : Nat) := { as : List α // as.length = n } example (n : Nat) (h : n = 0) (t : Tuple α n) : Tuple α 0 := by rw [h] at t exact t ``` ::: # Using the Simplifier %%% tag := "using-the-simplifier" %%% Whereas {tactic}`rw` is designed as a surgical tool for manipulating a goal, the simplifier offers a more powerful form of automation. A number of identities in Lean's library have been tagged with the {attr}`[simp]` attribute, and the {tactic}`simp` tactic uses them to iteratively rewrite subterms in an expression. ```lean example (x y z : Nat) : (x + 0) * (0 + y * 1 + z * 0) = x * y := by simp example (x y z : Nat) (p : Nat → Prop) (h : p (x * y)) : p ((x + 0) * (0 + y * 1 + z * 0)) := by simp; assumption ``` In the first example, the left-hand side of the equality in the goal is simplified using the usual identities involving 0 and 1, reducing the goal to {leanRef}`x * y`{lit}` = `{leanRef}`x * y`. At that point, {leanRef}`simp` applies reflexivity to finish it off. In the second example, {leanRef}`simp` reduces the goal to {leanRef}`p (x * y)`, at which point the assumption {leanRef}`h` finishes it off. 
Here are some more examples with lists: ```lean open List example (xs : List Nat) : reverse (xs ++ [1, 2, 3]) = [3, 2, 1] ++ reverse xs := by simp example (xs ys : List α) : length (reverse (xs ++ ys)) = length xs + length ys := by simp [Nat.add_comm] ``` As with {leanRef}`rw`, you can use the keyword {leanRef}`at` to simplify a hypothesis: ```lean example (x y z : Nat) (p : Nat → Prop) (h : p ((x + 0) * (0 + y * 1 + z * 0))) : p (x * y) := by simp at h; assumption ``` Moreover, you can use a “wildcard” asterisk to simplify all the hypotheses and the goal: ```lean attribute [local simp] Nat.mul_comm Nat.mul_assoc Nat.mul_left_comm attribute [local simp] Nat.add_assoc Nat.add_comm Nat.add_left_comm example (w x y z : Nat) (p : Nat → Prop) (h : p (x * y + z * w * x)) : p (x * w * z + y * x) := by simp at *; assumption example (x y z : Nat) (p : Nat → Prop) (h₁ : p (1 * x + y)) (h₂ : p (x * z * 1)) : p (y + 0 + x) ∧ p (z * x) := by simp at * <;> constructor <;> assumption ``` :::setup ``` variable (x y z : Nat) ``` For operations that are commutative and associative, like multiplication on the natural numbers, the simplifier uses these two facts to rewrite an expression, as well as _left commutativity_. In the case of multiplication the latter is expressed as follows: {lean}`x * (y * z) = y * (x * z)`. The {leanRef}`local` modifier tells the simplifier to use these rules in the current file (or section or namespace, as the case may be). It may seem that commutativity and left-commutativity are problematic, in that repeated application of either causes looping. But the simplifier detects identities that permute their arguments, and uses a technique known as _ordered rewriting_. This means that the system maintains an internal ordering of terms, and only applies the identity if doing so decreases the order. 
With the three identities mentioned above, this has the effect that all the parentheses in an expression are associated to the right, and the expressions are ordered in a canonical (though somewhat arbitrary) way. Two expressions that are equivalent up to associativity and commutativity are then rewritten to the same canonical form. ::: ```lean attribute [local simp] Nat.mul_comm Nat.mul_assoc Nat.mul_left_comm attribute [local simp] Nat.add_assoc Nat.add_comm Nat.add_left_comm ------ example (w x y z : Nat) (p : Nat → Prop) : x * y + z * w * x = x * w * z + y * x := by simp example (w x y z : Nat) (p : Nat → Prop) (h : p (x * y + z * w * x)) : p (x * w * z + y * x) := by simp; simp at h; assumption ``` As with {tactic}`rw`, you can send {tactic}`simp` a list of facts to use, including general lemmas, local hypotheses, definitions to unfold, and compound expressions. The {tactic}`simp` tactic also recognizes the {lit}`←t` syntax that {tactic}`rewrite` does. In any case, the additional rules are added to the collection of identities that are used to simplify a term. ```lean def f (m n : Nat) : Nat := m + n + m example {m n : Nat} (h : n = 1) (h' : 0 = m) : (f m n) = n := by simp [h, ←h', f] ``` A common idiom is to simplify a goal using local hypotheses: ```lean variable (k : Nat) (f : Nat → Nat) example (h₁ : f 0 = 0) (h₂ : k = 0) : f k = 0 := by simp [h₁, h₂] ``` :::leanFirst To use all the hypotheses present in the local context when simplifying, we can use the wildcard symbol, {leanRef}`*`: ```lean variable (k : Nat) (f : Nat → Nat) example (h₁ : f 0 = 0) (h₂ : k = 0) : f k = 0 := by simp [*] ``` ::: Here is another example: ```lean example (u w x y z : Nat) (h₁ : x = y + z) (h₂ : w = u + x) : w = z + y + u := by simp [*, Nat.add_comm] ``` :::leanFirst The simplifier will also do propositional rewriting. 
For example, using the hypothesis {leanRef (in := "p ∧ q")}`p`, it rewrites {leanRef}`p ∧ q` to {leanRef (in := "p ∨ q")}`q` and {leanRef}`p ∨ q` to {lean}`True`, which it then proves trivially. Iterating such rewrites produces nontrivial propositional reasoning. ```lean example (p q : Prop) (hp : p) : p ∧ q ↔ q := by simp [*] example (p q : Prop) (hp : p) : p ∨ q := by simp [*] example (p q r : Prop) (hp : p) (hq : q) : p ∧ (q ∨ r) := by simp [*] ``` ::: The next example simplifies all the hypotheses, and then uses them to prove the goal. ```lean set_option linter.unusedVariables false ------ example (u w x x' y y' z : Nat) (p : Nat → Prop) (h₁ : x + 0 = x') (h₂ : y + 0 = y') : x + y + 0 = x' + y' := by simp at * simp [*] ``` One thing that makes the simplifier especially useful is that its capabilities can grow as a library develops. For example, suppose we define a list operation that symmetrizes its input by appending its reversal: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ``` :::leanFirst Then for any list {leanRef (in := "mk_symm xs")}`xs`, {leanRef}`(mk_symm xs).reverse` is equal to {leanRef}`mk_symm xs`, which can easily be proved by unfolding the definition: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ------ theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] ``` ::: We can now use this theorem to prove new results: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] ------ example (xs ys : List Nat) : (xs ++ mk_symm ys).reverse = mk_symm ys ++ xs.reverse := by simp [reverse_mk_symm] example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp [reverse_mk_symm] at h; assumption ``` But using {leanRef}`reverse_mk_symm` is generally the right thing to do, and it would be nice if users did not have to invoke it explicitly. 
You can achieve that by marking it as a simplification rule when the theorem is defined: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ------ @[simp] theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] example (xs ys : List Nat) : (xs ++ mk_symm ys).reverse = mk_symm ys ++ xs.reverse := by simp example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp at h; assumption ``` The notation {leanRef}`@[simp]` declares {leanRef}`reverse_mk_symm` to have the {attr}`[simp]` attribute, and can be spelled out more explicitly: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ------ theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] attribute [simp] reverse_mk_symm example (xs ys : List Nat) : (xs ++ mk_symm ys).reverse = mk_symm ys ++ xs.reverse := by simp example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp at h; assumption ``` The attribute can also be applied any time after the theorem is declared: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ------ theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] example (xs ys : List Nat) : (xs ++ mk_symm ys).reverse = mk_symm ys ++ xs.reverse := by simp [reverse_mk_symm] attribute [simp] reverse_mk_symm example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp at h; assumption ``` :::leanFirst Once the attribute is applied, however, there is no way to permanently remove it; it persists in any file that imports the one where the attribute is assigned. 
As we will discuss further in {ref "attributes"}[Attributes], one can limit the scope of an attribute to the current file or section using the {leanRef}`local` modifier: ```lean def mk_symm (xs : List α) := xs ++ xs.reverse ------ theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] section attribute [local simp] reverse_mk_symm example (xs ys : List Nat) : (xs ++ mk_symm ys).reverse = mk_symm ys ++ xs.reverse := by simp example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp at h; assumption end ``` ::: Outside the section, the simplifier will no longer use {leanRef}`reverse_mk_symm` by default. Note that the various {leanRef}`simp` options we have discussed—giving an explicit list of rules, and using {leanRef}`at` to specify the location—can be combined, but the order they are listed is rigid. You can see the correct order in an editor by placing the cursor on the {leanRef}`simp` identifier to see the documentation string that is associated with it. :::leanFirst There are two additional modifiers that are useful. By default, {leanRef}`simp` includes all theorems that have been marked with the attribute {attr}`[simp]`. Writing {leanRef}`simp only` excludes these defaults, allowing you to use a more explicitly crafted list of rules. In the examples below, the minus sign and {leanRef}`only` are used to block the application of {leanRef}`reverse_mk_symm`. 
```lean def mk_symm (xs : List α) := xs ++ xs.reverse @[simp] theorem reverse_mk_symm (xs : List α) : (mk_symm xs).reverse = mk_symm xs := by simp [mk_symm] example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p (mk_symm ys ++ xs.reverse) := by simp at h; assumption example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p ((mk_symm ys).reverse ++ xs.reverse) := by simp [-reverse_mk_symm] at h; assumption example (xs ys : List Nat) (p : List Nat → Prop) (h : p (xs ++ mk_symm ys).reverse) : p ((mk_symm ys).reverse ++ xs.reverse) := by simp only [List.reverse_append] at h; assumption ``` ::: The {leanRef}`simp` tactic has many configuration options. For example, we can enable contextual simplifications as follows: ```lean example : if x = 0 then y + x = y else x ≠ 0 := by simp +contextual ``` With {leanRef}`+contextual`, the {leanRef}`simp` tactic uses the fact that {leanRef}`x = 0` when simplifying {leanRef}`y + x = y`, and {leanRef}`x ≠ 0` when simplifying the other branch. Here is another example: ```lean example : ∀ (x : Nat) (h : x = 0), y + x = y := by simp +contextual ``` :::leanFirst Another useful configuration option is {leanRef}`+arith` which enables arithmetical simplifications. ```lean example : 0 < 1 + x ∧ x + y + 2 ≥ y + 1 := by simp +arith ``` ::: # Split Tactic %%% tag := "split-tactic" %%% ::::leanFirst The {leanRef}`split` tactic is useful for breaking nested {kw}`if`-{kw}`then`-{kw}`else` and {kw}`match` expressions in cases. For a {kw}`match` expression with $`n` cases, the {leanRef}`split` tactic generates at most $`n` subgoals. Here is an example: ```lean def f (x y z : Nat) : Nat := match x, y, z with | 5, _, _ => y | _, 5, _ => y | _, _, 5 => y | _, _, _ => 1 example (x y z : Nat) : x ≠ 5 → y ≠ 5 → z ≠ 5 → z = w → f x y w = 1 := by intros simp [f] split . contradiction . contradiction . contradiction . rfl ``` :::: We can compress the tactic proof above as follows. 
```lean def f (x y z : Nat) : Nat := match x, y, z with | 5, _, _ => y | _, 5, _ => y | _, _, 5 => y | _, _, _ => 1 ------ example (x y z : Nat) : x ≠ 5 → y ≠ 5 → z ≠ 5 → z = w → f x y w = 1 := by intros; simp [f]; split <;> first | contradiction | rfl ``` The tactic {leanRef}`split <;> first | contradiction | rfl` first applies the {leanRef}`split` tactic, and then for each generated goal it tries {leanRef}`contradiction`, and then {leanRef}`rfl` if {leanRef}`contradiction` fails. Like {leanRef}`simp`, we can apply {leanRef}`split` to a particular hypothesis: ```lean def g (xs ys : List Nat) : Nat := match xs, ys with | [a, b], _ => a+b+1 | _, [b, _] => b+1 | _, _ => 1 example (xs ys : List Nat) (h : g xs ys = 0) : False := by simp [g] at h; split at h <;> simp +arith at h ``` # Extensible Tactics %%% tag := "extensible-tactics" %%% :::leanFirst In the following example, we define the notation {leanRef}`triv` using the command {leanRef}`syntax`. Then, we use the command {leanRef}`macro_rules` to specify what should be done when {leanRef}`triv` is used. You can provide different expansions, and the tactic interpreter will try all of them until one succeeds: ```lean -- Define a new tactic notation syntax "triv" : tactic macro_rules | `(tactic| triv) => `(tactic| assumption) example (h : p) : p := by triv -- You cannot prove the following theorem using `triv` -- example (x : α) : x = x := by -- triv -- Let's extend `triv`. The tactic interpreter -- tries all possible macro extensions for `triv` until one succeeds macro_rules | `(tactic| triv) => `(tactic| rfl) example (x : α) : x = x := by triv example (x : α) (h : p) : x = x ∧ p := by apply And.intro <;> triv -- We now add a (recursive) extension macro_rules | `(tactic| triv) => `(tactic| apply And.intro <;> triv) example (x : α) (h : p) : x = x ∧ p := by triv ``` ::: # Exercises %%% tag := none %%% 1. 
Go back to the exercises in {ref "propositions-and-proofs"}[Propositions and Proofs] and {ref "quantifiers-and-equality"}[Quantifiers and Equality] and redo as many as you can now with tactic proofs, using also {tactic}`rw` and {tactic}`simp` as appropriate. 2. Use tactic combinators to obtain a one-line proof of the following: ```lean example (p q r : Prop) (hp : p) : (p ∨ q ∨ r) ∧ (q ∨ p ∨ r) ∧ (q ∨ r ∨ p) := by sorry ```
theorem_proving_in_lean4/book/TPiL/DependentTypeTheory.lean
import VersoManual import TPiL.Examples open TPiL open Verso.Genre Manual #doc (Manual) "Dependent Type Theory" => %%% tag := "dependent-type-theory" htmlSplit := .never %%% Dependent type theory is a powerful and expressive language, allowing you to express complex mathematical assertions, write complex hardware and software specifications, and reason about both of these in a natural and uniform way. Lean is based on a version of dependent type theory known as the _Calculus of Constructions_, with a countable hierarchy of non-cumulative universes and inductive types. By the end of this chapter, you will understand much of what this means. # Simple Type Theory %%% tag := "simple-type-theory" %%% “Type theory” gets its name from the fact that every expression has an associated _type_. For example, in a given context, {lit}`x + 0` may denote a natural number and {lit}`f` may denote a function on the natural numbers. For those who like precise definitions, a Lean natural number is an arbitrary-precision unsigned integer. Here are some examples of how you can declare objects in Lean and check their types. ```lean /- Define some constants. -/ def m : Nat := 1 -- m is a natural number def n : Nat := 0 def b1 : Bool := true -- b1 is a Boolean def b2 : Bool := false /- Check their types. -/ #check m -- m : Nat #check n #check n + 0 -- n + 0 : Nat #check m * (n + 0) -- m * (n + 0) : Nat #check b1 -- b1 : Bool -- "&&" is the Boolean and #check b1 && b2 -- b1 && b2 : Bool -- Boolean or #check b1 || b2 -- b1 || b2 : Bool -- Boolean "true" #check true -- Bool.true : Bool /- Evaluate -/ #eval 5 * 4 -- 20 #eval m + 2 -- 3 #eval b1 && b2 -- false ``` Any text between {lit}`/-` and {lit}`-/` constitutes a comment block that is ignored by Lean. Similarly, two dashes {lean}`--` indicate that the rest of the line contains a comment that is also ignored. Comment blocks can be nested, making it possible to “comment out” chunks of code, just as in many programming languages. 
The {kw}`def` keyword declares new constant symbols into the working environment. In the example above, {leanRef}`def m : Nat := 1` defines a new constant {leanRef}`m` of type {lean}`Nat` whose value is {leanRef}`1`. The {kw}`#check` command asks Lean to report their types; in Lean, auxiliary commands that query the system for information typically begin with the hash (#) symbol. The {kw}`#eval` command asks Lean to evaluate the given expression. You should try declaring some constants and type checking some expressions on your own. Declaring new objects in this manner is a good way to experiment with the system. :::setup ``` variable (a b : Type) ``` What makes simple type theory powerful is that you can build new types out of others. For example, if {lean}`a` and {lean}`b` are types, {lean}`a -> b` denotes the type of functions from {lean}`a` to {lean}`b`, and {lean}`a × b` denotes the type of pairs consisting of an element of {lean}`a` paired with an element of {lean}`b`, also known as the _Cartesian product_. Note that {lit}`×` is a Unicode symbol. The judicious use of Unicode improves legibility, and all modern editors have great support for it. In the Lean standard library, you often see Greek letters to denote types, and the Unicode symbol {lit}`→` as a more compact version of {lit}`->`. 
::: ```lean (check := false) #check Nat → Nat -- type the arrow as “\to” or "\r" #check Nat -> Nat -- alternative ASCII notation #check Nat × Nat -- type the product as "\times" #check Prod Nat Nat -- alternative notation #check Nat → Nat → Nat #check Nat → (Nat → Nat) -- same type as above #check Nat × Nat → Nat #check (Nat → Nat) → Nat -- a "functional" ``` ```lean #check Nat.succ -- Nat.succ (n : Nat) : Nat #check (0, 1) -- (0, 1) : Nat × Nat #check Nat.add -- Nat.add : Nat → Nat → Nat #check Nat.succ 2 -- Nat.succ 2 : Nat #check Nat.add 3 -- Nat.add 3 : Nat → Nat #check Nat.add 5 2 -- Nat.add 5 2 : Nat #check (5, 9).1 -- (5, 9).fst : Nat #check (5, 9).2 -- (5, 9).snd : Nat #eval Nat.succ 2 -- 3 #eval Nat.add 5 2 -- 7 #eval (5, 9).1 -- 5 #eval (5, 9).2 -- 9 ``` Once again, you should try some examples on your own. Let's take a look at some basic syntax. You can enter the Unicode arrow {lit}`→` by typing {kbd}`\to` or {kbd}`\r` or {kbd}`\->`. You can also use the ASCII alternative {lit}`->`, so the expressions {lean}`Nat -> Nat` and {lean}`Nat → Nat` mean the same thing. Both expressions denote the type of functions that take a natural number as input and return a natural number as output. The Unicode symbol {lit}`×` for the Cartesian product is entered as {kbd}`\times`. You will generally use lower-case Greek letters like {lit}`α`, {lit}`β`, and {lit}`γ` to range over types. You can enter these particular ones with {kbd}`\a`, {kbd}`\b`, and {kbd}`\g`. ::::setup ``` variable (α β : Type) (f : α → β) (x : α) (m n : Nat) (p : Nat × Nat) ``` There are a few more things to notice here. First, the application of a function {lean}`f` to a value {lean}`x` is denoted {lean}`f x` (e.g., {lean}`Nat.succ 2`). Second, when writing type expressions, arrows associate to the _right_; for example, the type of {lean}`Nat.add` is {lean}`Nat → Nat → Nat` which is equivalent to {lean}`Nat → (Nat → Nat)`. 
Thus you can view {lean}`Nat.add` as a function that takes a natural number and returns another function that takes a natural number and returns a natural number. In type theory, this is generally more convenient than writing {lean}`Nat.add` as a function that takes a pair of natural numbers as input and returns a natural number as output. For example, it allows you to “partially apply” the function {lean}`Nat.add`. The example above shows that {lean}`Nat.add 3` has type {lean}`Nat → Nat`, that is, {lean}`Nat.add 3` returns a function that “waits” for a second argument, {lean}`n`, which is then equivalent to writing {lean}`Nat.add 3 n`. :::comment ``` <!-- Taking a function ``h`` of type ``Nat × Nat → Nat`` and “redefining” it to look like ``g`` is a process known as _currying_. --> ``` ::: You have seen that if you have {lean}`m : Nat` and {lean}`n : Nat`, then {lean}`(m, n)` denotes the ordered pair of {lean}`m` and {lean}`n` which is of type {lean}`Nat × Nat`. This gives you a way of creating pairs of natural numbers. Conversely, if you have {lean}`p : Nat × Nat`, then you can write {lean}`p.1 : Nat` and {lean}`p.2 : Nat`. This gives you a way of extracting its two components. :::: # Types as objects %%% tag := "types-as-objects" %%% One way in which Lean's dependent type theory extends simple type theory is that types themselves—entities like {lean}`Nat` and {lean}`Bool`—are first-class citizens, which is to say that they themselves are objects. For that to be the case, each of them also has to have a type. ```lean #check Nat #check Bool #check Nat → Bool #check Nat × Bool #check Nat → Nat #check Nat × Nat → Nat #check Nat → Nat → Nat #check Nat → (Nat → Nat) #check Nat → Nat → Bool #check (Nat → Nat) → Nat ``` You can see that each one of the expressions above is an object of type {lean}`Type`. 
You can also declare new constants for types: ```lean def α : Type := Nat def β : Type := Bool def F : Type → Type := List def G : Type → Type → Type := Prod #check α -- α : Type #check F α -- F α : Type #check F Nat -- F Nat : Type #check G α -- G α : Type → Type #check G α β -- G α β : Type #check G α Nat -- G α Nat : Type ``` As the example above suggests, you have already seen an example of a function of type {lean}`Type → Type → Type`, namely, the Cartesian product {lean}`Prod`: ```lean def α : Type := Nat def β : Type := Bool #check Prod α β -- α × β : Type #check α × β -- α × β : Type #check Prod Nat Nat -- Nat × Nat : Type #check Nat × Nat -- Nat × Nat : Type ``` :::leanFirst Here is another example: given any type {leanRef}`α`, the type {leanRef}`List α` denotes the type of lists of elements of type {leanRef}`α`. ```lean def α : Type := Nat #check List α -- List α : Type #check List Nat -- List Nat : Type ``` ::: Given that every expression in Lean has a type, it is natural to ask: what type does {lean}`Type` itself have? ```lean #check Type -- Type : Type 1 ``` You have actually come up against one of the most subtle aspects of Lean's typing system. Lean's underlying foundation has an infinite hierarchy of types: ```lean #check Type -- Type : Type 1 #check Type 1 -- Type 1 : Type 2 #check Type 2 -- Type 2 : Type 3 #check Type 3 -- Type 3 : Type 4 #check Type 4 -- Type 4 : Type 5 ``` :::setup ``` universe n variable (n : Nat) ``` Think of {lean}`Type 0` as a universe of “small” or “ordinary” types. {lean}`Type 1` is then a larger universe of types, which contains {lean}`Type 0` as an element, and {lean}`Type 2` is an even larger universe of types, which contains {lean}`Type 1` as an element. The list is infinite: there is a {lean}`Type n` for every natural number {lean}`n`. {lean}`Type` is an abbreviation for {lean}`Type 0`: ::: ```lean #check Type #check Type 0 ``` The following table may help concretize the relationships being discussed. 
Movement along the x-axis represents a change in the universe, while movement along the y-axis represents a change in what is sometimes referred to as “degree”. :::table * * sort * {lean}`Prop` ({lean}`Sort 0`) * {lean}`Type` ({lean}`Sort 1`) * {lean}`Type 1` ({lean}`Sort 2`) * {lean}`Type 2` ({lean}`Sort 3`) * ... * * type * {lean}`True` * {lean}`Bool` * {lean}`Nat -> Type` * {lean}`Type -> Type 1` * ... * * term * {lean}`True.intro` * {lean}`true` * {lean}`fun n => Fin n` * {lean}`fun (_ : Type) => Type` * ... ::: :::setup ``` universe u variable (α : Type u) ``` Some operations, however, need to be _polymorphic_ over type universes. For example, {lean}`List α` should make sense for any type {lean}`α`, no matter which type universe {lean}`α` lives in. This explains the type signature of the function {lean}`List`: ```lean #check List -- List.{u} (α : Type u) : Type u ``` Here {lit}`u` is a variable ranging over type levels. The output of the {kw}`#check` command means that whenever {lean}`α` has type {lean}`Type u`, {lean}`List α` also has type {lean}`Type u`. 
The function {lean}`Prod` is similarly polymorphic: ::: ```lean #check Prod -- Prod.{u, v} (α : Type u) (β : Type v) : Type (max u v) ``` To define polymorphic constants, Lean allows you to declare universe variables explicitly using the {kw}`universe` command: ```lean universe u def F (α : Type u) : Type u := Prod α α #check F -- F.{u} (α : Type u) : Type u ``` :::leanFirst You can avoid the {kw}`universe` command by providing the universe parameters when defining {leanRef}`F`: ```lean def F.{u} (α : Type u) : Type u := Prod α α #check F -- F.{u} (α : Type u) : Type u ``` ::: # Function Abstraction and Evaluation %%% tag := "function-abstraction-and-evaluation" %%% Lean provides a {kw}`fun` (or {kw}`λ`) keyword to create a function from an expression as follows: ```lean #check fun (x : Nat) => x + 5 -- fun x => x + 5 : Nat → Nat -- λ and fun mean the same thing #check λ (x : Nat) => x + 5 -- fun x => x + 5 : Nat → Nat ``` The type {lean}`Nat` can be inferred in this example: ```lean #check fun x => x + 5 -- fun x => x + 5 : Nat → Nat #check λ x => x + 5 -- fun x => x + 5 : Nat → Nat ``` You can evaluate a lambda function by passing the required parameters: ```lean #eval (λ x : Nat => x + 5) 10 -- 15 ``` :::setup ``` variable {x : α} {t : β} ``` Creating a function from another expression is a process known as _lambda abstraction_. Suppose you have the variable {lean}`x : α` and you can construct an expression {lean}`t : β`, then the expression {lean}`fun (x : α) => t`, or, equivalently, {lean}`λ (x : α) => t`, is an object of type {lean}`α → β`. Think of this as the function from {lean}`α` to {lean}`β` which maps any value {leanRef}`x` to the value {leanRef}`t`. 
::: Here are some more examples ```lean #check fun x : Nat => fun y : Bool => if not y then x + 1 else x + 2 #check fun (x : Nat) (y : Bool) => if not y then x + 1 else x + 2 #check fun x y => if not y then x + 1 else x + 2 -- fun x y => if (!y) = true then x + 1 else x + 2 : Nat → Bool → Nat ``` Lean interprets the final three examples as the same expression; in the last expression, Lean infers the type of {leanRef}`x` and {leanRef}`y` from the expression {leanRef}`if not y then x + 1 else x + 2`. Some mathematically common examples of operations of functions can be described in terms of lambda abstraction: ```lean def f (n : Nat) : String := toString n def g (s : String) : Bool := s.length > 0 #check fun x : Nat => x -- fun x => x : Nat → Nat #check fun x : Nat => true -- fun x => true : Nat → Bool #check fun x : Nat => g (f x) -- fun x => g (f x) : Nat → Bool #check fun x => g (f x) -- fun x => g (f x) : Nat → Bool ``` Think about what these expressions mean. The expression {lean}`fun x : Nat => x` denotes the identity function on {lean}`Nat`, the expression {lean}`fun x : Nat => true` denotes the constant function that always returns {lean}`true`, and {leanRef}`fun x : Nat => g (f x)` denotes the composition of {leanRef}`f` and {leanRef}`g`. You can, in general, leave off the type annotation and let Lean infer it for you. So, for example, you can write {leanRef}`fun x => g (f x)` instead of {leanRef}`fun x : Nat => g (f x)`. 
:::leanFirst You can pass functions as parameters and by giving them names {leanRef}`f` and {leanRef}`g` you can then use those functions in the implementation: ```lean #check fun (g : String → Bool) (f : Nat → String) (x : Nat) => g (f x) ``` ::: You can also pass types as parameters: ```lean #check fun (α β γ : Type) (g : β → γ) (f : α → β) (x : α) => g (f x) ``` The last expression, for example, denotes the function that takes three types, {leanRef}`α`, {leanRef}`β`, and {leanRef}`γ`, and two functions, {leanRef}`g : β → γ` and {leanRef}`f : α → β`, and returns the composition of {leanRef}`g` and {leanRef}`f`. (Making sense of the type of this function requires an understanding of _dependent products_, which will be explained below.) :::setup ``` variable (α : Type) (t : β) -- Avoid warnings axiom whatever : α def b : γ := whatever ``` The general form of a lambda expression is {lean}`fun (x : α) => t`, where the variable {leanRef}`x` is a “bound variable”: it is really a placeholder, whose “scope” does not extend beyond the expression {leanRef}`t`. For example, the variable {lit}`b` in the expression {lean}`fun (b : β) (x : α) => b` has nothing to do with the constant {lean}`b` declared earlier. In fact, the expression denotes the same function as {lean}`fun (u : β) (z : α) => u`. Formally, expressions that are the same up to a renaming of bound variables are called _alpha equivalent_, and are considered “the same.” Lean recognizes this equivalence. ::: :::setup ``` variable (t : α → β) (s : α) ``` Notice that applying a term {lean}`t : α → β` to a term {lean}`s : α` yields an expression {lean}`t s : β`. 
Returning to the previous example and renaming bound variables for clarity, notice the types of the following expressions: ::: ```lean #check (fun x : Nat => x) 1 -- (fun x => x) 1 : Nat #check (fun x : Nat => true) 1 -- (fun x => true) 1 : Bool def f (n : Nat) : String := toString n def g (s : String) : Bool := s.length > 0 #check (fun (α β γ : Type) (u : β → γ) (v : α → β) (x : α) => u (v x)) Nat String Bool g f 0 ``` As expected, the expression {lean}`(fun x : Nat => x) 1` has type {lean}`Nat`. In fact, more should be true: applying the expression {lean}`(fun x : Nat => x)` to {lean}`1` should “return” the value {lean}`1`. And, indeed, it does: ```lean #eval (fun x : Nat => x) 1 -- 1 #eval (fun x : Nat => true) 1 -- true ``` You will see later how these terms are evaluated. For now, notice that this is an important feature of dependent type theory: every term has a computational behavior, and supports a notion of _normalization_. In principle, two terms that reduce to the same value are called _definitionally equal_. They are considered “the same” by Lean's type checker, and Lean does its best to recognize and support these identifications. Lean is a complete programming language. It has a compiler that generates a binary executable and an interactive interpreter. You can use the command {kw}`#eval` to execute expressions, and it is the preferred way of testing your functions. :::comment ``` <!-- Note that `#eval` and `#reduce` are _not_ equivalent. The command `#eval` first compiles Lean expressions into an intermediate representation (IR) and then uses an interpreter to execute the generated IR. Some builtin types (e.g., `Nat`, `String`, `Array`) have a more efficient representation in the IR. The IR has support for using foreign functions that are opaque to Lean. 
In contrast, the ``#reduce`` command relies on a reduction engine similar to the one used in Lean's trusted kernel, the part of Lean that is responsible for checking and verifying the correctness of expressions and proofs. It is less efficient than ``#eval``, and treats all foreign functions as opaque constants. You will learn later that there are some other differences between the two commands. --> ``` ::: # Definitions %%% tag := "definitions" %%% Recall that the {kw}`def` keyword provides one important way of declaring new named objects. ```lean def double (x : Nat) : Nat := x + x ``` This might look more familiar to you if you know how functions work in other programming languages. The name {leanRef}`double` is defined as a function that takes an input parameter {leanRef}`x` of type {lean}`Nat`, where the result of the call is {leanRef}`x + x`, so it is returning type {lean}`Nat`. You can then invoke this function using: ```lean def double (x : Nat) : Nat := x + x ----- #eval double 3 -- 6 ``` In this case you can think of {kw}`def` as a kind of named {kw}`fun`. The following yields the same result: ```lean def double : Nat → Nat := fun x => x + x #eval double 3 -- 6 ``` You can omit the type declarations when Lean has enough information to infer it. Type inference is an important part of Lean: ```lean def double := fun (x : Nat) => x + x ``` The general form of a definition is {lit}`def foo : α := bar` where {lit}`α` is the type returned from the expression {lit}`bar`. Lean can usually infer the type {lit}`α`, but it is often a good idea to write it explicitly. This clarifies your intention, and Lean will flag an error if the right-hand side of the definition does not have a matching type. The right hand side {lit}`bar` can be any expression, not just a lambda. So {kw}`def` can also be used to simply name a value like this: ```lean def pi := 3.141592654 ``` {kw}`def` can take multiple input parameters. 
Let's create one that adds two natural numbers: ```lean def add (x y : Nat) := x + y #eval add 3 2 -- 5 ``` The parameter list can be separated like this: ```lean def double (x : Nat) : Nat := x + x ----- def add (x : Nat) (y : Nat) := x + y #eval add (double 3) (7 + 9) -- 22 ``` Notice here we called the {leanRef}`double` function to create the first parameter to {leanRef}`add`. You can use other more interesting expressions inside a {kw}`def`: ```lean def greater (x y : Nat) := if x > y then x else y ``` You can probably guess what this one will do. You can also define a function that takes another function as input. The following calls a given function twice passing the output of the first invocation to the second: ```lean def double (x : Nat) : Nat := x + x ----- def doTwice (f : Nat → Nat) (x : Nat) : Nat := f (f x) #eval doTwice double 2 -- 8 ``` Now to get a bit more abstract, you can also specify arguments that are like type parameters: ```lean def compose (α β γ : Type) (g : β → γ) (f : α → β) (x : α) : γ := g (f x) ``` This means {leanRef}`compose` is a function that takes any two functions as input arguments, so long as those functions each take only one input. The type algebra {leanRef}`β → γ` and {leanRef}`α → β` means it is a requirement that the type of the output of the second function must match the type of the input to the first function—which makes sense, otherwise the two functions would not be composable. {leanRef}`compose` also takes a 3rd argument of type {leanRef}`α` which it uses to invoke the second function (locally named {leanRef}`f`) and it passes the result of that function (which is type {leanRef}`β`) as input to the first function (locally named {leanRef}`g`). The first function returns a type {leanRef}`γ` so that is also the return type of the {leanRef}`compose` function. {leanRef}`compose` is also very general in that it works over any type {leanRef}`α β γ`. 
This means {leanRef}`compose` can compose just about any 2 functions so long as they each take one parameter, and so long as the type of output of the second matches the input of the first. For example: ```lean def compose (α β γ : Type) (g : β → γ) (f : α → β) (x : α) : γ := g (f x) def double (x : Nat) : Nat := x + x ----- def square (x : Nat) : Nat := x * x #eval compose Nat Nat Nat double square 3 -- 18 ``` # Local Definitions %%% tag := "local-definitions" %%% :::setup ``` variable (t1 : α) (t2 : β) ``` Lean also allows you to introduce “local” definitions using the {kw}`let` keyword. The expression {lean}`let a := t1; t2` is definitionally equal to the result of replacing every occurrence of {leanRef}`a` in {leanRef}`t2` by {leanRef}`t1`. ::: ```lean #check let y := 2 + 2; y * y -- let y := 2 + 2; y * y : Nat #eval let y := 2 + 2; y * y -- 16 def twice_double (x : Nat) : Nat := let y := x + x; y * y #eval twice_double 2 -- 16 ``` :::setup ``` def twice_double (x : Nat) : Nat := let y := x + x; y * y variable (x : Nat) ``` Here, {lean}`twice_double x` is definitionally equal to the term {lean}`(x + x) * (x + x)`. ::: You can combine multiple assignments by chaining {kw}`let` statements: ```lean #check let y := 2 + 2; let z := y + y; z * z #eval let y := 2 + 2; let z := y + y; z * z -- 64 ``` The {lit}`;` can be omitted when a line break is used. ```lean def t (x : Nat) : Nat := let y := x + x y * y ``` ::::leanFirst :::setup ``` variable (t1 : α) (t2 : β) ``` Notice that the meaning of the expression {lean}`let a := t1; t2` is very similar to the meaning of {lean}`(fun a => t2) t1`, but the two are not the same. In the first expression, you should think of every instance of {leanRef (in:="let a := t1; t2")}`a` in {leanRef (in:="let a := t1; t2")}`t2` as a syntactic abbreviation for {leanRef (in:="let a := t1; t2")}`t1`. 
In the second expression, {leanRef (in:="(fun a => t2) t1")}`a` is a variable, and the expression {leanRef (in:="(fun a => t2) t1")}`fun a => t2` has to make sense independently of the value of {leanRef (in:="(fun a => t2) t1")}`a`. The {kw}`let` construct is a stronger means of abbreviation, and there are expressions of the form {lean}`let a := t1; t2` that cannot be expressed as {lean}`(fun a => t2) t1`. As an exercise, try to understand why the definition of {leanRef}`foo` below type checks, but the definition of {lit}`bar` does not. ::: ```lean def foo := let a := Nat; fun x : a => x + 2 /- def bar := (fun a => fun x : a => x + 2) Nat -/ ``` :::: # Variables and Sections %%% tag := "variables-and-sections" %%% Consider the following three function definitions: ```lean def compose (α β γ : Type) (g : β → γ) (f : α → β) (x : α) : γ := g (f x) def doTwice (α : Type) (h : α → α) (x : α) : α := h (h x) def doThrice (α : Type) (h : α → α) (x : α) : α := h (h (h x)) ``` Lean provides you with the {kw}`variable` command to make such declarations look more compact: ```lean variable (α β γ : Type) def compose (g : β → γ) (f : α → β) (x : α) : γ := g (f x) def doTwice (h : α → α) (x : α) : α := h (h x) def doThrice (h : α → α) (x : α) : α := h (h (h x)) ``` You can declare variables of any type, not just {lean}`Type` itself: ```lean variable (α β γ : Type) variable (g : β → γ) (f : α → β) (h : α → α) variable (x : α) def compose := g (f x) def doTwice := h (h x) def doThrice := h (h (h x)) #print compose #print doTwice #print doThrice ``` Printing them out shows that all three groups of definitions have exactly the same effect. The {kw}`variable` command instructs Lean to insert the declared variables as bound variables in definitions that refer to them by name. Lean is smart enough to figure out which variables are used explicitly or implicitly in a definition. 
You can therefore proceed as though {leanRef}`α`, {leanRef}`β`, {leanRef}`γ`, {leanRef}`g`, {leanRef}`f`, {leanRef}`h`, and {leanRef}`x` are fixed objects when you write your definitions, and let Lean abstract the definitions for you automatically. When declared in this way, a variable stays in scope until the end of the file you are working on. Sometimes, however, it is useful to limit the scope of a variable. For that purpose, Lean provides the notion of a {kw}`section`: ```lean section useful variable (α β γ : Type) variable (g : β → γ) (f : α → β) (h : α → α) variable (x : α) def compose := g (f x) def doTwice := h (h x) def doThrice := h (h (h x)) end useful ``` When the section is closed, the variables go out of scope, and cannot be referenced any more. You do not have to indent the lines within a section. Nor do you have to name a section, which is to say, you can use an anonymous {kw}`section` / {kw}`end` pair. If you do name a section, however, you have to close it using the same name. Sections can also be nested, which allows you to declare new variables incrementally. # Namespaces %%% tag := "namespaces" %%% Lean provides you with the ability to group definitions into nested, hierarchical _namespaces_: ```lean namespace Foo def a : Nat := 5 def f (x : Nat) : Nat := x + 7 def fa : Nat := f a def ffa : Nat := f (f a) #check a #check f #check fa #check ffa #check Foo.fa end Foo -- #check a -- error -- #check f -- error #check Foo.a #check Foo.f #check Foo.fa #check Foo.ffa open Foo #check a #check f #check fa #check Foo.fa ``` When you declare that you are working in the namespace {leanRef}`Foo`, every identifier you declare has a full name with prefix “{lit}`Foo.`”. Within the namespace, you can refer to identifiers by their shorter names, but once you end the namespace, you have to use the longer names. Unlike {kw}`section`, namespaces require a name. There is only one anonymous namespace at the root level. 
The {leanRef}`open` command brings the shorter names into the current context. Often, when you import a module, you will want to open one or more of the namespaces it contains, to have access to the short identifiers. But sometimes you will want to leave this information protected by a fully qualified name, for example, when they conflict with identifiers in another namespace you want to use. Thus namespaces give you a way to manage names in your working environment. For example, Lean groups definitions and theorems involving lists into a namespace {lit}`List`. ```lean #check List.nil #check List.cons #check List.map ``` :::leanFirst The command {leanRef}`open List` allows you to use the shorter names: ```lean open List #check nil #check cons #check map ``` ::: Like sections, namespaces can be nested: ```lean namespace Foo def a : Nat := 5 def f (x : Nat) : Nat := x + 7 def fa : Nat := f a namespace Bar def ffa : Nat := f (f a) #check fa #check ffa end Bar #check fa #check Bar.ffa end Foo #check Foo.fa #check Foo.Bar.ffa open Foo #check fa #check Bar.ffa ``` Namespaces that have been closed can later be reopened, even in another file: ```lean namespace Foo def a : Nat := 5 def f (x : Nat) : Nat := x + 7 def fa : Nat := f a end Foo #check Foo.a #check Foo.f namespace Foo def ffa : Nat := f (f a) end Foo ``` Like sections, nested namespaces have to be closed in the order they are opened. Namespaces and sections serve different purposes: namespaces organize data and sections declare variables for insertion in definitions. Sections are also useful for delimiting the scope of commands such as {kw}`set_option` and {kw}`open`. In many respects, however, a {kw}`namespace`{lit}` ... `{kw}`end` block behaves the same as a {kw}`section`{lit}` ... `{kw}`end` block. In particular, if you use the {kw}`variable` command within a namespace, its scope is limited to the namespace. 
Similarly, if you use an {kw}`open` command within a namespace, its effects disappear when the namespace is closed. # What makes dependent type theory dependent? %%% tag := "what-makes-dependent-type-theory-dependent" %%% :::setup ``` variable (α : Type) (n : Nat) ``` The short explanation is that types can depend on parameters. You have already seen a nice example of this: the type {lean}`List α` depends on the argument {lean}`α`, and this dependence is what distinguishes {lean}`List Nat` and {lean}`List Bool`. For another example, consider the type {lean}`Vector α n`, the type of vectors of elements of {lean}`α` of length {lean}`n`. This type depends on _two_ parameters: the type of the elements in the vector ({lean}`α : Type`) and the length of the vector {lean}`n : Nat`. ::: ::::setup ``` def cons (α : Type) (a : α) (as : List α) : List α := List.cons a as variable (α : Type) (a : α) (as : List α) ``` :::leanFirst Suppose you wish to write a function {leanRef}`cons` which inserts a new element at the head of a list. What type should {leanRef}`cons` have? Such a function is _polymorphic_: you expect the {leanRef}`cons` function for {lean}`Nat`, {lean}`Bool`, or an arbitrary type {leanRef}`α` to behave the same way. So it makes sense to take the type to be the first argument to {leanRef}`cons`, so that for any type, {lean}`α`, {lean}`cons α` is the insertion function for lists of type {lean}`α`. In other words, for every {lean}`α`, {lean}`cons α` is the function that takes an element {lean}`a : α` and a list {lean}`as : List α`, and returns a new list, so you have {lean}`cons α a as : List α`. It is clear that {lean}`cons α` should have type {lean}`α → List α → List α`. But what type should {leanRef}`cons` have? A first guess might be {lean}`Type → α → List α → List α`, but, on reflection, this does not make sense: the {leanRef}`α` in this expression does not refer to anything, whereas it should refer to the argument of type {lean}`Type`. 
In other words, _assuming_ {lean}`α : Type` is the first argument to the function, the types of the next two arguments are {lean}`α` and {lean}`List α`. These types vary depending on the first argument, {leanRef}`α`. ```lean def cons (α : Type) (a : α) (as : List α) : List α := List.cons a as #check cons Nat -- cons Nat : Nat → List Nat → List Nat #check cons Bool -- cons Bool : Bool → List Bool → List Bool #check cons -- cons (α : Type) (a : α) (as : List α) : List α ``` ::: :::: :::setup ``` variable (α : Type) (β : α → Type) (a : α) (f : (a : α) → β a) ``` This is an instance of a _dependent function type_, or *dependent arrow type*. Given {lean}`α : Type` and {lean}`β : α → Type`, think of {lean}`β` as a family of types over {lean}`α`, that is, a type {lean}`β a` for each {lean}`a : α`. In that case, the type {lean}`(a : α) → β a` denotes the type of functions {lean}`f` with the property that, for each {lean}`a : α`, {lean}`f a` is an element of {lean}`β a`. In other words, the type of the value returned by {lean}`f` depends on its input. ::: :::setup ``` variable (α : Type) (β : Type) (a : α) (f : (a : α) → β a) ``` Notice that {lean}`(a : α) → β` makes sense for any expression {lean}`β : Type`. When the value of {lean}`β` depends on {leanRef}`a` (as does, for example, the expression {leanRef}`β a` in the previous paragraph), {leanRef}`(a : α) → β` denotes a dependent function type. When {lean}`β` doesn't depend on {leanRef}`a`, {leanRef}`(a : α) → β` is no different from the type {lean}`α → β`. Indeed, in dependent type theory (and in Lean), {lean}`α → β` is just notation for {lean}`(a : α) → β` when {lean}`β` does not depend on {leanRef (in := "a : α")}`a`. ::: Returning to the example of lists, you can use the command {kw}`#check` to inspect the type of the following {lean}`List` functions. The {lit}`@` symbol and the difference between the round and curly braces will be explained momentarily. 
```lean #check @List.cons -- @List.cons : {α : Type u_1} → α → List α → List α #check @List.nil -- @List.nil : {α : Type u_1} → List α #check @List.length -- @List.length : {α : Type u_1} → List α → Nat #check @List.append -- @List.append : {α : Type u_1} → List α → List α → List α ``` :::setup ``` variable (α : Type) (β : α → Type) (a : α) (b : β a) ``` Just as dependent function types {lean}`(a : α) → β a` generalize the notion of a function type {leanRef}`α → β` by allowing {leanRef (in := "α → β")}`β` to depend on {lean}`a`, dependent Cartesian product types {lean}`(a : α) × β a` generalize the Cartesian product {lit}`α × β` in the same way. Dependent products are also called _sigma_ types, and you can also write them as {lean}`Σ a : α, β a`. You can use {lean (type := "(a : α) × β a")}`⟨a, b⟩` or {lean}`Sigma.mk a b` to create a dependent pair. The {lit}`⟨` and {lit}`⟩` characters may be typed with {kbd}`\langle` and {kbd}`\rangle` or {kbd}`\<` and {kbd}`\>`, respectively. ::: ```lean universe u v def f (α : Type u) (β : α → Type v) (a : α) (b : β a) : (a : α) × β a := ⟨a, b⟩ def g (α : Type u) (β : α → Type v) (a : α) (b : β a) : Σ a : α, β a := Sigma.mk a b def h1 (x : Nat) : Nat := (f Type (fun α => α) Nat x).2 #eval h1 5 -- 5 def h2 (x : Nat) : Nat := (g Type (fun α => α) Nat x).2 #eval h2 5 -- 5 ``` The functions {leanRef}`f` and {leanRef}`g` above denote the same function. 
# Implicit Arguments %%% tag := "implicit-arguments" %%% Suppose we have an implementation of lists as: ```lean universe u def Lst (α : Type u) : Type u := List α def Lst.cons (α : Type u) (a : α) (as : Lst α) : Lst α := List.cons a as def Lst.nil (α : Type u) : Lst α := List.nil def Lst.append (α : Type u) (as bs : Lst α) : Lst α := List.append as bs ----- #check Lst -- Lst.{u} (α : Type u) : Type u #check Lst.cons -- Lst.cons.{u} (α : Type u) (a : α) (as : Lst α) : Lst α #check Lst.nil -- Lst.nil.{u} (α : Type u) : Lst α #check Lst.append -- Lst.append.{u} (α : Type u) (as bs : Lst α) : Lst α ``` Then, you can construct lists of {lean}`Nat` as follows: ```lean universe u def Lst (α : Type u) : Type u := List α def Lst.cons (α : Type u) (a : α) (as : Lst α) : Lst α := List.cons a as def Lst.nil (α : Type u) : Lst α := List.nil def Lst.append (α : Type u) (as bs : Lst α) : Lst α := List.append as bs ----- #check Lst.cons Nat 0 (Lst.nil Nat) def as : Lst Nat := Lst.nil Nat def bs : Lst Nat := Lst.cons Nat 5 (Lst.nil Nat) #check Lst.append Nat as bs ``` :::setup ``` def Lst (α : Type u) : Type u := List α variable (α : Type) ``` Because the constructors are polymorphic over types, we have to insert the type {lean}`Nat` as an argument repeatedly. But this information is redundant: one can infer the argument {leanRef}`α` in {leanRef}`Lst.cons Nat 5 (Lst.nil Nat)` from the fact that the second argument, {leanRef}`5`, has type {lean}`Nat`. One can similarly infer the argument in {leanRef}`Lst.nil Nat`, not from anything else in that expression, but from the fact that it is sent as an argument to the function {leanRef}`Lst.cons`, which expects an element of type {lean}`Lst α` in that position. ::: This is a central feature of dependent type theory: terms carry a lot of information, and often some of that information can be inferred from the context. In Lean, one uses an underscore, {lit}`_`, to specify that the system should fill in the information automatically. 
This is known as an “implicit argument.” ```lean universe u def Lst (α : Type u) : Type u := List α def Lst.cons (α : Type u) (a : α) (as : Lst α) : Lst α := List.cons a as def Lst.nil (α : Type u) : Lst α := List.nil def Lst.append (α : Type u) (as bs : Lst α) : Lst α := List.append as bs #check Lst #check Lst.cons #check Lst.nil #check Lst.append ----- #check Lst.cons _ 0 (Lst.nil _) def as : Lst Nat := Lst.nil _ def bs : Lst Nat := Lst.cons _ 5 (Lst.nil _) #check Lst.append _ as bs -- Lst.append Nat as bs : Lst Nat ``` It is still tedious, however, to type all these underscores. When a function takes an argument that can generally be inferred from context, Lean allows you to specify that this argument should, by default, be left implicit. This is done by putting the arguments in curly braces, as follows: ```lean universe u def Lst (α : Type u) : Type u := List α def Lst.cons {α : Type u} (a : α) (as : Lst α) : Lst α := List.cons a as def Lst.nil {α : Type u} : Lst α := List.nil def Lst.append {α : Type u} (as bs : Lst α) : Lst α := List.append as bs #check Lst.cons 0 Lst.nil def as : Lst Nat := Lst.nil def bs : Lst Nat := Lst.cons 5 Lst.nil #check Lst.append as bs ``` All that has changed are the braces around {leanRef}`α : Type u` in the declaration of the variables. We can also use this device in function definitions: ```lean universe u def ident {α : Type u} (x : α) := x ``` Checking the type of {leanRef}`ident` requires wrapping it in parentheses to avoid having its signature shown: ```lean universe u def ident {α : Type u} (x : α) := x --------- #check (ident) -- ident : ?m.22 → ?m.22 #check ident 1 -- ident 1 : Nat #check ident "hello" -- ident "hello" : String #check @ident -- @ident : {α : Type u_1} → α → α ``` This makes the first argument to {leanRef}`ident` implicit. Notationally, this hides the specification of the type, making it look as though {leanRef}`ident` simply takes an argument of any type. 
In fact, the function {lean}`id` is defined in the standard library in exactly this way. We have chosen a nontraditional name here only to avoid a clash of names. Variables can also be specified as implicit when they are declared with the {kw}`variable` command: ```lean universe u section variable {α : Type u} variable (x : α) def ident := x end #check ident #check ident 4 #check ident "hello" ``` This definition of {leanRef}`ident` here has the same effect as the one above. Lean has very complex mechanisms for instantiating implicit arguments, and we will see that they can be used to infer function types, predicates, and even proofs. The process of instantiating these “holes,” or “placeholders,” in a term is often known as _elaboration_. The presence of implicit arguments means that at times there may be insufficient information to fix the meaning of an expression precisely. An expression like {lean}`id` or {lean}`List.nil` is said to be _polymorphic_, because it can take on different meanings in different contexts. :::setup ``` variable (T : Type) (e : T) ``` One can always specify the type {lean}`T` of an expression {lean}`e` by writing {lean}`(e : T)`. This instructs Lean's elaborator to use the value {lean}`T` as the type of {lean}`e` when trying to resolve implicit arguments. In the second pair of examples below, this mechanism is used to specify the desired types of the expressions {lean}`id` and {lean}`List.nil`: ::: ```lean #check (List.nil) -- [] : List ?m.2 #check (id) -- id : ?m.1 → ?m.1 #check (List.nil : List Nat) -- [] : List Nat #check (id : Nat → Nat) -- id : Nat → Nat ``` Numerals are overloaded in Lean, but when the type of a numeral cannot be inferred, Lean assumes, by default, that it is a natural number. So the expressions in the first two {kw}`#check` commands below are elaborated in the same way, whereas the third {kw}`#check` command interprets {lean (type := "Int")}`2` as an integer. 
```lean #check 2 -- 2 : Nat #check (2 : Nat) -- 2 : Nat #check (2 : Int) -- 2 : Int ``` :::setup ``` variable (foo : {α : Type} → α → β) ``` Sometimes, however, we may find ourselves in a situation where we have declared an argument to a function to be implicit, but now want to provide the argument explicitly. If {lean}`foo` is such a function, the notation {lean}`@foo` denotes the same function with all the arguments made explicit. ::: ```lean #check @id -- @id : {α : Sort u_1} → α → α #check @id Nat -- id : Nat → Nat #check @id Bool -- id : Bool → Bool #check @id Nat 1 -- id 1 : Nat #check @id Bool true -- id true : Bool ``` Notice that now the first {kw}`#check` command gives the type of the identifier, {leanRef}`id`, without inserting any placeholders. Moreover, the output indicates that the first argument is implicit.
theorem_proving_in_lean4/book/TPiL/AxiomsComputation.lean
import VersoManual import TPiL.Examples open Verso.Genre Manual open TPiL #doc (Manual) "Axioms and Computation" => %%% tag := "axioms-and-computation" %%% We have seen that the version of the Calculus of Constructions that has been implemented in Lean includes dependent function types, inductive types, and a hierarchy of universes that starts with an {tech}[impredicative], {tech (key := "proof irrelevance")}[proof-irrelevant] {lean}`Prop` at the bottom. In this chapter, we consider ways of extending the CIC with additional axioms and rules. Extending a foundational system in such a way is often convenient; it can make it possible to prove more theorems, as well as make it easier to prove theorems that could have been proved otherwise. But there can be negative consequences of adding additional axioms, consequences which may go beyond concerns about their correctness. In particular, the use of axioms bears on the computational content of definitions and theorems, in ways we will explore here. Lean is designed to support both computational and classical reasoning. Users that are so inclined can stick to a “computationally pure” fragment, which guarantees that closed expressions in the system evaluate to canonical normal forms. In particular, any closed computationally pure expression of type {lean}`Nat`, for example, will reduce to a numeral. Lean's standard library defines an additional axiom, propositional extensionality, and a quotient construction which in turn implies the principle of function extensionality. These extensions are used, for example, to develop theories of sets and finite sets. We will see below that using these theorems can block evaluation in Lean's kernel, so that closed terms of type {lean}`Nat` no longer evaluate to numerals. But Lean erases types and propositional information when compiling definitions to executable code, and since these axioms only add new propositions, they are compatible with that computational interpretation. 
Even computationally inclined users may wish to use the classical law of the excluded middle to reason about computation. This also blocks evaluation in the kernel, but it is compatible with compiled code. The standard library also defines a choice principle that is entirely antithetical to a computational interpretation, since it magically produces “data” from a proposition asserting its existence. Its use is essential to some classical constructions, and users can import it when needed. But expressions that use this construction to produce data do not have computational content, and in Lean we are required to mark such definitions as {kw}`noncomputable` to flag that fact. Using a clever trick (known as Diaconescu's theorem), one can use propositional extensionality, function extensionality, and choice to derive the law of the excluded middle. As noted above, however, use of the law of the excluded middle is still compatible with compilation, as are other classical principles, as long as they are not used to manufacture data. To summarize, then, on top of the underlying framework of universes, dependent function types, and inductive types, the standard library adds three additional components: - the axiom of propositional extensionality - a quotient construction, which implies function extensionality - a choice principle, which produces data from an existential proposition. The first two of these block normalization within Lean, but are compatible with code generation, whereas the third is not amenable to computational interpretation. We will spell out the details more precisely below. 
# Historical and Philosophical Context %%% tag := "historical-and-philosophical-context" %%% :::setup ``` variable (x : α) (y : β) ``` For most of its history, mathematics was essentially computational: geometry dealt with constructions of geometric objects, algebra was concerned with algorithmic solutions to systems of equations, and analysis provided means to compute the future behavior of systems evolving over time. From the proof of a theorem to the effect that “for every {lean}`x`, there is a {lean}`y` such that ...”, it was generally straightforward to extract an algorithm to compute such a {lean}`y` given {lean}`x`. ::: In the nineteenth century, however, increases in the complexity of mathematical arguments pushed mathematicians to develop new styles of reasoning that suppress algorithmic information and invoke descriptions of mathematical objects that abstract away the details of how those objects are represented. The goal was to obtain a powerful “conceptual” understanding without getting bogged down in computational details, but this had the effect of admitting mathematical theorems that are simply _false_ on a direct computational reading. There is still fairly uniform agreement today that computation is important to mathematics. But there are different views as to how best to address computational concerns. From a _constructive_ point of view, it is a mistake to separate mathematics from its computational roots; every meaningful mathematical theorem should have a direct computational interpretation. From a _classical_ point of view, it is more fruitful to maintain a separation of concerns: we can use one language and body of methods to write computer programs, while maintaining the freedom to use nonconstructive theories and methods to reason about them. Lean is designed to support both of these approaches. Core parts of the library are developed constructively, but the system also provides support for carrying out classical mathematical reasoning. 
:::setup ``` open Nat notation "… " e "…" => e ``` Computationally, the purest part of dependent type theory avoids the use of {lean}`Prop` entirely. Inductive types and dependent function types can be viewed as data types, and terms of these types can be “evaluated” by applying reduction rules until no more rules can be applied. In principle, any closed term (that is, term with no free variables) of type {lean}`Nat` should evaluate to a numeral, {lean}`succ (… (succ zero)…)`. ::: :::setup ``` variable (p : Prop) (s t : α) (prf : p) notation x " = " y " : " α => @Eq α x y ``` Introducing a proof-irrelevant {lean}`Prop` and marking theorems irreducible represents a first step towards separation of concerns. The intention is that elements of a type {lean}`p : Prop` should play no role in computation, and so the particular construction of a term {lean}`prf : p` is “irrelevant” in that sense. One can still define computational objects that incorporate elements of type {lean}`Prop`; the point is that these elements can help us reason about the effects of the computation, but can be ignored when we extract “code” from the term. Elements of type {lean}`Prop` are not entirely innocuous, however. They include equations {lean}`s = t : α` for any type {lean}`α`, and such equations can be used as casts, to type check terms. Below, we will see examples of how such casts can block computation in the system. However, computation is still possible under an evaluation scheme that erases propositional content, ignores intermediate typing constraints, and reduces terms until they reach a normal form. This is precisely what Lean's virtual machine does. Having adopted a proof-irrelevant {lean}`Prop`, one might consider it legitimate to use, for example, the law of the excluded middle, {lean}`p ∨ ¬p`, where {lean}`p` is any proposition. 
Of course, this, too, can block computation according to the rules of CIC, but it does not prevent the generation of executable code, as described above. It is only the choice principles discussed in {ref "choice"}[the section on choice] that completely erase the distinction between the proof-irrelevant and data-relevant parts of the theory. ::: # Propositional Extensionality %%% tag := "propositional-extensionality" %%% Propositional extensionality is the following axiom: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden ------ axiom propext {a b : Prop} : (a ↔ b) → a = b ------ end Hidden ``` :::setup ``` variable (a : Prop) ``` It asserts that when two propositions imply one another, they are actually equal. This is consistent with set-theoretic interpretations in which any element {lean}`a : Prop` is either empty or the singleton set $`\{\ast\}`, for some distinguished element $`\ast`. The axiom has the effect that equivalent propositions can be substituted for one another in any context: ::: ```lean variable (a b c d e : Prop) theorem thm₁ (h : a ↔ b) : (c ∧ a ∧ d → e) ↔ (c ∧ b ∧ d → e) := propext h ▸ Iff.refl _ theorem thm₂ (p : Prop → Prop) (h : a ↔ b) (h₁ : p a) : p b := propext h ▸ h₁ ``` :::comment ``` <!-- The first example could be proved more laboriously without `propext` using the fact that the propositional connectives respect propositional equivalence. The second example represents a more essential use of `propext`. In fact, it is equivalent to `propext` itself, a fact which we encourage you to prove. Given any definition or theorem in Lean, you can use the ``#print axioms`` command to display the axioms it depends on. .. 
code-block:: lean variables a b c d e : Prop variable p : Prop → Prop theorem thm₁ (h : a ↔ b) : (c ∧ a ∧ d → e) ↔ (c ∧ b ∧ d → e) := propext h ▸ iff.refl _ theorem thm₂ (h : a ↔ b) (h₁ : p a) : p b := propext h ▸ h₁ -- BEGIN #print axioms thm₁ -- propext #print axioms thm₂ -- propext -- END --> ``` ::: # Function Extensionality %%% tag := "function-extensionality" %%% :::leanFirst Similar to propositional extensionality, function extensionality asserts that any two functions of type {leanRef}`(x : α) → β x` that agree on all their inputs are equal: ```signature funext.{u, v} {α : Sort u} {β : α → Sort v} {f g : (x : α) → β x} (h : ∀ (x : α), f x = g x) : f = g ``` ::: From a classical, set-theoretic perspective, this is exactly what it means for two functions to be equal. This is known as an “extensional” view of functions. From a constructive perspective, however, it is sometimes more natural to think of functions as algorithms, or computer programs, that are presented in some explicit way. It is certainly the case that two computer programs can compute the same answer for every input despite the fact that they are syntactically quite different. In much the same way, you might want to maintain a view of functions that does not force you to identify two functions that have the same input / output behavior. This is known as an “intensional” view of functions. In fact, function extensionality follows from the existence of quotients, which we describe in the next section. In the Lean standard library, therefore, {leanRef}`funext` is thus [proved from the quotient construction](https://github.com/leanprover/lean4/blob/master/src/Init/Core.lean). :::leanFirst Suppose that for {leanRef}`α : Type u` we define the {leanRef}`Set `{leanRef (in := "(α : Type u)")}`α`{leanRef}` := α → Prop` to denote the type of subsets of {leanRef (in := "(α : Type u)")}`α`, essentially identifying subsets with predicates. 
By combining {leanRef}`funext` and {leanRef}`propext`, we obtain an extensional theory of such sets: ```lean def Set (α : Type u) := α → Prop namespace Set def mem (x : α) (a : Set α) := a x infix:50 (priority := high) "∈" => mem theorem setext {a b : Set α} (h : ∀ x, x ∈ a ↔ x ∈ b) : a = b := funext (fun x => propext (h x)) end Set ``` ::: We can then proceed to define the empty set and set intersection, for example, and prove set identities: ```lean def Set (α : Type u) := α → Prop namespace Set def mem (x : α) (a : Set α) := a x infix:50 (priority := high) "∈" => mem theorem setext {a b : Set α} (h : ∀ x, x ∈ a ↔ x ∈ b) : a = b := funext (fun x => propext (h x)) ------ def empty : Set α := fun _ => False notation (priority := high) "∅" => empty def inter (a b : Set α) : Set α := fun x => x ∈ a ∧ x ∈ b infix:70 " ∩ " => inter theorem inter_self (a : Set α) : a ∩ a = a := setext fun x => Iff.intro (fun ⟨h, _⟩ => h) (fun h => ⟨h, h⟩) theorem inter_empty (a : Set α) : a ∩ ∅ = ∅ := setext fun _ => Iff.intro (fun ⟨_, h⟩ => h) (fun h => False.elim h) theorem empty_inter (a : Set α) : ∅ ∩ a = ∅ := setext fun _ => Iff.intro (fun ⟨h, _⟩ => h) (fun h => False.elim h) theorem inter.comm (a b : Set α) : a ∩ b = b ∩ a := setext fun _ => Iff.intro (fun ⟨h₁, h₂⟩ => ⟨h₂, h₁⟩) (fun ⟨h₁, h₂⟩ => ⟨h₂, h₁⟩) ----- end Set ``` The following is an example of how function extensionality blocks computation inside the Lean kernel: ```lean def f (x : Nat) := x def g (x : Nat) := 0 + x theorem f_eq_g : f = g := funext fun x => (Nat.zero_add x).symm def val : Nat := Eq.recOn (motive := fun _ _ => Nat) f_eq_g 0 -- does not reduce to 0 #reduce val -- evaluates to 0 #eval val ``` First, we show that the two functions {leanRef}`f` and {leanRef}`g` are equal using function extensionality, and then we cast {leanRef}`0` of type {lean}`Nat` by replacing {leanRef}`f` by {leanRef}`g` in the type. Of course, the cast is vacuous, because {lean}`Nat` does not depend on {leanRef}`f`. 
But that is enough to do the damage: under the computational rules of the system, we now have a closed term of {lean}`Nat` that does not reduce to a numeral. In this case, we may be tempted to reduce the expression to {lean}`0`. But in nontrivial examples, eliminating a cast changes the type of the term, which might make an ambient expression type incorrect. The virtual machine, however, has no trouble evaluating the expression to {lean}`0`. Here is a similarly contrived example that shows how {lean}`propext` can get in the way: ```lean theorem tteq : (True ∧ True) = True := propext (Iff.intro (fun ⟨h, _⟩ => h) (fun h => ⟨h, h⟩)) def val : Nat := Eq.recOn (motive := fun _ _ => Nat) tteq 0 -- does not reduce to 0 #reduce val -- evaluates to 0 #eval val ``` Current research programs, including work on _observational type theory_ and _cubical type theory_, aim to extend type theory in ways that permit reductions for casts involving function extensionality, quotients, and more. But the solutions are not so clear-cut, and the rules of Lean's underlying calculus do not sanction such reductions. In a sense, however, a cast does not change the meaning of an expression. Rather, it is a mechanism to reason about the expression's type. Given an appropriate semantics, it then makes sense to reduce terms in ways that preserve their meaning, ignoring the intermediate bookkeeping needed to make the reductions type-correct. In that case, adding new axioms in {lean}`Prop` does not matter; by {tech}[proof irrelevance], an expression in {lean}`Prop` carries no information, and can be safely ignored by the reduction procedures. # Quotients %%% tag := "quotients" %%% :::setup ``` variable (α : Sort u) (r : α → α → Prop) (f : α → β) (x y : α) (f' : Quot r → β) notation α " / " r:max => Quot (α := α) r notation "⟦" x "⟧" => Quot.mk _ x ``` Let {lean}`α` be any type, and let {lean}`r` be an equivalence relation on {lean}`α`. 
It is mathematically common to form the “quotient” {lean}`α / r`, that is, the type of elements of {lean}`α` “modulo” {lean}`r`. Set theoretically, one can view {lean}`α / r` as the set of equivalence classes of {lean}`α` modulo {lean}`r`. If {lean}`f : α → β` is any function that respects the equivalence relation in the sense that for every {lean}`x y : α`, {lean}`r x y` implies {lean}`f x = f y`, then {lean}`f` “lifts” to a function {lean}`f' : α / r → β` defined on each equivalence class {lean (type := "Quot r")}`⟦x⟧` by {lean}`f' ⟦x⟧ = f x`. Lean's standard library extends the Calculus of Constructions with additional constants that perform exactly these constructions, and installs this last equation as a definitional reduction rule. In its most basic form, the quotient construction does not even require {lean}`r` to be an equivalence relation. The following constants are built into Lean: ::: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden ------ universe u v axiom Quot : {α : Sort u} → (α → α → Prop) → Sort u axiom Quot.mk : {α : Sort u} → (r : α → α → Prop) → α → Quot r axiom Quot.ind : ∀ {α : Sort u} {r : α → α → Prop} {β : Quot r → Prop}, (∀ a, β (Quot.mk r a)) → (q : Quot r) → β q axiom Quot.lift : {α : Sort u} → {r : α → α → Prop} → {β : Sort u} → (f : α → β) → (∀ a b, r a b → f a = f b) → Quot r → β ------ end Hidden ``` :::setup ``` variable (α : Type u) (r : α → α → Prop) (a : α) (f : α → β) (h : ∀ a b, r a b → f a = f b) ``` The first one forms a type {lean}`Quot r` given a type {lean}`α` by any binary relation {lean}`r` on {lean}`α`. The second maps {lean}`α` to {lit}`Quot r`, so that if {lean}`r : α → α → Prop` and {lit}`a : α`, then {lean}`Quot.mk r a` is an element of {lean}`Quot r`. The third principle, {lean}`Quot.ind`, says that every element of {lean}`Quot r` is of this form. 
As for {lean}`Quot.lift`, given a function {lean}`f : α → β`, if {lean}`h` is a proof that {lean}`f` respects the relation {lean}`r`, then {lean}`Quot.lift f h` is the corresponding function on {lean}`Quot r`. The idea is that for each element {lean}`a` in {lean}`α`, the function {lean}`Quot.lift f h` maps {lean}`Quot.mk r a` (the {lean}`r`-class containing {lean}`a`) to {lean}`f a`, wherein {lean}`h` shows that this function is well defined. In fact, the computation principle is declared as a reduction rule, as the proof below makes clear. ```lean def mod7Rel (x y : Nat) : Prop := x % 7 = y % 7 -- the quotient type #check (Quot mod7Rel : Type) -- the class of numbers equivalent to 4 #check (Quot.mk mod7Rel 4 : Quot mod7Rel) def f (x : Nat) : Bool := x % 7 = 0 theorem f_respects (a b : Nat) (h : mod7Rel a b) : f a = f b := by simp [mod7Rel, f] at * rw [h] #check (Quot.lift f f_respects : Quot mod7Rel → Bool) -- the computation principle example (a : Nat) : Quot.lift f f_respects (Quot.mk mod7Rel a) = f a := rfl ``` The four constants, {lean}`Quot`, {lean}`Quot.mk`, {lean}`Quot.ind`, and {lean}`Quot.lift` in and of themselves are not very strong. You can check that the {lean}`Quot.ind` is satisfied if we take {lean}`Quot r` to be simply {lean}`α`, and take {lean}`Quot.lift` to be the identity function (ignoring {lean}`h`). For that reason, these four constants are not viewed as additional axioms. ::: :::comment ``` <!-- variables α β : Type variable r : α → α → Prop variable a : α variable f : α → β variable h : ∀ a₁ a₂, r a₁ a₂ → f a₁ = f a₂ theorem thm : quot.lift f h (quot.mk r a) = f a := rfl -- BEGIN #print axioms thm -- no axioms -- END --> ``` ::: They are, like inductively defined types and the associated constructors and recursors, viewed as part of the logical framework. 
What makes the {lean}`Quot` construction into a bona fide quotient is the following additional axiom: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden universe u v ------ axiom Quot.sound : ∀ {α : Type u} {r : α → α → Prop} {a b : α}, r a b → Quot.mk r a = Quot.mk r b ``` This is the axiom that asserts that any two elements of {leanRef}`α` that are related by {leanRef}`r` become identified in the quotient. If a theorem or definition makes use of {leanRef}`Quot.sound`, it will show up in the {kw}`#print axioms` command. :::setup ``` variable (α : Type u) (r : α → α → Prop) (r' r'': α → α → Prop) (a b : α) ``` Of course, the quotient construction is most commonly used in situations when {lean}`r` is an equivalence relation. Given {lean}`r` as above, if we define {lean}`r'` according to the rule {lean}`r' a b` iff {lean}`Quot.mk r a = Quot.mk r b`, then it's clear that {lean}`r'` is an equivalence relation. Indeed, {lean}`r'` is the _kernel_ of the function {lean}`fun a => Quot.mk r a`. The axiom {lean}`Quot.sound` says that {lean}`r a b` implies {lean}`r' a b`. Using {lean}`Quot.lift` and {lean}`Quot.ind`, we can show that {lean}`r'` is the smallest equivalence relation containing {lean}`r`, in the sense that if {lean}`r''` is any equivalence relation containing {lean}`r`, then {lean}`r' a b` implies {lean}`r'' a b`. In particular, if {lean}`r` was an equivalence relation to start with, then for all {lean}`a` and {lean}`b` we have {lean}`r a b` iff {lean}`r' a b`. 
::: To support this common use case, the standard library defines the notion of a _setoid_, which is simply a type with an associated equivalence relation: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden ------ class Setoid (α : Sort u) where r : α → α → Prop iseqv : Equivalence r instance {α : Sort u} [Setoid α] : HasEquiv α := ⟨Setoid.r⟩ namespace Setoid variable {α : Sort u} [Setoid α] theorem refl (a : α) : a ≈ a := iseqv.refl a theorem symm {a b : α} (hab : a ≈ b) : b ≈ a := iseqv.symm hab theorem trans {a b c : α} (hab : a ≈ b) (hbc : b ≈ c) : a ≈ c := iseqv.trans hab hbc end Setoid ------ end Hidden ``` Given a type {leanRef (in := "Setoid (α")}`α`, a relation {leanRef (in := "Equivalence r")}`r` on {leanRef (in := "Setoid (α")}`α`, and a proof {leanRef}`iseqv` that {leanRef (in := "Equivalence r")}`r` is an equivalence relation, we can define an instance of the {leanRef (in := "class Setoid")}`Setoid` class. ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden ------ def Quotient {α : Sort u} (s : Setoid α) := @Quot α Setoid.r ------ end Hidden ``` :::setup ``` variable (α : Type u) [Setoid α] (a b : α) ``` The constants {lean}`Quotient.mk`, {lean}`Quotient.ind`, {lean}`Quotient.lift`, and {lean}`Quotient.sound` are nothing more than the specializations of the corresponding elements of {lean}`Quot`. The fact that type class inference can find the setoid associated to a type {lean}`α` brings a number of benefits. First, we can use the notation {lean}`a ≈ b` (entered with {kbd}`\approx`) for {lean}`Setoid.r a b`, where the instance of {lean}`Setoid` is implicit in the notation {lean}`Setoid.r`. We can use the generic theorems {lean}`Setoid.refl`, {lean}`Setoid.symm`, {lean}`Setoid.trans` to reason about the relation. 
Specifically with quotients we can use the theorem {lean}`Quotient.exact`: ```signature Quotient.exact {α : Sort u} {s : Setoid α} {a b : α} : Quotient.mk s a = Quotient.mk s b → a ≈ b ``` Together with {lean}`Quotient.sound`, this implies that the elements of the quotient correspond exactly to the equivalence classes of elements in {lean}`α`. ::: :::setup ``` variable (α : Type u) (β : Type v) ``` Recall that in the standard library, {lean}`α × β` represents the Cartesian product of the types {lean}`α` and {lean}`β`. To illustrate the use of quotients, let us define the type of _unordered_ pairs of elements of a type {lean}`α` as a quotient of the type {lean}`α × α`. First, we define the relevant equivalence relation: ::: ```lean private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv ``` The next step is to prove that {leanRef}`eqv` is in fact an equivalence relation, which is to say, it is reflexive, symmetric and transitive. We can prove these three facts in a convenient and readable way by using dependent pattern matching to perform case-analysis and break the hypotheses into pieces that are then reassembled to produce the conclusion. 
```lean private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv ------ private theorem eqv.refl (p : α × α) : p ~ p := Or.inl ⟨rfl, rfl⟩ private theorem eqv.symm : ∀ {p₁ p₂ : α × α}, p₁ ~ p₂ → p₂ ~ p₁ | (a₁, a₂), (b₁, b₂), (Or.inl ⟨a₁b₁, a₂b₂⟩) => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (Or.inr ⟨a₁b₂, a₂b₁⟩) => Or.inr (by simp_all) private theorem eqv.trans : ∀ {p₁ p₂ p₃ : α × α}, p₁ ~ p₂ → p₂ ~ p₃ → p₁ ~ p₃ | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inl (by simp_all) private theorem is_equivalence : Equivalence (@eqv α) := { refl := eqv.refl, symm := eqv.symm, trans := eqv.trans } ``` :::leanFirst Now that we have proved that {leanRef}`eqv` is an equivalence relation, we can construct a {leanRef}`Setoid (α × α)`, and use it to define the type {leanRef}`UProd α` of unordered pairs. 
```lean private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv private theorem eqv.refl (p : α × α) : p ~ p := Or.inl ⟨rfl, rfl⟩ private theorem eqv.symm : ∀ {p₁ p₂ : α × α}, p₁ ~ p₂ → p₂ ~ p₁ | (a₁, a₂), (b₁, b₂), (Or.inl ⟨a₁b₁, a₂b₂⟩) => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (Or.inr ⟨a₁b₂, a₂b₁⟩) => Or.inr (by simp_all) private theorem eqv.trans : ∀ {p₁ p₂ p₃ : α × α}, p₁ ~ p₂ → p₂ ~ p₃ → p₁ ~ p₃ | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inl (by simp_all) private theorem is_equivalence : Equivalence (@eqv α) := { refl := eqv.refl, symm := eqv.symm, trans := eqv.trans } ------ instance uprodSetoid (α : Type u) : Setoid (α × α) where r := eqv iseqv := is_equivalence def UProd (α : Type u) : Type u := Quotient (uprodSetoid α) namespace UProd def mk {α : Type} (a₁ a₂ : α) : UProd α := Quotient.mk' (a₁, a₂) notation "{ " a₁ ", " a₂ " }" => mk a₁ a₂ end UProd ``` ::: :::setup ``` private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv private theorem eqv.refl (p : α × α) : p ~ p := Or.inl ⟨rfl, rfl⟩ private theorem eqv.symm : ∀ {p₁ p₂ : α × α}, p₁ ~ p₂ → p₂ ~ p₁ | (a₁, a₂), (b₁, b₂), (Or.inl ⟨a₁b₁, a₂b₂⟩) => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (Or.inr ⟨a₁b₂, a₂b₁⟩) => Or.inr (by simp_all) private theorem eqv.trans : ∀ {p₁ p₂ p₃ : α × α}, p₁ ~ p₂ → p₂ ~ p₃ → p₁ ~ p₃ | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, 
a₂b₁⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inl (by simp_all) private theorem is_equivalence : Equivalence (@eqv α) := { refl := eqv.refl, symm := eqv.symm, trans := eqv.trans } instance uprodSetoid (α : Type u) : Setoid (α × α) where r := eqv iseqv := is_equivalence def UProd (α : Type u) : Type u := Quotient (uprodSetoid α) namespace UProd def mk {α : Type} (a₁ a₂ : α) : UProd α := Quotient.mk' (a₁, a₂) notation "{ " a₁ ", " a₂ " }" => mk a₁ a₂ end UProd variable (a₁ a₂ : α) ``` Notice that we locally define the notation {lean}`{a₁, a₂}` for unordered pairs as {lean}`Quotient.mk' (a₁, a₂)`. This is useful for illustrative purposes, but it is not a good idea in general, since the notation will shadow other uses of curly brackets, such as for records and sets. We can easily prove that {lean}`{a₁, a₂} = {a₂, a₁}` using {lean}`Quot.sound`, since we have {lean}`(a₁, a₂) ~ (a₂, a₁)`. ::: ```lean private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv private theorem eqv.refl (p : α × α) : p ~ p := Or.inl ⟨rfl, rfl⟩ private theorem eqv.symm : ∀ {p₁ p₂ : α × α}, p₁ ~ p₂ → p₂ ~ p₁ | (a₁, a₂), (b₁, b₂), (Or.inl ⟨a₁b₁, a₂b₂⟩) => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (Or.inr ⟨a₁b₂, a₂b₁⟩) => Or.inr (by simp_all) private theorem eqv.trans : ∀ {p₁ p₂ p₃ : α × α}, p₁ ~ p₂ → p₂ ~ p₃ → p₁ ~ p₃ | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inl (by simp_all) private theorem is_equivalence : Equivalence (@eqv α) := { refl := eqv.refl, symm := eqv.symm, trans := eqv.trans } instance uprodSetoid (α : Type u) : 
Setoid (α × α) where r := eqv iseqv := is_equivalence def UProd (α : Type u) : Type u := Quotient (uprodSetoid α) namespace UProd def mk {α : Type} (a₁ a₂ : α) : UProd α := Quotient.mk' (a₁, a₂) notation "{ " a₁ ", " a₂ " }" => mk a₁ a₂ ------ theorem mk_eq_mk (a₁ a₂ : α) : {a₁, a₂} = {a₂, a₁} := Quot.sound (Or.inr ⟨rfl, rfl⟩) ------ end UProd ``` :::leanFirst To complete the example, given {leanRef}`a : α` and {leanRef}`u : UProd α`, we define the proposition {leanRef (in := "mem (a : α) (u : UProd α)")}`a`{lit}` ∈ `{leanRef (in := "mem (a : α) (u : UProd α)")}`u` which should hold if {leanRef (in := "mem (a : α) (u : UProd α)")}`a` is one of the elements of the unordered pair {leanRef (in := "mem (a : α) (u : UProd α)")}`u`. First, we define a similar proposition {leanRef}`mem_fn`{leanRef (in := "mem (a : α) (u : UProd α)")}` a`{leanRef (in := "mem (a : α) (u : UProd α)")}` u` on (ordered) pairs; then we show that {leanRef}`mem_fn` respects the equivalence relation {leanRef}`eqv` with the lemma {leanRef}`mem_respects`. This is an idiom that is used extensively in the Lean standard library. 
```lean set_option linter.unusedVariables false private def eqv (p₁ p₂ : α × α) : Prop := (p₁.1 = p₂.1 ∧ p₁.2 = p₂.2) ∨ (p₁.1 = p₂.2 ∧ p₁.2 = p₂.1) infix:50 " ~ " => eqv private theorem eqv.refl (p : α × α) : p ~ p := Or.inl ⟨rfl, rfl⟩ private theorem eqv.symm : ∀ {p₁ p₂ : α × α}, p₁ ~ p₂ → p₂ ~ p₁ | (a₁, a₂), (b₁, b₂), (Or.inl ⟨a₁b₁, a₂b₂⟩) => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (Or.inr ⟨a₁b₂, a₂b₁⟩) => Or.inr (by simp_all) private theorem eqv.trans : ∀ {p₁ p₂ p₃ : α × α}, p₁ ~ p₂ → p₂ ~ p₃ → p₁ ~ p₃ | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inl (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inl ⟨a₁b₁, a₂b₂⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inl ⟨b₁c₁, b₂c₂⟩ => Or.inr (by simp_all) | (a₁, a₂), (b₁, b₂), (c₁, c₂), Or.inr ⟨a₁b₂, a₂b₁⟩, Or.inr ⟨b₁c₂, b₂c₁⟩ => Or.inl (by simp_all) private theorem is_equivalence : Equivalence (@eqv α) := { refl := eqv.refl, symm := eqv.symm, trans := eqv.trans } instance uprodSetoid (α : Type u) : Setoid (α × α) where r := eqv iseqv := is_equivalence def UProd (α : Type u) : Type u := Quotient (uprodSetoid α) namespace UProd def mk {α : Type} (a₁ a₂ : α) : UProd α := Quotient.mk' (a₁, a₂) notation "{ " a₁ ", " a₂ " }" => mk a₁ a₂ theorem mk_eq_mk (a₁ a₂ : α) : {a₁, a₂} = {a₂, a₁} := Quot.sound (Or.inr ⟨rfl, rfl⟩) ------ private def mem_fn (a : α) : α × α → Prop | (a₁, a₂) => a = a₁ ∨ a = a₂ -- auxiliary lemma for proving mem_respects private theorem mem_swap {a : α} : ∀ {p : α × α}, mem_fn a p = mem_fn a (⟨p.2, p.1⟩) | (a₁, a₂) => by apply propext apply Iff.intro . intro | Or.inl h => exact Or.inr h | Or.inr h => exact Or.inl h . 
intro | Or.inl h => exact Or.inr h | Or.inr h => exact Or.inl h private theorem mem_respects : {p₁ p₂ : α × α} → (a : α) → p₁ ~ p₂ → mem_fn a p₁ = mem_fn a p₂ | (a₁, a₂), (b₁, b₂), a, Or.inl ⟨a₁b₁, a₂b₂⟩ => by simp_all | (a₁, a₂), (b₁, b₂), a, Or.inr ⟨a₁b₂, a₂b₁⟩ => by simp_all only apply mem_swap def mem (a : α) (u : UProd α) : Prop := Quot.liftOn u (fun p => mem_fn a p) (fun p₁ p₂ e => mem_respects a e) infix:50 (priority := high) " ∈ " => mem theorem mem_mk_left (a b : α) : a ∈ {a, b} := Or.inl rfl theorem mem_mk_right (a b : α) : b ∈ {a, b} := Or.inr rfl theorem mem_or_mem_of_mem_mk {a b c : α} : c ∈ {a, b} → c = a ∨ c = b := fun h => h --------- end UProd ``` ::: For convenience, the standard library also defines {lean}`Quotient.lift₂` for lifting binary functions, and {lit}`Quotient.ind₂` for induction on two variables. :::setup ``` variable (α : Sort u) (β : α → Sort v) (f₁ f₂ f : (x : α) → β x) (a : α) def extfun (α : Sort u) (β : α → Sort v) := Quot (fun (f g : (x : α) → β x) => ∀ x, f x = g x) def extfun_app {α β} : extfun α β → (x : α) → β x := fun f x => Quot.lift (· x) (by intros; simp [*]) f ``` We close this section with some hints as to why the quotient construction implies function extensionality. It is not hard to show that extensional equality on the {lean}`(x : α) → β x` is an equivalence relation, and so we can consider the type {lean}`extfun α β` of functions “up to equivalence.” Of course, application respects that equivalence in the sense that if {lean}`f₁` is equivalent to {lean}`f₂`, then {lean}`f₁ a` is equal to {lean}`f₂ a`. Thus application gives rise to a function {lean}`extfun_app : extfun α β → (x : α) → β x`. But for every {lean}`f`, {lean}`extfun_app (.mk _ f)` is definitionally equal to {lean}`fun x => f x`, which is in turn definitionally equal to {lean}`f`. 
So, when {lean}`f₁` and {lean}`f₂` are extensionally equal, we have the following chain of equalities: ```lean variable {α : Sort u} {β : α → Sort v} def extfun (α : Sort u) (β : α → Sort v) := Quot (fun (f g : (x : α) → β x) => ∀ x, f x = g x) def extfun_app {α β} (f : extfun α β) (x : α) : β x := Quot.lift (· x) (by intros; simp [*]) f ---------- example (f₁ f₂ : (x : α) → β x) (h : ∀ x, f₁ x = f₂ x) := calc f₁ _ = extfun_app (.mk _ f₁) := rfl _ = extfun_app (.mk _ f₂) := by rw [Quot.sound]; trivial _ = f₂ := rfl ``` As a result, {leanRef}`f₁` is equal to {leanRef}`f₂`. ::: # Choice %%% tag := "choice" %%% :::leanFirst To state the final axiom defined in the standard library, we need the {leanRef}`Nonempty` type, which is defined as follows: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden ------ class inductive Nonempty (α : Sort u) : Prop where | intro (val : α) : Nonempty α ------ end Hidden ``` ::: :::setup ``` variable {α : Sort u} ``` Because {lean}`Nonempty α` has type {lean}`Prop` and its constructor contains data, it can only eliminate to {lean}`Prop`. In fact, {lean}`Nonempty α` is equivalent to {lean}`∃ x : α, True`: ::: ```lean example (α : Type u) : Nonempty α ↔ ∃ x : α, True := Iff.intro (fun ⟨a⟩ => ⟨a, trivial⟩) (fun ⟨a, h⟩ => ⟨a⟩) ``` Our axiom of choice is now expressed simply as follows: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden universe u ------ axiom choice {α : Sort u} : Nonempty α → α ------ end Hidden ``` :::setup ``` variable {α : Sort u} {h : Nonempty α} open Classical ``` Given only the assertion {lean}`h` that {lean}`α` is nonempty, {lean}`choice h` magically produces an element of {lean}`α`. Of course, this blocks any meaningful computation: by the interpretation of {lean}`Prop`, {lean}`h` contains no information at all as to how to find such an element. 
::: This is found in the {lit}`Classical` namespace, so the full name of the theorem is {lean}`Classical.choice`. The choice principle is equivalent to the principle of *indefinite description*, which can be expressed with subtypes as follows: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) namespace Hidden universe u axiom choice {α : Sort u} : Nonempty α → α ------ noncomputable def indefiniteDescription {α : Sort u} (p : α → Prop) (h : ∃ x, p x) : {x // p x} := choice <| let ⟨x, px⟩ := h; ⟨⟨x, px⟩⟩ ------ end Hidden ``` :::setup ``` variable {α : Sort u} {h : Nonempty α} open Classical ``` Because it depends on {lean}`choice`, Lean cannot generate executable code for {lean}`indefiniteDescription`, and so requires us to mark the definition as {kw}`noncomputable`. Also in the {lit}`Classical` namespace, the function {lean}`choose` and the property {lean}`choose_spec` decompose the two parts of the output of {lean}`indefiniteDescription`: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) open Classical namespace Hidden ------ variable {α : Sort u} {p : α → Prop} noncomputable def choose (h : ∃ x, p x) : α := (indefiniteDescription p h).val theorem choose_spec (h : ∃ x, p x) : p (choose h) := (indefiniteDescription p h).property ------ end Hidden ``` The {lean}`choice` principle also erases the distinction between the property of being {lean}`Nonempty` and the more constructive property of being {lean}`Inhabited`: ```lean open Classical ------ noncomputable def inhabited_of_nonempty (h : Nonempty α) : Inhabited α := choice (let ⟨a⟩ := h; ⟨⟨a⟩⟩) ``` ::: In the next section, we will see that {lean}`propext`, {lean}`funext`, and {leanRef}`choice`, taken together, imply the law of the excluded middle and the decidability of all propositions. 
Using those, one can strengthen the principle of indefinite description as follows: ::::setup ``` open Classical ``` ```signature strongIndefiniteDescription {α : Sort u} (p : α → Prop) (h : Nonempty α) : {x // (∃ (y : α), p y) → p x} ``` Assuming the ambient type {leanRef}`α` is nonempty, {leanRef}`strongIndefiniteDescription`{lit}` `{leanRef}`p` produces an element of {leanRef}`α` satisfying {leanRef}`p` if there is one. The data component of this definition is conventionally known as *Hilbert's epsilon function*: ```signature epsilon {α : Sort u} [h : Nonempty α] (p : α → Prop) : α ``` ```signature epsilon_spec {α : Sort u} {p : α → Prop} (hex : ∃ (y : α), p y) : p (@epsilon _ (nonempty_of_exists hex) p) ``` :::: # The Law of the Excluded Middle %%% tag := "the-law-of-the-excluded-middle" %%% The law of the excluded middle is the following: ```signature Classical.em : ∀ (p : Prop), p ∨ ¬p ``` [Diaconescu's theorem](https://en.wikipedia.org/wiki/Diaconescu%27s_theorem) states that the axiom of choice is sufficient to derive the law of excluded middle. More precisely, it shows that the law of the excluded middle follows from {lean}`Classical.choice`, {lean}`propext`, and {lean}`funext`. We sketch the proof that is found in the standard library. 
```save emProof -- ANCHOR: emSetup open Classical theorem em (p : Prop) : p ∨ ¬p := by let U (x : Prop) : Prop := x = True ∨ p let V (x : Prop) : Prop := x = False ∨ p have exU : ∃ x, U x := ⟨True, Or.inl rfl⟩ have exV : ∃ x, V x := ⟨False, Or.inl rfl⟩ -- ^ PROOF_STATE: em1 -- ANCHOR_END: emSetup -- ANCHOR: emChoose let u : Prop := choose exU let v : Prop := choose exV have u_def : U u := choose_spec exU have v_def : V v := choose_spec exV -- ^ PROOF_STATE: em2 -- ANCHOR_END: emChoose -- ANCHOR: emCases have not_uv_or_p : u ≠ v ∨ p := by match u_def, v_def with | Or.inr h, _ => exact Or.inr h | _, Or.inr h => exact Or.inr h | Or.inl hut, Or.inl hvf => apply Or.inl simp [hvf, hut, true_ne_false] -- ANCHOR_END: emCases -- ANCHOR: emNext have p_implies_uv : p → u = v := fun hp => have hpred : U = V := funext fun x => have hl : (x = True ∨ p) → (x = False ∨ p) := fun _ => Or.inr hp have hr : (x = False ∨ p) → (x = True ∨ p) := fun _ => Or.inr hp show (x = True ∨ p) = (x = False ∨ p) from propext (Iff.intro hl hr) have h₀ : ∀ exU exV, @choose _ U exU = @choose _ V exV := by rw [hpred]; intros; rfl show u = v from h₀ _ _ -- ANCHOR_END: emNext -- ANCHOR: emDone match not_uv_or_p with | Or.inl hne => exact Or.inr (mt p_implies_uv hne) | Or.inr h => exact Or.inl h -- ANCHOR_END: emDone ``` :::leanFirst First, we import the necessary axioms, and define two predicates {leanRef}`U` and {leanRef}`V`: ```savedAnchor emSetup open Classical theorem em (p : Prop) : p ∨ ¬p := by let U (x : Prop) : Prop := x = True ∨ p let V (x : Prop) : Prop := x = False ∨ p have exU : ∃ x, U x := ⟨True, Or.inl rfl⟩ have exV : ∃ x, V x := ⟨False, Or.inl rfl⟩ ``` ::: If {leanRef}`p` is true, then every element of {lean}`Prop` is in both {leanRef}`U` and {leanRef}`V`. If {leanRef}`p` is false, then {leanRef}`U` is the singleton {leanRef}`True`, and {leanRef}`V` is the singleton {leanRef}`False`. 
:::leanFirst Next, we use {leanRef}`choose` to choose an element from each of {leanRef}`U` and {leanRef}`V`: ```savedAnchor emChoose let u : Prop := choose exU let v : Prop := choose exV have u_def : U u := choose_spec exU have v_def : V v := choose_spec exV ``` ::: :::leanFirst Each of {leanRef}`U` and {leanRef}`V` is a disjunction, so {leanRef}`u_def` and {leanRef}`v_def` represent four cases. In one of these cases, {leanRef}`u = True` and {leanRef}`v = False`, and in all the other cases, {leanRef}`p` is true. Thus we have: ```savedAnchor emCases have not_uv_or_p : u ≠ v ∨ p := by match u_def, v_def with | Or.inr h, _ => exact Or.inr h | _, Or.inr h => exact Or.inr h | Or.inl hut, Or.inl hvf => apply Or.inl simp [hvf, hut, true_ne_false] ``` ::: On the other hand, if {leanRef}`p` is true, then, by function extensionality and propositional extensionality, {leanRef}`U` and {leanRef}`V` are equal. By the definition of {leanRef}`u` and {leanRef}`v`, this implies that they are equal as well. ```savedAnchor emNext have p_implies_uv : p → u = v := fun hp => have hpred : U = V := funext fun x => have hl : (x = True ∨ p) → (x = False ∨ p) := fun _ => Or.inr hp have hr : (x = False ∨ p) → (x = True ∨ p) := fun _ => Or.inr hp show (x = True ∨ p) = (x = False ∨ p) from propext (Iff.intro hl hr) have h₀ : ∀ exU exV, @choose _ U exU = @choose _ V exV := by rw [hpred]; intros; rfl show u = v from h₀ _ _ ``` Putting these last two facts together yields the desired conclusion: ```savedAnchor emDone match not_uv_or_p with | Or.inl hne => exact Or.inr (mt p_implies_uv hne) | Or.inr h => exact Or.inl h ``` Consequences of excluded middle include double-negation elimination, proof by cases, and proof by contradiction, all of which are described in the section on {ref "classical-logic"}[classical logic]. 
The law of the excluded middle and propositional extensionality imply propositional completeness: ```lean (suppressNamespaces := "Hidden") (allowVisible := false) open Classical theorem propComplete (a : Prop) : a = True ∨ a = False := match em a with | Or.inl ha => Or.inl (propext (Iff.intro (fun _ => True.intro) (fun _ => ha))) | Or.inr hn => Or.inr (propext (Iff.intro (fun h => hn h) (fun h => False.elim h))) ``` Together with choice, we also get the stronger principle that every proposition is decidable. Recall that the class of {lean}`Decidable` propositions is defined as follows: ```lean namespace Hidden ------ class inductive Decidable (p : Prop) where | isFalse (h : ¬p) : Decidable p | isTrue (h : p) : Decidable p ------ end Hidden ``` ::::setup ``` variable {p : Prop} {f : α → β} {c : Prop} [Decidable c] {t e : α} open Classical (choose propDecidable) ``` :::leanFirst In contrast to {lean}`p ∨ ¬ p`, which can only eliminate to {lean}`Prop`, the type {lean}`Decidable p` is equivalent to the sum type {lit}`Sum p (¬ p)`, which can eliminate to any type. It is this data that is needed to write an if-then-else expression. As an example of classical reasoning, we use {lean}`choose` to show that if {lean}`f : α → β` is injective and {lean}`α` is inhabited, then {lean}`f` has a left inverse. To define the left inverse {leanRef}`linv`, we use a dependent if-then-else expression. Recall that {lean}`if h : c then t else e` is notation for {lean}`dite c (fun h : c => t) (fun h : ¬ c => e)`. In the definition of {leanRef}`linv`, choice is used twice: first, to show that {leanRef}`(∃ a : α, f a = b)` is “decidable,” and then to choose an {leanRef}`a` such that {leanRef}`f a = b`. Notice that {lean}`propDecidable` is a scoped instance and is activated by the {leanRef}`open Classical` command. We use this instance to justify the {kw}`if`-{kw}`then`-{kw}`else` expression. (See also the discussion in {ref "decidable-propositions"}[Decidable Propositions]). 
```lean open Classical noncomputable def linv [Inhabited α] (f : α → β) : β → α := fun b : β => if ex : (∃ a : α, f a = b) then choose ex else default theorem linv_comp_self {f : α → β} [Inhabited α] (inj : ∀ {a b}, f a = f b → a = b) : linv f ∘ f = id := funext fun a => have ex : ∃ a₁ : α, f a₁ = f a := ⟨a, rfl⟩ have feq : f (choose ex) = f a := choose_spec ex calc linv f (f a) _ = choose ex := rfl _ = a := inj feq ``` From a classical point of view, {leanRef}`linv` is a function. From a constructive point of view, it is unacceptable; because there is no way to implement such a function in general, the construction is not informative. ::: ::::
theorem_proving_in_lean4/book/TPiL/StructuresAndRecords.lean
import VersoManual import TPiL.Examples open Verso.Genre Manual open TPiL #doc (Manual) "Structures and Records" => %%% tag := "structures-and-records" %%% We have seen that Lean's foundational system includes inductive types. We have, moreover, noted that it is a remarkable fact that it is possible to construct a substantial edifice of mathematics based on nothing more than the type universes, dependent arrow types, and inductive types; everything else follows from those. The Lean standard library contains many instances of inductive types (e.g., {lean}`Nat`, {lean}`Prod`, {lean}`List`), and even the logical connectives are defined using inductive types. Recall that a non-recursive inductive type that contains only one constructor is called a _structure_ or _record_. The product type is a structure, as is the dependent product (Sigma) type. In general, whenever we define a structure {lit}`S`, we usually define _projection_ functions that allow us to “destruct” each instance of {lit}`S` and retrieve the values that are stored in its fields. The functions {lean}`Prod.fst` and {lean}`Prod.snd`, which return the first and second elements of a pair, are examples of such projections. When writing programs or formalizing mathematics, it is not uncommon to define structures containing many fields. The {kw}`structure` command, available in Lean, provides infrastructure to support this process. When we define a structure using this command, Lean automatically generates all the projection functions. The {kw}`structure` command also allows us to define new structures based on previously defined ones. Moreover, Lean provides convenient notation for defining instances of a given structure. # Declaring Structures %%% tag := "declaring-structures" %%% The structure command is essentially a “front end” for defining inductive data types. Every {kw}`structure` declaration introduces a namespace with the same name. 
The general form is as follows: ``` structure <name> <parameters> <parent-structures> where <constructor> :: <fields> ``` Most parts are optional. Here is an example: ```lean structure Point (α : Type u) where mk :: x : α y : α ``` :::setup ``` structure Point (α : Type u) where mk :: x : α y : α variable (p : Point α) (a b : α) ``` Values of type {leanRef}`Point` are created using {lean}`Point.mk a b`, and the fields of a point {lean}`p` are accessed using {lean}`Point.x p` and {lean}`Point.y p` (but {lean}`p.x` and {lean}`p.y` also work, see below). The structure command also generates useful recursors and theorems. Here are some of the constructions generated for the declaration above. ::: ```lean structure Point (α : Type u) where mk :: x : α y : α ------ -- a Type #check Point -- the eliminator #check @Point.rec -- the constructor #check @Point.mk -- @Point.mk : {α : Type u_1} → α → α → Point α -- a projection #check @Point.x -- @Point.x : {α : Type u_1} → Point α → α -- a projection #check @Point.y -- @Point.y : {α : Type u_1} → Point α → α ``` If the constructor name is not provided, then a constructor is named {lit}`mk` by default. :::leanFirst Here are some simple theorems and expressions that use the generated constructions. As usual, you can avoid the prefix {leanRef}`Point` by using the command {leanRef}`open Point`. ```lean structure Point (α : Type u) where x : α y : α ------ #eval Point.x (Point.mk 10 20) -- 10 #eval Point.y (Point.mk 10 20) -- 20 open Point example (a b : α) : x (mk a b) = a := rfl example (a b : α) : y (mk a b) = b := rfl ``` ::: :::setup ``` structure Point (α : Type u) where x : α y : α variable (p : Point Nat) ``` Given {lean}`p : Point Nat`, the dot notation {lean}`p.x` is shorthand for {lean}`Point.x p`. This provides a convenient way of accessing the fields of a structure. 
::: ```lean structure Point (α : Type u) where x : α y : α ------ def p := Point.mk 10 20 #check p.x -- p.x : Nat #eval p.x -- 10 #eval p.y -- 20 ``` :::leanFirst The dot notation is convenient not just for accessing the projections of a record, but also for applying functions defined in a namespace with the same name. Recall from the {ref "conjunction"}[Conjunction section] that if {leanRef}`p` has type {leanRef}`Point`, the expression {lit}`p.foo` is interpreted as {lit}`Point.foo p`, assuming that the first non-implicit argument to {lit}`foo` has type {leanRef}`Point`. The expression {leanRef}`p.add q` is therefore shorthand for {lit}`Point.add p q` in the example below. ```lean structure Point (α : Type u) where x : α y : α deriving Repr def Point.add (p q : Point Nat) := mk (p.x + q.x) (p.y + q.y) def p : Point Nat := Point.mk 1 2 def q : Point Nat := Point.mk 3 4 #eval p.add q -- { x := 4, y := 6 } ``` ::: :::setup ``` structure Point (α : Type u) where x : α y : α deriving Repr variable {α : Type u} ``` In the next chapter, you will learn how to define a function like {leanRef}`add` so that it works generically for elements of {lean}`Point α` rather than just {lean}`Point Nat`, assuming {lean}`α` has an associated addition operation. ::: :::leanFirst More generally, given an expression {lit}`p.foo x y z` where {lit}`p : Point`, Lean will insert {lit}`p` at the first argument to {lit}`Point.foo` of type {lit}`Point`. For example, with the definition of scalar multiplication below, {leanRef}`p.smul 3` is interpreted as {leanRef}`Point.smul 3 p`. 
```lean structure Point (α : Type u) where x : α y : α deriving Repr ------ def Point.smul (n : Nat) (p : Point Nat) := Point.mk (n * p.x) (n * p.y) def p : Point Nat := Point.mk 1 2 #eval p.smul 3 -- { x := 3, y := 6 } example {p : Point Nat} : p.smul 3 = Point.smul 3 p := rfl ``` ::: It is common to use a similar trick with the {name}`List.map` function, which takes a list as its second non-implicit argument: ```lean #check @List.map def xs : List Nat := [1, 2, 3] def f : Nat → Nat := fun x => x * x #eval xs.map f -- [1, 4, 9] example {xs : List α} {f : α → β} : xs.map f = List.map f xs := rfl ``` Here {leanRef}`xs.map f` is interpreted as {leanRef}`List.map f xs`. # Objects %%% tag := "objects" %%% We have been using constructors to create elements of a structure type. For structures containing many fields, this is often inconvenient, because we have to remember the order in which the fields were defined. Lean therefore provides the following alternative notations for defining elements of a structure type. ``` { (<field-name> := <expr>)* : structure-type } or { (<field-name> := <expr>)* } ``` The suffix {lit}`: structure-type` can be omitted whenever the name of the structure can be inferred from the expected type. For example, we use this notation to define “points.” The order that the fields are specified does not matter, so all the expressions below define the same point. ```lean structure Point (α : Type u) where x : α y : α #check { x := 10, y := 20 : Point Nat } -- { x := 10, y := 20 } : Point Nat #check { y := 20, x := 10 : Point _ } -- { x := 10, y := 20 } : Point Nat #check ({ x := 10, y := 20 } : Point Nat) -- { x := 10, y := 20 } : Point Nat example : Point Nat := { y := 20, x := 10 } ``` Fields can be marked as implicit using curly braces. Implicit fields become implicit parameters to the constructor. If the value of a field is not specified, Lean tries to infer it. 
If the unspecified fields cannot be inferred, Lean flags an error indicating the corresponding placeholder could not be synthesized. ```lean structure MyStruct where {α : Type u} {β : Type v} a : α b : β #check { a := 10, b := true : MyStruct } ``` _Record update_ is another common operation which amounts to creating a new record object by modifying the value of one or more fields in an old one. Lean allows you to specify that unassigned fields in the specification of a record should be taken from a previously defined structure object {lit}`s` by adding the annotation {lit}`s `{kw}`with` before the field assignments. If more than one record object is provided, then they are visited in order until Lean finds one that contains the unspecified field. Lean raises an error if any of the field names remain unspecified after all the objects are visited. ```lean structure Point (α : Type u) where x : α y : α deriving Repr def p : Point Nat := { x := 1, y := 2 } #eval { p with y := 3 } -- { x := 1, y := 3 } #eval { p with x := 4 } -- { x := 4, y := 2 } structure Point3 (α : Type u) where x : α y : α z : α def q : Point3 Nat := { x := 5, y := 5, z := 5 } def r : Point3 Nat := { p, q with x := 6 } example : r.x = 6 := rfl example : r.y = 2 := rfl example : r.z = 5 := rfl ``` # Inheritance %%% tag := "inheritance" %%% We can _extend_ existing structures by adding new fields. This feature allows us to simulate a form of _inheritance_. ```lean structure Point (α : Type u) where x : α y : α inductive Color where | red | green | blue structure ColorPoint (α : Type u) extends Point α where c : Color ``` In the next example, we define a structure using multiple inheritance, and then define an object using objects of the parent structures. 
```lean structure Point (α : Type u) where x : α y : α z : α structure RGBValue where red : Nat green : Nat blue : Nat structure RedGreenPoint (α : Type u) extends Point α, RGBValue where no_blue : blue = 0 def p : Point Nat := { x := 10, y := 10, z := 20 } def rgp : RedGreenPoint Nat := { p with red := 200, green := 40, blue := 0, no_blue := rfl } example : rgp.x = 10 := rfl example : rgp.red = 200 := rfl ```
theorem_proving_in_lean4/book/TPiL/TypeClasses.lean
import VersoManual import TPiL.Examples open Verso.Genre open Verso.Genre.Manual hiding tactic open TPiL #doc (Manual) "Type Classes" => %%% tag := "type-classes" %%% Type classes were introduced as a principled way of enabling ad-hoc polymorphism in functional programming languages. We first observe that it would be easy to implement an ad-hoc polymorphic function (such as addition) if the function simply took the type-specific implementation of addition as an argument and then called that implementation on the remaining arguments. For example, suppose we declare a structure in Lean to hold implementations of addition. ```lean namespace Ex ------ structure Add (α : Type) where add : α → α → α #check @Add.add -- @Add.add : {α : Type} → Add α → α → α → α ------ end Ex ``` ::::setup ``` namespace Ex structure Add (α : Type) where add : α → α → α def double (s : Add α) (x : α) : α := s.add x x variable {n : Nat} ``` :::leanFirst In the above Lean code, the field {leanRef}`add` has type {lean}`Add.add : {α : Type} → Add α → α → α → α` where the curly braces around the type {leanRef}`α` mean that it is an implicit argument. We could implement {leanRef}`double` by: ```lean namespace Ex structure Add (α : Type) where add : α → α → α ------ def double (s : Add α) (x : α) : α := s.add x x #eval double { add := Nat.add } 10 -- 20 #eval double { add := Nat.mul } 10 -- 100 #eval double { add := Int.add } 10 -- 20 ------ end Ex ``` ::: Note that you can double a natural number {lean}`n` by {lean}`double { add := Nat.add } n`. Of course, it would be highly cumbersome for users to manually pass the implementations around in this way. Indeed, it would defeat most of the potential benefits of ad-hoc polymorphism. :::: :::leanFirst The main idea behind type classes is to make arguments such as {leanRef}`Add α` implicit, and to use a database of user-defined instances to synthesize the desired instances automatically through a process known as typeclass resolution. 
In Lean, by changing {kw}`structure` to {kw}`class` in the example above, the type of {leanRef}`Add.add` becomes: ```lean namespace Ex ------ class Add (α : Type) where add : α → α → α #check @Add.add -- @Add.add : {α : Type} → [self : Add α] → α → α → α ------ end Ex ``` ::: where the square brackets indicate that the argument of type {leanRef}`Add α` is _instance implicit_, i.e. that it should be synthesized using typeclass resolution. This version of {leanRef}`add` is the Lean analogue of the Haskell term {lit}`add :: Add a => a -> a -> a`. Similarly, we can register instances by: ```lean namespace Ex class Add (α : Type) where add : α → α → α ------ instance : Add Nat where add := Nat.add instance : Add Int where add := Int.add instance : Add Float where add := Float.add ------ end Ex ``` ::::leanFirst :::setup ``` namespace Ex class Add (α : Type) where add : α → α → α ------ instance : Add Nat where add := Nat.add instance : Add Int where add := Int.add instance : Add Float where add := Float.add variable (n m : Nat) ``` Then for {lean}`n : Nat` and {lean}`m : Nat`, the term {lean}`Add.add n m` triggers typeclass resolution with the goal of {lean}`Add Nat`, and typeclass resolution will synthesize the instance for {lean}`Nat` above. We can now reimplement {leanRef}`double` using an instance implicit by: ::: ```lean namespace Ex class Add (α : Type) where add : α → α → α instance : Add Nat where add := Nat.add instance : Add Int where add := Int.add instance : Add Float where add := Float.add ------ def double [Add α] (x : α) : α := Add.add x x #check @double -- @double : {α : Type} → [Add α] → α → α #eval double 10 -- 20 #eval double (10 : Int) -- 20 #eval double (7 : Float) -- 14.000000 #eval double (239.0 + 2) -- 482.000000 ------ end Ex ``` :::: :::leanFirst In general, instances may depend on other instances in complicated ways. 
For example, you can declare an instance stating that if {leanRef}`α` has addition, then {leanRef}`Array α` has addition: ```lean instance [Add α] : Add (Array α) where add x y := Array.zipWith (· + ·) x y #eval Add.add #[1, 2] #[3, 4] -- #[4, 6] #eval #[1, 2] + #[3, 4] -- #[4, 6] ``` ::: Note that {leanRef}`(· + ·)` is notation for {lean}`fun x y => x + y` in Lean. :::setup ``` def head [Inhabited α] (xs : List α) : α := default variable {α : Type u} {x : α} {xs : List α} [Inhabited α] ``` The example above demonstrates how type classes are used to overload notation. Now, we explore another application. We often need an arbitrary element of a given type. Recall that types may not have any elements in Lean. It often happens that we would like a definition to return an arbitrary element in a “corner case.” For example, we may like the expression {lean}`head xs` to be of type {lean}`α` when {lean}`xs` is of type {lean}`List α`. Similarly, many theorems hold under the additional assumption that a type is not empty. For example, if {lean}`α` is a type, {lean}`∃ x : α, x = x` is true only if {lean}`α` is not empty. The standard library defines a type class {lean}`Inhabited` to enable type class inference to infer a “default” element of an inhabited type. Let us start with the first step of the program above, declaring an appropriate class: ```lean namespace Ex ------ class Inhabited (α : Type u) where default : α #check @Inhabited.default -- @Inhabited.default : {α : Type u_1} → [self : Inhabited α] → α ------ end Ex ``` Note {leanRef}`Inhabited.default` doesn't have any explicit arguments. An element of the class {lean}`Inhabited α` is simply an expression of the form {lean}`Inhabited.mk x`, for some element {lean}`x : α`. The projection {lean}`Inhabited.default` will allow us to “extract” such an element of {lean}`α` from an element of {lean}`Inhabited α`. 
Now we populate the class with some instances: ::: ```lean namespace Ex class Inhabited (a : Type _) where default : a ------ instance : Inhabited Bool where default := true instance : Inhabited Nat where default := 0 instance : Inhabited Unit where default := () instance : Inhabited Prop where default := True #eval (Inhabited.default : Nat) -- 0 #eval (Inhabited.default : Bool) -- true -------- end Ex ``` You can use the command {kw}`export` to create the alias {lean}`default` for {lean}`Inhabited.default`. ```lean namespace Ex class Inhabited (a : Type _) where default : a instance : Inhabited Bool where default := true instance : Inhabited Nat where default := 0 instance : Inhabited Unit where default := () instance : Inhabited Prop where default := True ------ export Inhabited (default) #eval (default : Nat) -- 0 #eval (default : Bool) -- true ------ end Ex ``` # Chaining Instances %%% tag := "chaining-instances" %%% If that were the extent of type class inference, it would not be all that impressive; it would be simply a mechanism of storing a list of instances for the elaborator to find in a lookup table. What makes type class inference powerful is that one can _chain_ instances. That is, an instance declaration can in turn depend on an implicit instance of a type class. This causes class inference to chain through instances recursively, backtracking when necessary, in a Prolog-like search. 
:::leanFirst For example, the following definition shows that if two types {leanRef}`α` and {leanRef}`β` are inhabited, then so is their product: ```lean instance [Inhabited α] [Inhabited β] : Inhabited (α × β) where default := (default, default) ``` ::: With this added to the earlier instance declarations, type class inference can infer, for example, a default element of {lean}`Nat × Bool`: ```lean namespace Ex class Inhabited (α : Type u) where default : α instance : Inhabited Bool where default := true instance : Inhabited Nat where default := 0 opaque default [Inhabited α] : α := Inhabited.default ------ instance [Inhabited α] [Inhabited β] : Inhabited (α × β) where default := (default, default) #eval (default : Nat × Bool) -- (0, true) ------ end Ex ``` Similarly, we can inhabit function types with suitable constant functions: ```lean instance [Inhabited β] : Inhabited (α → β) where default := fun _ => default ``` As an exercise, try defining default instances for other types, such as {lean}`List` and {lean}`Sum` types. :::setup ``` universe u set_option checkBinderAnnotations false ``` The Lean standard library contains the definition {name}`inferInstance`. It has type {lean}`{α : Sort u} → [i : α] → α`, and is useful for triggering the type class resolution procedure when the expected type is an instance. ::: ```lean #check (inferInstance : Inhabited Nat) -- inferInstance : Inhabited Nat def foo : Inhabited (Nat × Nat) := inferInstance theorem ex : foo.default = (default, default) := rfl ``` :::leanFirst You can use the command {leanRef}`#print` to inspect how simple {leanRef}`inferInstance` is. ```lean #print inferInstance ``` ::: # ToString %%% tag := "ToString" %%% ```setup universe u ``` :::leanFirst The polymorphic method {leanRef}`toString` has type {lean}`{α : Type u} → [ToString α] → α → String`. You implement the instance for your own types and use chaining to convert complex values into strings. 
Lean comes with {lean}`ToString` instances for most builtin types. ```lean structure Person where name : String age : Nat instance : ToString Person where toString p := p.name ++ "@" ++ toString p.age #eval toString { name := "Leo", age := 542 : Person } -- "Leo@542" #eval toString ({ name := "Daniel", age := 18 : Person }, "hello") -- "(Daniel@18, hello)" ``` ::: # Numerals %%% tag := "numerals" %%% Numerals are polymorphic in Lean. You can use a numeral (e.g., {lit}`2`) to denote an element of any type that implements the type class {name}`OfNat`. ```lean structure Rational where num : Int den : Nat inv : den ≠ 0 instance : OfNat Rational n where ofNat := { num := n, den := 1, inv := by decide } instance : ToString Rational where toString r := s!"{r.num}/{r.den}" #eval (2 : Rational) -- 2/1 #check (2 : Rational) -- 2 : Rational #check (2 : Nat) -- 2 : Nat ``` :::setup ``` structure Rational where num : Int den : Nat inv : den ≠ 0 instance : OfNat Rational n where ofNat := { num := n, den := 1, inv := by decide } instance : ToString Rational where toString r := s!"{r.num}/{r.den}" ``` Lean elaborates the terms {lean}`(2 : Nat)` and {lean}`(2 : Rational)` as {lean (type := "Nat")}`@OfNat.ofNat Nat 2 (@instOfNatNat 2)` and {lean}`@OfNat.ofNat Rational 2 (@instOfNatRational 2)` respectively. We say the numerals {lit}`2` occurring in the elaborated terms are _raw_ natural numbers. You can input the raw natural number {lit}`2` using the macro {lean}`nat_lit 2`. ::: ```lean #check nat_lit 2 -- 2 : Nat ``` Raw natural numbers are _not_ polymorphic. The {lean}`OfNat` instance is parametric on the numeral. So, you can define instances for particular numerals. The second argument is often a variable as in the example above, or a _raw_ natural number. 
```lean class Monoid (α : Type u) where unit : α op : α → α → α instance [s : Monoid α] : OfNat α (nat_lit 1) where ofNat := s.unit def getUnit [Monoid α] : α := 1 ``` # Output Parameters %%% tag := "output-parameters" %%% :::setup ``` universe u variable (T : Type u) ``` By default, Lean only tries to synthesize an instance {lean}`Inhabited T` when the term {lean}`T` is known and does not contain missing parts. The following command produces the error {lit}`typeclass instance problem is stuck, it is often due to metavariables` because the type has a missing part (i.e., the {lit}`_`). ::: ```lean /-- error: typeclass instance problem is stuck, it is often due to metavariables Inhabited (Nat × ?m.2) -/ #guard_msgs (error) in #eval (inferInstance : Inhabited (Nat × _)) ``` You can view the parameter of the type class {lean}`Inhabited` as an _input_ value for the type class synthesizer. When a type class has multiple parameters, you can mark some of them as {deftech}_output parameters_. Lean will start type class synthesizer even when these parameters have missing parts. In the following example, we use output parameters to define a _heterogeneous_ polymorphic multiplication. ```lean namespace Ex ------ class HMul (α : Type u) (β : Type v) (γ : outParam (Type w)) where hMul : α → β → γ export HMul (hMul) instance : HMul Nat Nat Nat where hMul := Nat.mul instance : HMul Nat (Array Nat) (Array Nat) where hMul a bs := bs.map (fun b => hMul a b) #eval hMul 4 3 -- 12 #eval hMul 4 #[2, 3, 4] -- #[8, 12, 16] ------ end Ex ``` The parameters {leanRef}`α` and {leanRef}`β` are considered input parameters and {leanRef}`γ` an output one. Given an application {leanRef}`hMul a b`, after the types of {leanRef}`a` and {leanRef}`b` are known, the type class synthesizer is invoked, and the resulting type is obtained from the output parameter {leanRef}`γ`. In the example above, we defined two instances. The first one is the homogeneous multiplication for natural numbers. 
The second is the scalar multiplication for arrays. Note that you chain instances and generalize the second instance. ```lean namespace Ex ------ class HMul (α : Type u) (β : Type v) (γ : outParam (Type w)) where hMul : α → β → γ export HMul (hMul) instance : HMul Nat Nat Nat where hMul := Nat.mul instance : HMul Int Int Int where hMul := Int.mul instance [HMul α β γ] : HMul α (Array β) (Array γ) where hMul a bs := bs.map (fun b => hMul a b) #eval hMul 4 3 -- 12 #eval hMul 4 #[2, 3, 4] -- #[8, 12, 16] #eval hMul (-2) #[3, -1, 4] -- #[-6, 2, -8] #eval hMul 2 #[#[2, 3], #[0, 4]] -- #[#[4, 6], #[0, 8]] ------ end Ex ``` You can use our new scalar array multiplication instance on arrays of type {leanRef}`Array β` with a scalar of type {leanRef}`α` whenever you have an instance {leanRef}`HMul α β γ`. In the last {kw}`#eval`, note that the instance was used twice on an array of arrays. Output parameters are ignored during instance synthesis. Even when instance synthesis occurs in a context in which the values of output parameters are already determined, their values are ignored. Once an instance is found using its input parameters, Lean ensures that the already-known values of the output parameters match those which were found. Lean also features {deftech}_semi-output parameters_, which have some features of input parameters and some features of output parameters. Like input parameters, semi-output parameters are considered when selecting instances. Like output parameters, they can be used to instantiate unknown values. However, they do not do so uniquely. Instance synthesis with semi-output parameters can be more difficult to predict, because the order in which instances are considered can determine which is selected, but it is also more flexible. # Default Instances %%% tag := "default-instances" %%% In the class {leanRef}`HMul`, the parameters {leanRef}`α` and {leanRef}`β` are treated as input values. 
Thus, type class synthesis only starts after these two types are known. This may often be too restrictive. ```lean namespace Ex ------ class HMul (α : Type u) (β : Type v) (γ : outParam (Type w)) where hMul : α → β → γ export HMul (hMul) instance : HMul Int Int Int where hMul := Int.mul def xs : List Int := [1, 2, 3] /-- error: typeclass instance problem is stuck HMul Int ?m.2 (?m.11 y) Note: Lean will not try to resolve this typeclass instance problem because the second type argument to `HMul` is a metavariable. This argument must be fully determined before Lean will try to resolve the typeclass. Hint: Adding type annotations and supplying implicit arguments to functions can give Lean more information for typeclass resolution. For example, if you have a variable `x` that you intend to be a `Nat`, but Lean reports it as having an unresolved type like `?m`, replacing `x` with `(x : Nat)` can get typeclass resolution un-stuck. -/ #guard_msgs (error) in #eval fun y => xs.map (fun x => hMul x y) ------ end Ex ``` The instance {leanRef}`HMul` is not synthesized by Lean because the type of {leanRef}`y` has not been provided. However, it is natural to assume that the type of {leanRef}`y` and {leanRef}`x` should be the same in this kind of situation. We can achieve exactly that using _default instances_. 
```lean namespace Ex ------ class HMul (α : Type u) (β : Type v) (γ : outParam (Type w)) where hMul : α → β → γ export HMul (hMul) @[default_instance] instance : HMul Int Int Int where hMul := Int.mul def xs : List Int := [1, 2, 3] #check fun y => xs.map (fun x => hMul x y) -- fun y => List.map (fun x => hMul x y) xs : Int → List Int ------ end Ex ``` :::setup ``` variable {α : Type u} {β : Type v} {γ : Type w} {a : α} {b : β} {n : Nat} variable [HAdd α β γ] [HSub α β γ] [HMul α β γ] [HDiv α β γ] [HMod α β γ] ``` By tagging the instance above with the attribute {attr}`[default_instance]`, we are instructing Lean to use this instance on pending type class synthesis problems. The actual Lean implementation defines homogeneous and heterogeneous classes for arithmetical operators. Moreover, {lean}`a + b`, {lean}`a * b`, {lean}`a - b`, {lean}`a / b`, and {lean}`a % b` are notations for the heterogeneous versions. The instance {lean}`OfNat Nat n` is the default instance (with priority 100) for the {lean}`OfNat` class. This is why the numeral {lean}`2` has type {lean}`Nat` when the expected type is not known. You can define default instances with higher priority to override the builtin ones. ::: ```lean structure Rational where num : Int den : Nat inv : den ≠ 0 @[default_instance 200] instance : OfNat Rational n where ofNat := { num := n, den := 1, inv := by decide } instance : ToString Rational where toString r := s!"{r.num}/{r.den}" #check 2 -- 2 : Rational ``` :::setup ``` variable {α : Type u} {xs : List α} [Mul α] [OfNat α 2] ``` Priorities are also useful to control the interaction between different default instances. For example, suppose {lean}`xs` has type {lean}`List α`. When elaborating {lean}`xs.map (fun x => 2 * x)`, we want the homogeneous instance for multiplication to have higher priority than the default instance for {lean}`OfNat α 2`. 
This is particularly important when we have implemented only the instance {lean}`HMul α α α`, and did not implement {lean}`HMul Nat α α`. Now, we reveal how the notation {lit}`a * b` is defined in Lean. ::: ```lean namespace Ex ------ class OfNat (α : Type u) (n : Nat) where ofNat : α @[default_instance] instance (n : Nat) : OfNat Nat n where ofNat := n class HMul (α : Type u) (β : Type v) (γ : outParam (Type w)) where hMul : α → β → γ class Mul (α : Type u) where mul : α → α → α @[default_instance 10] instance [Mul α] : HMul α α α where hMul a b := Mul.mul a b infixl:70 " * " => HMul.hMul ------ end Ex ``` The {leanRef}`Mul` class is convenient for types that only implement the homogeneous multiplication. # Local Instances %%% tag := "local-instances" %%% Type classes are implemented using attributes in Lean. Thus, you can use the {kw}`local` modifier to indicate that they only have effect until the current {kw}`section` or {kw}`namespace` is closed, or until the end of the current file. ```lean structure Point where x : Nat y : Nat section local instance : Add Point where add a b := { x := a.x + b.x, y := a.y + b.y } def double (p : Point) := p + p end -- instance `Add Point` is not active anymore /-- error: failed to synthesize HAdd Point Point ?m.5 Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. -/ #guard_msgs in def triple (p : Point) := p + p + p ``` You can also temporarily disable an instance using the {kw}`attribute` command until the current {kw}`section` or {kw}`namespace` is closed, or until the end of the current file. ```lean structure Point where x : Nat y : Nat instance addPoint : Add Point where add a b := { x := a.x + b.x, y := a.y + b.y } def double (p : Point) := p + p attribute [-instance] addPoint /-- error: failed to synthesize HAdd Point Point ?m.5 Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. 
-/ #guard_msgs in def triple (p : Point) := p + p + p -- Error: failed to synthesize instance ``` We recommend you only use this command to diagnose problems. # Scoped Instances %%% tag := "scoped-instances" %%% You can also declare scoped instances in namespaces. This kind of instance is only active when you are inside of the namespace or open the namespace. ```lean structure Point where x : Nat y : Nat namespace Point scoped instance : Add Point where add a b := { x := a.x + b.x, y := a.y + b.y } def double (p : Point) := p + p end Point -- instance `Add Point` is not active anymore /-- error: failed to synthesize HAdd Point Point ?m.3 Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. -/ #guard_msgs (error) in #check fun (p : Point) => p + p + p namespace Point -- instance `Add Point` is active again #check fun (p : Point) => p + p + p end Point open Point -- activates instance `Add Point` #check fun (p : Point) => p + p + p ``` You can use the command {kw}`open scoped`{lit}` <namespace>` to activate scoped attributes but will not “open” the names from the namespace. ```lean structure Point where x : Nat y : Nat namespace Point scoped instance : Add Point where add a b := { x := a.x + b.x, y := a.y + b.y } def double (p : Point) := p + p end Point open scoped Point -- activates instance `Add Point` #check fun (p : Point) => p + p + p /-- error: Unknown identifier `double` -/ #guard_msgs (error) in #check fun (p : Point) => double p ``` # Decidable Propositions %%% tag := "decidable-propositions" %%% Let us consider another example of a type class defined in the standard library, namely the type class of {lean}`Decidable` propositions. Roughly speaking, an element of {lean}`Prop` is said to be decidable if we can decide whether it is true or false. The distinction is only useful in constructive mathematics; classically, every proposition is decidable. 
But if we use the classical principle, say, to define a function by cases, that function will not be computable. Algorithmically speaking, the {lean}`Decidable` type class can be used to infer a procedure that effectively determines whether or not the proposition is true. As a result, the type class supports such computational definitions when they are possible while at the same time allowing a smooth transition to the use of classical definitions and classical reasoning. In the standard library, {lean}`Decidable` is defined formally as follows: ```lean namespace Hidden ------ class inductive Decidable (p : Prop) where | isFalse (h : ¬p) : Decidable p | isTrue (h : p) : Decidable p ------ end Hidden ``` :::setup ``` variable {p : Prop} (t : Decidable p) (t' : p ∨ ¬p) (a b : α) ``` Logically speaking, having an element {lean}`t : Decidable p` is stronger than having an element {lean}`t' : p ∨ ¬p`; it enables us to define values of an arbitrary type depending on the truth value of {lean}`p`. For example, for the expression {lean}`if p then a else b` to make sense, we need to know that {lean}`p` is decidable. That expression is syntactic sugar for {lean}`ite p a b`, where {lean}`ite` is defined as follows: ::: ```lean namespace Hidden ------ def ite {α : Sort u} (c : Prop) [h : Decidable c] (t e : α) : α := h.casesOn (motive := fun _ => α) (fun _ => e) (fun _ => t) ------ end Hidden ``` :::leanFirst The standard library also contains a variant of {leanRef}`ite` called {leanRef}`dite`, the dependent if-then-else expression. 
It is defined as follows: ```lean namespace Hidden ------ def dite {α : Sort u} (c : Prop) [h : Decidable c] (t : c → α) (e : Not c → α) : α := Decidable.casesOn (motive := fun _ => α) h e t ------ end Hidden ``` ::: :::setup ``` variable {c : Prop} [Decidable c] (t : c → α) (e : ¬c → α) (hc : c) (hnc : ¬c) ``` ```lean (show := false) example [Decidable c] (t e : α) : α := if h : c then t else e ``` That is, in {lean}`dite c t e`, we can assume {lean}`hc : c` in the “then” branch, and {lean}`hnc : ¬c` in the “else” branch. To make {lean}`dite` more convenient to use, Lean allows us to write {leanRef}`if h : c then t else e` instead of {lean}`dite c (fun h : c => t h) (fun h : ¬c => e h)`. ::: Without classical logic, we cannot prove that every proposition is decidable. But we can prove that _certain_ propositions are decidable. For example, we can prove the decidability of basic operations like equality and comparisons on the natural numbers and the integers. Moreover, decidability is preserved under propositional connectives: ```lean #check @instDecidableAnd -- @instDecidableAnd : {p q : Prop} → [dp : Decidable p] → [dq : Decidable q] → Decidable (p ∧ q) #check @instDecidableOr #check @instDecidableNot ``` Thus we can carry out definitions by cases on decidable predicates on the natural numbers: ```lean def step (a b x : Nat) : Nat := if x < a ∨ x > b then 0 else 1 set_option pp.explicit true #print step ``` Turning on implicit arguments shows that the elaborator has inferred the decidability of the proposition {leanRef}`x < a ∨ x > b`, simply by applying appropriate instances. With the classical axioms, we can prove that every proposition is decidable. You can import the classical axioms and make the generic instance of decidability available by opening the {lit}`Classical` namespace. ```lean open Classical ``` :::setup ``` open Classical variable {p : Prop} ``` Thereafter {lean}`Decidable p` has an instance for every {leanRef}`p`. 
Thus all theorems in the library that rely on decidability assumptions are freely available when you want to reason classically. In {ref "axioms-and-computation"}[Axioms and Computation], we will see that using the law of the excluded middle to define functions can prevent them from being used computationally. Thus, the standard library assigns a low priority to the {lean}`propDecidable` instance. ::: ```lean namespace Hidden ------ open Classical noncomputable scoped instance (priority := low) propDecidable (a : Prop) : Decidable a := choice <| match em a with | Or.inl h => ⟨isTrue h⟩ | Or.inr h => ⟨isFalse h⟩ ------ end Hidden ``` This guarantees that Lean will favor other instances and fall back on {leanRef}`propDecidable` only after other attempts to infer decidability have failed. The {lean}`Decidable` type class also provides a bit of small-scale automation for proving theorems. The standard library introduces the tactic {tactic}`decide` that uses the {lean}`Decidable` instance to solve simple goals, as well as a function {name}`decide` that uses a {lean}`Decidable` instance to compute the corresponding {lean}`Bool`. ```lean example : 10 < 5 ∨ 1 > 0 := by decide example : ¬(True ∧ False) := by decide example : 10 * 20 = 200 := by decide theorem ex : True ∧ 2 = 1 + 1 := by decide #print ex #check @of_decide_eq_true -- @of_decide_eq_true : ∀ {p : Prop} [inst : Decidable p], decide p = true → p #check @decide -- decide : (p : Prop) → [h : Decidable p] → Bool ``` :::setup ``` variable {p : Prop} [Decidable p] ``` They work as follows. The expression {lean}`decide p` tries to infer a decision procedure for {leanRef}`p`, and, if it is successful, evaluates to either {lean}`true` or {lean}`false`. In particular, if {leanRef}`p` is a true closed expression, {leanRef}`decide p` will reduce definitionally to the Boolean {lean}`true`. On the assumption that {lean}`decide p = true` holds, {lean}`of_decide_eq_true` produces a proof of {lean}`p`. 
The tactic {tactic}`decide` puts it all together to prove a target {lean}`p`. By the previous observations, {tactic}`decide` will succeed any time the inferred decision procedure for {lean}`p` has enough information to evaluate, definitionally, to the {lean}`isTrue` case. ::: # Managing Type Class Inference %%% tag := "managing-type-class-inference" %%% If you are ever in a situation where you need to supply an expression that Lean can infer by type class inference, you can ask Lean to carry out the inference using {name}`inferInstance`: ```lean def foo : Add Nat := inferInstance def bar : Inhabited (Nat → Nat) := inferInstance #check @inferInstance -- @inferInstance : {α : Sort u_1} → [i : α] → α ``` :::setup ``` variable (t : T) ``` In fact, you can use Lean's {lean}`(t : T)` notation to specify the class whose instance you are looking for, in a concise manner: ::: ```lean #check (inferInstance : Add Nat) ``` You can also use the auxiliary definition {lean}`inferInstanceAs`: ```lean #check inferInstanceAs (Add Nat) #check @inferInstanceAs -- inferInstanceAs : (α : Sort u_1) → [i : α] → α ``` :::leanFirst Sometimes Lean can't find an instance because the class is buried under a definition. For example, Lean cannot find an instance of {leanRef}`Inhabited (Set α)`. We can declare one explicitly: ```lean def Set (α : Type u) := α → Prop /-- error: failed to synthesize Inhabited (Set α) Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. -/ #guard_msgs in example : Inhabited (Set α) := inferInstance instance : Inhabited (Set α) := inferInstanceAs (Inhabited (α → Prop)) ``` ::: At times, you may find that the type class inference fails to find an expected instance, or, worse, falls into an infinite loop and times out. 
To help debug in these situations, Lean enables you to request a trace of the search: ```lean set_option trace.Meta.synthInstance true ``` If you are using VS Code, you can read the results by hovering over the relevant theorem or definition, or opening the messages window with {kbd}[`Ctrl` `Shift` `Enter`]. You can also limit the search using the following options: ```lean set_option synthInstance.maxHeartbeats 10000 set_option synthInstance.maxSize 400 ``` Option {option}`synthInstance.maxHeartbeats` specifies the maximum amount of heartbeats per typeclass resolution problem. A heartbeat is the number of (small) memory allocations (in thousands), 0 means there is no limit. Option {option}`synthInstance.maxSize` is the maximum number of instances used to construct a solution in the type class instance synthesis procedure. Remember also that in both the VS Code and Emacs editor modes, tab completion works in {kw}`set_option`, to help you find suitable options. As noted above, the type class instances in a given context represent a Prolog-like program, which gives rise to a backtracking search. Both the efficiency of the program and the solutions that are found can depend on the order in which the system tries the instance. Instances which are declared last are tried first. Moreover, if instances are declared in other modules, the order in which they are tried depends on the order in which namespaces are opened. Instances declared in namespaces which are opened later are tried earlier. You can change the order that type class instances are tried by assigning them a _priority_. When an instance is declared, it is assigned a default priority value. You can assign other priorities when defining an instance. 
The following example illustrates how this is done: ```lean class Foo where a : Nat b : Nat instance (priority := default + 1) i1 : Foo where a := 1 b := 1 instance i2 : Foo where a := 2 b := 2 example : Foo.a = 1 := rfl instance (priority := default + 2) i3 : Foo where a := 3 b := 3 example : Foo.a = 3 := rfl ``` # Coercions using Type Classes %%% tag := "coercions-using-type-classes" %%% :::setup ``` variable {n : Nat} {α : Type u} {as : List α} def Set (α : Type u) := α → Prop ``` The most basic type of coercion maps elements of one type to another. For example, a coercion from {lean}`Nat` to {lean}`Int` allows us to view any element {lean}`n : Nat` as an element of {lean}`Int`. But some coercions depend on parameters; for example, for any type {lean}`α`, we can view any element {lean}`as : List α` as an element of {lean}`Set α`, namely, the set of elements occurring in the list. The corresponding coercion is defined on the “family” of types {lean}`List α`, parameterized by {lean}`α`. ::: Lean allows us to declare three kinds of coercions: - from a family of types to another family of types - from a family of types to the class of sorts - from a family of types to the class of function types The first kind of coercion allows us to view any element of a member of the source family as an element of a corresponding member of the target family. The second kind of coercion allows us to view any element of a member of the source family as a type. The third kind of coercion allows us to view any element of the source family as a function. Let us consider each of these in turn. :::setup ``` variable {α : Type u} {β : Type v} [Coe α β] ``` In Lean, coercions are implemented on top of the type class resolution framework. We define a coercion from {lean}`α` to {lean}`β` by declaring an instance of {lean}`Coe α β`. 
For example, we can define a coercion from {lean}`Bool` to {lean}`Prop` as follows: ```lean instance : Coe Bool Prop where coe b := b = true ``` ::: This enables us to use boolean terms in {kw}`if`-{kw}`then`-{kw}`else` expressions: ```lean #eval if true then 5 else 3 #eval if false then 5 else 3 ``` :::leanFirst We can define a coercion from {leanRef}`List α` to {leanRef}`Set α` as follows: ```lean def Set (α : Type u) := α → Prop def Set.empty {α : Type u} : Set α := fun _ => False def Set.mem (a : α) (s : Set α) : Prop := s a def Set.singleton (a : α) : Set α := fun x => x = a def Set.union (a b : Set α) : Set α := fun x => a x ∨ b x notation "{ " a " }" => Set.singleton a infix:55 " ∪ " => Set.union ------ def List.toSet : List α → Set α | [] => Set.empty | a::as => {a} ∪ as.toSet instance : Coe (List α) (Set α) where coe a := a.toSet def s : Set Nat := {1} #check s ∪ [2, 3] -- s ∪ [2, 3].toSet : Set Nat ``` ::: We can use the notation {lit}`↑` to force a coercion to be introduced in a particular place. It is also helpful to make our intent clear, and work around limitations of the coercion resolution system. ```lean def Set (α : Type u) := α → Prop def Set.empty {α : Type u} : Set α := fun _ => False def Set.mem (a : α) (s : Set α) : Prop := s a def Set.singleton (a : α) : Set α := fun x => x = a def Set.union (a b : Set α) : Set α := fun x => a x ∨ b x notation "{ " a " }" => Set.singleton a infix:55 " ∪ " => Set.union def List.toSet : List α → Set α | [] => Set.empty | a::as => {a} ∪ as.toSet instance : Coe (List α) (Set α) where coe a := a.toSet ------ def s : Set Nat := {1} #check let x := ↑[2, 3]; s ∪ x -- let x := [2, 3].toSet; s ∪ x : Set Nat #check let x := [2, 3]; s ∪ x -- let x := [2, 3]; s ∪ x.toSet : Set Nat ``` Lean also supports dependent coercions using the type class {lean}`CoeDep`. For example, we cannot coerce arbitrary propositions to {lean}`Bool`, only the ones that implement the {lean}`Decidable` typeclass. 
```lean instance (p : Prop) [Decidable p] : CoeDep Prop p Bool where coe := decide p ``` Lean will also chain (non-dependent) coercions as necessary. Actually, the type class {lean}`CoeT` is the transitive closure of {lean}`Coe`. Let us now consider the second kind of coercion. By the _class of sorts_, we mean the collection of universes {lean}`Type u`. A coercion of the second kind is of the form: ``` c : (x1 : A1) → ... → (xn : An) → F x1 ... xn → Type u ``` where {lit}`F` is a family of types as above. This allows us to write {lit}`s : t` whenever {lit}`t` is of type {lit}`F a₁ ... aₙ`. In other words, the coercion allows us to view the elements of {lit}`F a₁ ... aₙ` as types. This is very useful when defining algebraic structures in which one component, the carrier of the structure, is a {lean}`Type`. For example, we can define a semigroup as follows: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b ``` :::setup ``` structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b variable {S : Semigroup} (a b : S.carrier) instance : CoeSort Semigroup (Type u) where coe s := s.carrier universe u ``` In other words, a semigroup consists of a type, {leanRef}`carrier`, and a multiplication, {leanRef}`mul`, with the property that the multiplication is associative. The {kw}`instance` command allows us to write {lean}`a * b` instead of {lean}`Semigroup.mul S a b` whenever we have {lean}`a b : S.carrier`; notice that Lean can infer the argument {leanRef}`S` from the types of {leanRef}`a` and {leanRef}`b`. 
The function {lean}`Semigroup.carrier` maps the class {leanRef}`Semigroup` to the sort {leanRef}`Type u`: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b ------ #check Semigroup.carrier -- Semigroup.carrier.{u} (self : Semigroup) : Type u ``` If we declare this function to be a coercion, then whenever we have a semigroup {lean}`S : Semigroup`, we can write {lean}`a : S` instead of {lean}`a : S.carrier`: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b ------ instance : CoeSort Semigroup (Type u) where coe s := s.carrier example (S : Semigroup) (a b c : S) : (a * b) * c = a * (b * c) := Semigroup.mul_assoc _ a b c ``` It is the coercion that makes it possible to write {leanRef}`(a b c : S)`. Note that, we define an instance of {leanRef}`CoeSort Semigroup (Type u)` instead of {lean}`Coe Semigroup (Type u)`. ::: ::::setup ``` variable (B : Type u) (C : Type v) ``` By the _class of function types_, we mean the collection of Pi types {lean}`(z : B) → C`. The third kind of coercion has the form: ``` c : (x₁ : A₁) → ... → (xₙ : Aₙ) → (y : F x₁ ... xₙ) → (z : B) → C ``` :::leanFirst where {lit}`F` is again a family of types and {lit}`B` and {lit}`C` can depend on {lit}`x₁, ..., xₙ, y`. This makes it possible to write {lit}`t s` whenever {lit}`t` is an element of {lit}`F a₁ ... aₙ`. In other words, the coercion enables us to view elements of {lit}`F a₁ ... aₙ` as functions. Continuing the example above, we can define the notion of a morphism between semigroups {leanRef}`S1` and {leanRef}`S2`. That is, a function from the carrier of {leanRef}`S1` to the carrier of {leanRef}`S2` (note the implicit coercion) that respects the multiplication. 
The projection {leanRef}`Morphism.mor` takes a morphism to the underlying function: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b instance : CoeSort Semigroup (Type u) where coe s := s.carrier ------ structure Morphism (S1 S2 : Semigroup) where mor : S1 → S2 resp_mul : ∀ a b : S1, mor (a * b) = (mor a) * (mor b) #check @Morphism.mor ``` ::: As a result, it is a prime candidate for the third type of coercion. :::: ```lean structure Semigroup where carrier : Type u mul : carrier → carrier → carrier mul_assoc (a b c : carrier) : mul (mul a b) c = mul a (mul b c) instance (S : Semigroup) : Mul S.carrier where mul a b := S.mul a b instance : CoeSort Semigroup (Type u) where coe s := s.carrier structure Morphism (S1 S2 : Semigroup) where mor : S1 → S2 resp_mul : ∀ a b : S1, mor (a * b) = (mor a) * (mor b) ------ instance (S1 S2 : Semigroup) : CoeFun (Morphism S1 S2) (fun _ => S1 → S2) where coe m := m.mor theorem resp_mul {S1 S2 : Semigroup} (f : Morphism S1 S2) (a b : S1) : f (a * b) = f a * f b := f.resp_mul a b example (S1 S2 : Semigroup) (f : Morphism S1 S2) (a : S1) : f (a * a * a) = f a * f a * f a := calc f (a * a * a) _ = f (a * a) * f a := by rw [resp_mul f] _ = f a * f a * f a := by rw [resp_mul f] ``` With the coercion in place, we can write {leanRef}`f (a * a * a)` instead of {leanRef}`f.mor (a * a * a)`. When the {leanRef}`Morphism`, {leanRef}`f`, is used where a function is expected, Lean inserts the coercion. Similar to {lean}`CoeSort`, we have yet another class {lean}`CoeFun` for this class of coercions. The parameter {lit}`γ` is used to specify the function type we are coercing to. This type may depend on the type we are coercing from.
theorem_proving_in_lean4/book/TPiL/Examples.lean
-- Support code for the TPiL book: writes example snippets into an external
-- Lean project, builds them there, and loads the highlighted results back
-- via SubVerso.
import SubVerso.Examples
import Lean.Data.NameMap
import Lean.DocString.Syntax
import VersoManual
import Verso.Code.HighlightedToTex

open Lean (NameMap MessageSeverity)
open Lean.Doc.Syntax

namespace TPiL

open Verso Doc Elab Genre.Manual ArgParse Code Highlighted WebAssets Output Html Log Code External
open SubVerso.Highlighting
open SubVerso.Examples.Messages
open Lean
open Std

export Verso.Code.External (lit)

-- Root directory of the external project in which example code is compiled.
private def projectDir : System.FilePath := "../examples/"

-- Digit alphabet used by `hashString` to render a hash in base 36.
def alphabet := "abcdefghijklmnopqrstuvwxyz0123456789"

-- Renders `n` in base 36 (least-significant digit first), prefixed with
-- "Example" so the result is usable as a module-name component.
-- Note that `n = 0` yields just "Example".
def hashString (n : UInt64) : String := Id.run do
  let mut n : Nat := n.toNat
  let mut out : String := "Example" -- always start with a letter
  while n > 0 do
    -- `alphabet` is ASCII, so indexing by byte position selects a full digit.
    out := out.push ({ byteIdx := n % 36 : String.Pos.Raw} |>.get! alphabet )
    n := n / 36
  return out

section
open System
open SubVerso.Module

variable [Monad m] [MonadError m] [MonadLiftT BaseIO m] [MonadLiftT IO m] [MonadFinally m]
variable [MonadTrace m] [AddMessageContext m] [MonadOptions m] [MonadAlwaysExcept ε m]

-- Stores `contents` in the example project under a module named by its
-- content hash, then (in the continuation of this definition) builds and
-- extracts it as an array of highlighted module items. The hash-derived
-- name means identical snippets reuse the same cached extraction JSON.
def extractFile (contents : String) (suppressNamespaces : Option String) : m (Array ModuleItem) := do
  let codeHash := hash contents
  let modBase := hashString codeHash
  let filename := modBase ++ ".lean"
  let mod := "Examples."
++ modBase let jsonFile := s!"{modBase}.json" let jsonPath := (projectDir / "Examples" / jsonFile) let jsonExists : Bool ← if (← jsonPath.pathExists) then (IO.FS.readFile jsonPath) <&> (!·.isEmpty) else pure false unless jsonExists do IO.FS.writeFile (projectDir / "Examples" / filename) contents -- Validate that the path is really a Lean project let lakefile := projectDir / "lakefile.lean" let lakefile' := projectDir / "lakefile.toml" if !(← lakefile.pathExists) && !(← lakefile'.pathExists) then throwError m!"Neither {lakefile} nor {lakefile'} exist, couldn't load project" let toolchainfile := projectDir / "lean-toolchain" let toolchain ← do if !(← toolchainfile.pathExists) then throwError m!"File {toolchainfile} doesn't exist, couldn't load project" pure (← IO.FS.readFile toolchainfile).trimAscii.copy -- Kludge: remove variables introduced by Lake. Clearing out DYLD_LIBRARY_PATH and -- LD_LIBRARY_PATH is useful so the version selected by Elan doesn't get the wrong shared -- libraries. let lakeVars := #["LAKE", "LAKE_HOME", "LAKE_PKG_URL_MAP", "LEAN_SYSROOT", "LEAN_AR", "LEAN_PATH", "LEAN_SRC_PATH", "LEAN", "ELAN", "ELAN_HOME", "LEAN_GITHASH", "ELAN_TOOLCHAIN", "DYLD_LIBRARY_PATH", "LD_LIBRARY_PATH"] let toolchainFile ← IO.FS.Handle.mk toolchainfile .read toolchainFile.lock (exclusive := true) try let cmd := "elan" let runCmd' (args : Array String) : m Unit := do let res ← IO.Process.output { cmd, args, cwd := projectDir -- Unset Lake's environment variables env := lakeVars.map (·, none) } if res.exitCode != 0 then reportFail projectDir cmd args res let runCmd (trace : MessageData) (args : Array String) : m Unit := withTraceNode `Elab.Verso.Code.External.loadModule (fun _ => pure trace) (runCmd' args) runCmd m!"loadModuleContent': building subverso" #["run", "--install", toolchain, "lake", "build", "subverso-extract-mod"] runCmd m!"loadModuleContent': building example project's module" #["run", "--install", toolchain, "lake", "build", "+" ++ mod] let suppressArgs 
:= if let some nss := suppressNamespaces then nss |>.splitToList (· == ' ') |>.filter (!String.isEmpty ·) |>.map (#["--suppress-namespace", ·]) |>.toArray |>.flatten else #[] withTraceNode `Elab.Verso.Code.External.loadModule (fun _ => pure m!"loadModuleContent': extracting '{mod}'") do let args := #["run", "--install", toolchain, "lake", "exe", "subverso-extract-mod"] ++ suppressArgs ++ #[mod, "Examples/" ++ jsonFile] runCmd' args finally toolchainFile.unlock let jsonString ← IO.FS.readFile (projectDir / "Examples" / jsonFile) let .ok json := Json.parse jsonString | if jsonString.isEmpty then throwError s!"Expected JSON in {projectDir / "Examples" / jsonFile}, got empty output" else throwError s!"Expected JSON in {projectDir / "Examples" / jsonFile}, got {jsonString}" match Module.fromJson? json with | .error err => throwError s!"Couldn't parse JSON from output file: {err}\nIn:\n{json}" | .ok m => pure m.items where decorateOut (name : String) (out : String) : String := if out.isEmpty then "" else s!"\n{name}:\n{out}\n" reportFail {α} (projectDir : FilePath) (cmd : String) (args : Array String) (res : IO.Process.Output) : m α := do throwError ("Build process failed." ++ "\nCWD: " ++ projectDir.toString ++ "\nCommand: " ++ cmd ++ "\nArgs: " ++ repr args ++ "\nExit code: " ++ toString res.exitCode ++ "\nstdout: " ++ res.stdout ++ "\nstderr: " ++ res.stderr) end open Std.Iterators in private def hasAtLeastM [Monad m] [Iterator α m β] [Productive α m] (it : IterM (α := α) m β) : Nat → m Bool | 0 => pure true | n + 1 => do match (← it.step).inflate with | .done .. => pure false | .skip it' .. => hasAtLeastM it' (n + 1) | .yield it' .. 
=> hasAtLeastM it' n
termination_by n => (n, it.finitelyManySkips)

-- `hasAtLeast it n` is true when the iterator yields at least `n` elements;
-- pure wrapper around `hasAtLeastM`.
open Std.Iterators in
private def hasAtLeast [Iterator α Id β] [Productive α Id] (it : Iter (α := α) β) (n : Nat) : Bool :=
  hasAtLeastM it.toIterM n

-- Splits highlighted code at the first separator line (a line consisting of
-- four or more '-' characters). Returns the part before the separator (or
-- `none` if there is no separator) and the part after it; the separator line
-- itself is dropped.
def splitExample (code : Highlighted) : Option Highlighted × Highlighted := Id.run do
  let lines := code.lines
  let mut out := .empty
  for h : i in [0:lines.size] do
    let line := lines[i]
    if isSplit line then
      return (some out, lines.extract (i+1) lines.size |>.foldl (init := .empty) (· ++ ·))
    out := out ++ line
  return (none, out)
where
  -- A split line is all dashes, at least four of them (after trimming).
  isSplit (line : Highlighted) : Bool :=
    let trimmed := line.toString.trimAscii
    hasAtLeast trimmed.positions 4 && trimmed.all (· == '-')

-- True when the highlighted fragment contains only whitespace text
-- (tokens are never whitespace; points are ignored).
partial def hlIsWs (hl : Highlighted) : Bool :=
  match hl with
  | .text s | .unparsed s => s.all Char.isWhitespace
  | .seq xs => xs.all hlIsWs
  | .span _ x | .tactics _ _ _ x => hlIsWs x
  | .point .. => true
  | .token .. => false

section
open SubVerso.Module

-- Classification of a single highlighted line for splitting purposes.
private inductive LineType where
  | whitespace
  | split
  | other

-- Classifies a line as blank, a '----' separator, or ordinary content;
-- same separator criterion as `splitExample.isSplit`.
private def lineType (line : Highlighted) : LineType :=
  let trimmed := line.toString.trimAscii
  if trimmed.isEmpty then .whitespace
  else if hasAtLeast trimmed.positions 4 && trimmed.all (· == '-') then .split
  else .other

-- Detaches a hidden prefix from an array of module items: scans for the
-- first '----' separator (either leading a command, or trailing one) and
-- returns the items before it (if any) together with the remainder.
-- Leading/trailing whitespace around the separator is reattached to the
-- neighboring item so the rendered output keeps its spacing.
def detachPrefix (code : Array ModuleItem) : Option (Array ModuleItem) × Array ModuleItem := Id.run do
  let mut out : Array ModuleItem := #[]
  for h : i in [0:code.size] do
    let lines := code[i].code.lines
    -- Check for pre-split
    for h : j in [0:lines.size] do
      match lineType lines[j] with
      | .whitespace => continue
      | .other => break -- not a pre-split
      | .split =>
        let leadingWs := lines.extract 0 j
        let pre := out.modify (out.size - 1) (fun c => {c with code := c.code ++ .seq leadingWs})
        return (pre, #[{code[i] with code := .seq <| lines.extract (j+1)}] ++ code.extract (i + 1) code.size)
    -- Check for post-split
    for h : j in [0:lines.size] do
      -- Walk the lines in reverse: `j'` counts back from the last line.
      let j' := lines.size - (j + 1)
      have : j' < lines.size := by
        have : j < lines.size := by get_elem_tactic
        omega
      match lineType lines[j']
with | .whitespace => continue | .other => break -- not a post-split | .split => let trailingWs := lines.extract (j' + 1) lines.size let thisItem := { code[i] with code := lines.extract 0 j' |>.foldl (init := .empty) (· ++ ·) } let nextItem? := code[i + 1]? |>.map (fun c => {c with code := Highlighted.seq trailingWs ++ c.code}) return (out.push thisItem, nextItem?.toArray ++ code.extract (i + 2) code.size) -- No split here out := out.push code[i] -- No split anywhere return (none, out) def detachSuffix (code : Array ModuleItem) : Array ModuleItem × Option (Array ModuleItem) := Id.run do let mut out : Array ModuleItem := #[] for h : i in [0:code.size] do let i := code.size - (i + 1) have : i < code.size := by rename_i i' _ _ have : i' < code.size := by get_elem_tactic omega let lines := code[i].code.lines -- Check for post-split for h : j in [0:lines.size] do let j' := lines.size - (j + 1) have : j' < lines.size := by have : j < lines.size := by get_elem_tactic omega match lineType lines[j'] with | .whitespace => continue | .other => break -- not a post-split | .split => let trailingWs := lines.extract (j' + 1) lines.size let thisItem := { code[i] with code := lines.extract 0 j' |>.foldl (init := .empty) (· ++ ·) } let nextItem? 
:= if i > 0 then some {code[i - 1] with code := Highlighted.seq trailingWs ++ code[i-1].code} else none return (nextItem?.toArray ++ code.extract (i + 2) code.size, out.push thisItem |>.reverse) -- Check for pre-split for h : j in [0:lines.size] do match lineType lines[j] with | .whitespace => continue | .other => break -- not a pre-split | .split => let leadingWs := lines.extract 0 j return (code.extract 0 i |>.modify (i-1) (fun c => {c with code := c.code ++ .seq leadingWs}), some (out.push {code[i] with code := .seq (lines.extract (j+1))} |>.reverse)) -- No split here out := out.push code[i] -- No split anywhere return (out.reverse, none) def splitExample' (code : Array ModuleItem) : Option (Array ModuleItem) × Array ModuleItem × Option (Array ModuleItem) := let (pre, code) := detachPrefix code let (code, suffix) := detachSuffix code (pre, code, suffix) end def copyButtonCss : String := r#" .tpil-code-container { position: relative; margin: 20px 0; } .copy-btn, .toggle-btn { position: absolute; top: 0px; background: inherit; color: black; border: none; border-radius: 4px; cursor: pointer; font-size: 12px; display: flex; align-items: center; opacity: 0.7; transition: opacity 0.2s; padding: 8px; } .copy-btn:hover, .toggle-btn:hover { opacity: 1; background: #555; } .copy-btn { right: 10px; gap: 5px; } .toggle-btn { right: 45px; /* Position to the left of copy button */ } .copy-btn.copied { background: #28a745; } .copy-icon, .toggle-icon { width: 14px; height: 14px; } .hidden { display: grid; grid-template-rows: 1fr; transition: grid-template-rows 0.3s ease-out, opacity 0.2s ease-out, margin 0.3s ease-out; opacity: 0.8; } .tpil-hide-prefix .hidden { grid-template-rows: 0fr; opacity: 0; margin: 0; } .hidden > .hl.lean { overflow: hidden; margin-bottom: 0; } "# def copyButtonJs : String := r#" function addToggleButtonToElement(elementId, className = 'tpil-hide-prefix') { const element = document.getElementById(elementId); if (!element) { console.error(`Element with 
ID '${elementId}' not found`); return false; } // Create container wrapper if it doesn't exist let container = element.parentElement; if (!container.classList.contains('code-container')) { container = document.createElement('div'); container.className = 'code-container'; container.classList.toggle(className); // Insert container before element element.parentNode.insertBefore(container, element); // Move element into container container.appendChild(element); } // Remove existing toggle button if present const existingBtn = container.querySelector('.toggle-btn'); if (existingBtn) { existingBtn.remove(); } // Create toggle button const toggleBtn = document.createElement('button'); toggleBtn.className = 'toggle-btn'; toggleBtn.title = 'Show hidden lines'; toggleBtn.innerHTML = ` <svg class="toggle-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M12 4.5C7 4.5 2.73 7.61 1 12c1.73 4.39 6 7.5 11 7.5s9.27-3.11 11-7.5c-1.73-4.39-6-7.5-11-7.5zM12 17c-2.76 0-5-2.24-5-5s2.24-5 5-5 5 2.24 5 5-2.24 5-5 5zm0-8c-1.66 0-3 1.34-3 3s1.34 3 3 3 3-1.34 3-3-1.34-3-3-3z"/> </svg> `; // Add click event listener toggleBtn.addEventListener('click', () => { container.classList.toggle(className); // Update icon based on state const isHidden = container.classList.contains(className); toggleBtn.innerHTML = !isHidden ? 
` <svg class="toggle-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M12 7c2.76 0 5 2.24 5 5 0 .65-.13 1.26-.36 1.83l2.92 2.92c1.51-1.26 2.7-2.89 3.43-4.75-1.73-4.39-6-7.5-11-7.5-1.4 0-2.74.25-3.98.7l2.16 2.16C10.74 7.13 11.35 7 12 7zM2 4.27l2.28 2.28.46.46C3.08 8.3 1.78 10.02 1 12c1.73 4.39 6 7.5 11 7.5 1.55 0 3.03-.3 4.38-.84l.42.42L19.73 22 21 20.73 3.27 3 2 4.27zM7.53 9.8l1.55 1.55c-.05.21-.08.43-.08.65 0 1.66 1.34 3 3 3 .22 0 .44-.03.65-.08l1.55 1.55c-.67.33-1.41.53-2.2.53-2.76 0-5-2.24-5-5 0-.79.2-1.53.53-2.2zm4.31-.78l3.15 3.15.02-.16c0-1.66-1.34-3-3-3l-.17.01z"/> </svg> ` : ` <svg class="toggle-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M12 4.5C7 4.5 2.73 7.61 1 12c1.73 4.39 6 7.5 11 7.5s9.27-3.11 11-7.5c-1.73-4.39-6-7.5-11-7.5zM12 17c-2.76 0-5-2.24-5-5s2.24-5 5-5 5 2.24 5 5-2.24 5-5 5zm0-8c-1.66 0-3 1.34-3 3s1.34 3 3 3 3-1.34 3-3-1.34-3-3-3z"/> </svg> `; toggleBtn.title = isHidden ? 'Show hidden lines' : 'Hide lines'; }); // Position toggle button to the left of copy button if it exists const copyBtn = container.querySelector('.copy-btn'); if (copyBtn) { container.insertBefore(toggleBtn, copyBtn); } else { container.appendChild(toggleBtn); } return true; } function addCopyButtonToElement(elementId, codeText) { const element = document.getElementById(elementId); if (!element) { console.error(`Element with ID '${elementId}' not found`); return false; } // Create container wrapper if it doesn't exist let container = element.parentElement; if (!container.classList.contains('code-container')) { container = document.createElement('div'); container.className = 'tpil-code-container'; // Insert container before element element.parentNode.insertBefore(container, element); // Move element into container container.appendChild(element); } // Remove existing copy button if present const existingBtn = container.querySelector('.copy-btn'); if (existingBtn) { existingBtn.remove(); } // Create copy button const copyBtn = 
document.createElement('button'); copyBtn.className = 'copy-btn'; copyBtn.title = 'Copy to clipboard'; copyBtn.innerHTML = ` <svg class="copy-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M16 1H4c-1.1 0-2 .9-2 2v14h2V3h12V1zm3 4H8c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h11c1.1 0 2-.9 2-2V7c0-1.1-.9-2-2-2zm0 16H8V7h11v14z"/> </svg> `; // Add click event listener copyBtn.addEventListener('click', async () => { try { // Copy the provided code text to clipboard await navigator.clipboard.writeText(codeText); // Show success feedback const originalText = copyBtn.innerHTML; copyBtn.innerHTML = ` <svg class="copy-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M9 16.17L4.83 12l-1.42 1.41L9 19 21 7l-1.41-1.41z"/> </svg> Copied! `; copyBtn.classList.add('copied'); // Reset after 2 seconds setTimeout(() => { copyBtn.innerHTML = originalText; copyBtn.classList.remove('copied'); }, 2000); } catch (err) { // Fallback for older browsers fallbackCopyTextToClipboard(codeText); // Show feedback copyBtn.textContent = 'Copied!'; setTimeout(() => { copyBtn.innerHTML = ` <svg class="copy-icon" viewBox="0 0 24 24" fill="currentColor"> <path d="M16 1H4c-1.1 0-2 .9-2 2v14h2V3h12V1zm3 4H8c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h11c1.1 0 2-.9 2-2V7c0-1.1-.9-2-2-2zm0 16H8V7h11v14z"/> </svg> `; }, 2000); } }); // Add button to container container.appendChild(copyBtn); return true; } // Fallback function for older browsers function fallbackCopyTextToClipboard(text) { const textArea = document.createElement('textarea'); textArea.value = text; textArea.style.position = 'fixed'; textArea.style.left = '-999999px'; textArea.style.top = '-999999px'; document.body.appendChild(textArea); textArea.focus(); textArea.select(); try { document.execCommand('copy'); } catch (err) { console.error('Fallback: Oops, unable to copy', err); } document.body.removeChild(textArea); } // Expose API function globally window.addCopyButtonToElement = addCopyButtonToElement; window.addToggleButtonToElement = 
addToggleButtonToElement; "# def examplesCss := r#" .example { margin-left: 0.75em; } .example .hl.lean.block { margin: 0; } .example .information pre { margin: 0 0 0.25em 0; } "# def tpilBlock (block : BlockDescr) : BlockDescr := { block with extraJsFiles := block.extraJsFiles |>.insert { filename := "copybutton.js", contents := copyButtonJs, sourceMap? := none } extraCssFiles := block.extraCssFiles |>.insert { filename := "copybutton.css", contents := copyButtonCss } |>.insert { filename := "examples.css", contents := examplesCss } } def tpilInline (inline : InlineDescr) : InlineDescr := { inline with extraJsFiles := inline.extraJsFiles |>.insert {filename := "copybutton.js", contents := copyButtonJs, sourceMap? := none} extraCssFiles := inline.extraCssFiles |>.insert { filename := "copybutton.css", contents := copyButtonCss } |>.insert { filename := "examples.css", contents := examplesCss } } structure ExampleItem where code : Highlighted output : Option Highlighted.Message trailing : String deriving ToJson, FromJson, Repr, Quote def verbatimBlock (cmd : Highlighted) : TeX := let contents := cmd.trimOneTrailingNl.trimOneLeadingNl.toVerbatimTeX .seq #[.raw s!"\\begin\{LeanVerbatim}[vspace=0pt]\n", contents, .raw "\n\\end{LeanVerbatim}\n"] block_extension Block.lean (allowToggle : Bool) (pre : Option Highlighted) (code : Array ExampleItem) (post : Option Highlighted) (goalVisibility : HighlightHtmlM.VisibleProofStates := .none) via withHighlighting, tpilBlock where data := let defined : Array (Name × String) := code.flatMap (definedNames ·.code) .arr #[.bool allowToggle, toJson pre, toJson code, toJson post, toJson goalVisibility, toJson defined] traverse id data _ := do let .arr #[_allowToggle, _pre, _code, _post, _visibility, definesJson] := data | logError s!"Expected array for Lean block, got {data.compress}"; return none match FromJson.fromJson? 
definesJson with | .error err => logError <| "Failed to deserialize code config during traversal:" ++ err return none | .ok (defines : Array (Name × String)) => for (d, s) in defines do if d.isAnonymous then continue let d := d.toString let path ← (·.path) <$> read let _ ← externalTag id path d let context := (← read).headers.map (·.titleString) modify (·.saveDomainObject exampleDomain d id) if let some link := (← get).externalTags[id]? then modify (·.modifyDomainObjectData exampleDomain d fun v => let v := if let .obj _ := v then v else .obj {} v.setObjVal! link.link (json%{"context": $context, "display": $s})) pure none toTeX := open Verso.Output.TeX in open Verso.Doc.TeX in some <| fun _ _ _ data _ => do let .arr #[.bool _allowToggle, hlPreJson, hlJson, hlPostJson, goalVisibilityJson, _defs] := data | logError "Expected five-element JSON for Lean code" pure .empty let pre ← match FromJson.fromJson? (α := Option Highlighted) hlPreJson with | .error err => logError <| "Couldn't deserialize Lean code intro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let code ← match FromJson.fromJson? (α := Array ExampleItem) hlJson with | .error err => logError <| "Couldn't deserialize Lean code block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let _post ← match FromJson.fromJson? (α := Option Highlighted) hlPostJson with | .error err => logError <| "Couldn't deserialize Lean code outro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let _visibility ← match FromJson.fromJson? (α := HighlightHtmlM.VisibleProofStates) goalVisibilityJson with | .error err => logError <| "Couldn't deserialize Lean code outro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let codeIndent := code.foldl (init := pre.map (·.indentation)) (fun i? 
y => i?.map (min · y.1.indentation)) |>.getD 0 let mut codeTeX : TeX := .empty for ⟨cmd, out?, ws⟩ in code do let cmd := cmd.deIndent codeIndent codeTeX := codeTeX ++ verbatimBlock cmd if let some msg := out? then codeTeX := codeTeX ++ (← msg.toTeX) unless ws.isEmpty do codeTeX := codeTeX ++ (← (Highlighted.text ws).toTeX) pure codeTeX extraJsFiles := .ofList [{ filename := "copybutton.js", contents := copyButtonJs, sourceMap? := none }] extraCssFiles := .ofList [ { filename := "copybutton.css", contents := copyButtonCss }, { filename := "examples.css", contents := examplesCss } ] toHtml := open Verso.Output.Html in some <| fun _ _ _ data _ => do let .arr #[.bool allowToggle, hlPreJson, hlJson, hlPostJson, goalVisibilityJson, _defs] := data | HtmlT.logError "Expected five-element JSON for Lean code" pure .empty let pre ← match FromJson.fromJson? (α := Option Highlighted) hlPreJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code intro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let code ← match FromJson.fromJson? (α := Array ExampleItem) hlJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let post ← match FromJson.fromJson? (α := Option Highlighted) hlPostJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code outro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let visibility ← match FromJson.fromJson? (α := HighlightHtmlM.VisibleProofStates) goalVisibilityJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code outro block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let codeIndent := code.foldl (init := pre.map (·.indentation)) (fun i? 
y => i?.map (min · y.1.indentation)) |>.getD 0 let mut codeHtml : Html := .empty let mut codeString := "" if allowToggle then if let some p := pre then let p := p.deIndent codeIndent let inner ← withDefinitionsAsTargets false <| withVisibleProofStates visibility <| p.trimOneLeadingNl |>.blockHtml "examples" (trim := false) (g := Verso.Genre.Manual) codeHtml := codeHtml ++ {{ <div class="hidden">{{ inner }}</div> }} codeString := codeString ++ p.toString for ⟨cmd, out?, ws⟩ in code do let cmd := cmd.deIndent codeIndent let moreCode ← withDefinitionsAsTargets true <| withVisibleProofStates visibility <| cmd.trimOneLeadingNl |>.blockHtml "examples" (trim := false) (g := Verso.Genre.Manual) codeHtml := codeHtml ++ moreCode codeString := codeString ++ cmd.toString if let some msg := out? then let msgHtml ← msg.toHtml (g := Verso.Genre.Manual) [] codeHtml := codeHtml ++ {{<pre class=s!"hl lean lean-output {msg.severity.class}">{{msgHtml}}</pre>}} unless ws.isEmpty do codeHtml := codeHtml ++ (← (Highlighted.text ws).blockHtml "examples" (trim := false) (g := Verso.Genre.Manual)) if allowToggle then if let some p := post then let p := p.deIndent codeIndent let inner ← withDefinitionsAsTargets false <| withVisibleProofStates visibility <| p.trimOneLeadingNl |>.blockHtml "examples" (trim := false) (g := Verso.Genre.Manual) codeHtml := codeHtml ++ {{ <div class="hidden">{{ inner }}</div> }} codeString := codeString ++ p.toString let i ← uniqueId (g := Verso.Genre.Manual) let toCopy := (pre.map (·.toString)).getD "" ++ codeString let mut script := s!"addCopyButtonToElement({i.quote}, {toCopy.quote});" if allowToggle && (pre.isSome || post.isSome) then script := script ++ s!"\naddToggleButtonToElement({i.quote});" return {{ <div class="example" id={{i}}>{{codeHtml}}</div> <script> {{Html.text false script}} </script> }} block_extension Block.leanAnchor (code : Highlighted) (completeCode : String) via withHighlighting, tpilBlock where data := .arr #[toJson code, toJson 
completeCode] traverse _ _ _ := pure none toTeX := open Verso.Output.TeX in open Verso.Doc.TeX in some <| fun _ _ _ data _ => do let .arr #[hlJson, completeCodeJson] := data | logError "Expected two-element JSON for Lean code" pure .empty let code ← match FromJson.fromJson? (α := Highlighted) hlJson with | .error err => logError <| "Couldn't deserialize Lean code block while rendering TeX: " ++ err return .empty | .ok hl => pure hl let _completeCode ← match FromJson.fromJson? (α := String) completeCodeJson with | .error err => logError <| "Couldn't deserialize Lean code string while rendering TeX: " ++ err return .empty | .ok hl => pure hl let code := code.deIndent code.indentation pure (verbatimBlock code) toHtml := open Verso.Output.Html in some <| fun _ _ _ data _ => do let .arr #[hlJson, completeCodeJson] := data | HtmlT.logError "Expected two-element JSON for Lean code" pure .empty let code ← match FromJson.fromJson? (α := Highlighted) hlJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code block while rendering HTML: " ++ err return .empty | .ok hl => pure hl let completeCode ← match FromJson.fromJson? 
(α := String) completeCodeJson with | .error err => HtmlT.logError <| "Couldn't deserialize Lean code string while rendering HTML: " ++ err return .empty | .ok hl => pure hl let code := code.deIndent code.indentation let codeHtml : Html := (← code.blockHtml "examples" (trim := false) (g := Verso.Genre.Manual)) let i ← uniqueId (g := Verso.Genre.Manual) let mut script := s!"addCopyButtonToElement({i.quote}, {completeCode.quote});" return {{ <div class="example" id={{i}}>{{codeHtml}}</div> <script> {{Html.text false script}} </script> }} def proofStateStyle := r#" .hl.lean.proof-state-view { white-space: collapse; margin-left: 0.75em; } .hl.lean.proof-state-view .hypothesis { display: table !important; border-spacing: 0 0.2rem; border-collapse: separate; } .hl.lean.proof-state-view .tactic-state { display: block; left: 0; padding: 0; border: none; } .hl.lean.proof-state-view .tactic-state:has(.goal + .goal) { display: flex; flex-wrap: wrap; gap: 2rem; justify-content: space-evenly; width: 100%; } .hl.lean.proof-state-view .tactic-state .goal { margin: 0 !important; align-self: flex-start; width: fit-content; } "# block_extension Block.goals (goals : Array (Highlighted.Goal Highlighted)) via withHighlighting, tpilBlock where data := toJson goals traverse _ _ _ := pure none toTeX := open Verso.Output.TeX in open Verso.Doc.TeX in some <| fun _ _ _ data _ => do let goals ← match fromJson? (α := Array (Highlighted.Goal Highlighted)) data with | .ok v => pure v | .error e => logError <| "Failed to deserialize proof state: " ++ e return .empty -- TODO: lay these out side-by-side pure <| .seq (← goals.mapM (·.toTeX)) extraCssFiles := .ofList [{ filename := "proof-state.css", contents := proofStateStyle }] toHtml := open Verso.Output.Html in some <| fun _ _ _ data _ => do let goals ← match fromJson? 
(α := Array (Highlighted.Goal Highlighted)) data with | .ok v => pure v | .error e => HtmlT.logError <| "Failed to deserialize proof state: " ++ e return .empty pure {{ <div class="hl lean proof-state-view" data-lean-context="examples"> <span class="tactic-state"> {{← if goals.isEmpty then pure {{"All goals completed! 🐙"}} else withCollapsedSubgoals (g := Verso.Genre.Manual) .never <| .seq <$> (goals.mapIdxM (fun i x => x.toHtml (·.toHtml) i))}} </span> </div> }} inline_extension Inline.goal (goal : Highlighted.Goal Highlighted) via withHighlighting, tpilInline where data := toJson goal traverse _ _ _ := pure none toTeX := open Verso.Doc.TeX in open Verso.Output.TeX in some <| fun _ _ data _ => do let goal ← match fromJson? (α := Highlighted.Goal Highlighted) data with | .ok v => pure v | .error e => logError <| "Failed to deserialize proof goal: " ++ e return .empty verbatimInline (goal.name.getD "<anonymous>") toHtml := open Verso.Output.Html in some <| fun _ _ data _ => do let goal ← match fromJson? 
(α := Highlighted.Goal Highlighted) data with | .ok v => pure v | .error e => HtmlT.logError <| "Failed to deserialize proof goal: " ++ e return .empty pure {{ <code class="proof-goal-ref hl lean"> <span class="tactic"> {{goal.name |>.getD "<anonymous>"}} <span class="tactic-state"> {{← goal.toHtml (g := Verso.Genre.Manual) (·.toHtml) 0}} </span> </span> </code> }} def kbdCSS := r#" code.unicode-abbrev { background-color: #eee; border-radius: 3px; border: 1px solid #ccc; white-space: nowrap; } kbd { white-space: nowrap; } kbd > code { background-color: #eee; border-radius: 3px; border: 1px solid #b4b4b4; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.2), 0 2px 0 0 rgba(255, 255, 255, 0.7) inset; color: #333; display: inline-block; font-size: 0.85em; font-weight: 700; line-height: 1; padding: 2px 4px; white-space: nowrap; vertical-align: middle; } "# inline_extension Inline.kbd (items : Array String) where data := toJson items traverse _ _ _ := pure none toTeX := open Verso.Output.TeX in open Verso.Doc.TeX in let verb (s : String) : TeX := .seq #[.raw "\\verb|", .raw s, raw "|"] -- Fails if s contains "|" let verbs (ss : List String) : TeX := List.intersperse (TeX.text " ") (ss.map verb) some <| fun _ _ data _ => do let items ← match fromJson? (α := Array String) data with | .ok v => pure v | .error e => logError <| "Failed to deserialize keyboard shortcut: " ++ e return .empty if let #[item] := items then if item.startsWith "\\" then pure (verb item) else let items : List String := item.toList.map fun c => s!"{c}" pure (verbs items) else pure (verbs items.toList) extraCss := [kbdCSS] toHtml := open Verso.Output.Html in some <| fun _ _ data _ => do let items ← match fromJson? 
(α := Array String) data with | .ok v => pure v | .error e => HtmlT.logError <| "Failed to deserialize keyboard shortcut: " ++ e return .empty if let #[item] := items then if item.startsWith "\\" then pure {{<code class="unicode-abbrev">{{item}}</code>}} else let items : Array Html := item.toList.toArray.map fun c => {{<code>{{s!"{c}"}}</code>}} pure {{<kbd>{{items}}</kbd>}} else let items : Array Html := items.map (fun (s : String) => {{<code>s!"{s}"</code>}}) pure {{<kbd>{{items}}</kbd>}} private def oneCodeStr [Monad m] [MonadError m] (inlines : Array (TSyntax `inline)) : m StrLit := do let #[code] := inlines | (if inlines.size == 0 then (throwError ·) else (throwErrorAt (mkNullNode inlines) ·)) "Expected one code element" let `(inline|code($code)) := code | throwErrorAt code "Expected a code element" return code private def codeStrs [Monad m] [MonadError m] (inlines : Array (TSyntax `inline)) : m (Array StrLit) := do let mut out := #[] for i in inlines do match i with | `(inline|code($code)) => out := out.push code | `(inline|$s:str) => unless s.getString.all (·.isWhitespace) do throwErrorAt s "Expected a code literal" | other => throwErrorAt other "Expected a code literal" return out structure Kept (α : Type u) where values : Array α next : Nat in_bounds : next < values.size deriving Repr instance [Inhabited α] : Inhabited (Kept α) where default := ⟨#[default], 0, by simp⟩ def Kept.add (kept : Kept α) (val : α) : Kept α where values := kept.values.set kept.next val (h := kept.in_bounds) next := if kept.next = 0 then kept.values.size - 1 else kept.next - 1 in_bounds := by have := kept.in_bounds rw [Array.size_set] split <;> omega instance [Monad m] : ForM m (Kept α) α where forM kept f := do for h : i in [kept.next:kept.values.size] do f kept.values[i] for h : i in [0:kept.next] do have := kept.in_bounds have : i < kept.next := by get_elem_tactic f kept.values[i] instance [Monad m] : ForIn m (Kept α) α := ⟨ForM.forIn⟩ initialize recentHighlightsExt : 
EnvExtension (Kept Highlighted) ← registerEnvExtension (pure ⟨.replicate 12 .empty, 0, by simp⟩) /-- A mapping from anchor names to the corresponding code. Each code element is paired with it's de-anchored context for copy-paste purposes. -/ initialize savedAnchorExt : EnvExtension (HashMap String (Highlighted × String)) ← registerEnvExtension (pure {}) def allProofInfo (hl : Highlighted) : Array Highlighted := go #[] hl where go (acc : Array Highlighted) : Highlighted → Array Highlighted | .seq xs => xs.foldl (init := acc) go | .span _ x => go acc x | .tactics gs _ _ x => gs.foldl (init := (go acc x)) (fromGoal · ·) | .point .. | .text .. | .token .. | .unparsed .. => acc fromGoal (acc : Array Highlighted) (g : Highlighted.Goal Highlighted) := g.hypotheses.foldl (init := acc.push g.conclusion) fun acc ⟨xs, hl⟩ => let names : Highlighted := xs.foldl (init := .empty) fun hl tok => if hl.isEmpty then .token tok else hl ++ .text " " ++ .token tok acc.push (names ++ .text " " ++ .token ⟨.unknown, ":"⟩ ++ .text " " ++ hl) def saveBackref (hl : Highlighted) : DocElabM Unit := do -- Construct a document with all the proof states in it, so references can target them but they -- don't eat up individual slots in the history ring let hl := allProofInfo hl |>.foldl (init := hl) (· ++ .text "\n" ++ ·) modifyEnv (recentHighlightsExt.modifyState · (·.add hl)) structure ProofState where goals : Array (Highlighted.Goal Highlighted) start : Nat stop : Nat «syntax» : Highlighted deriving Repr initialize proofStatesExt : EnvExtension (HashMap String ProofState) ← registerEnvExtension (pure {}) /-- Extracts all messages from the given code. -/ def allInfo (hl : Highlighted) : Array (Highlighted.Message × Option Highlighted) := match hl with | .seq xs => xs.flatMap allInfo | .point k str => #[(⟨k, str⟩, none)] | .tactics _ _ _ x => allInfo x | .span infos x => (infos.map fun (k, str) => (⟨k, str⟩, some x)) ++ allInfo x | .text .. | .token .. | .unparsed .. 
=> #[] def trailingText (hl : Highlighted) : Highlighted × String := match hl with | .seq xs => Id.run do let mut txt := "" for h : i in [0:xs.size] do let i' := xs.size - (i + 1) have : i < xs.size := by get_elem_tactic have : i' < xs.size := by grind let (hl', txt') := trailingText xs[i'] txt := txt' ++ txt if hl'.isEmpty then continue else return (.seq (xs.extract 0 i' |>.push hl'), txt) return (.empty, txt) | .point .. | .token .. => (hl, "") | .tactics i s e hl' => let (hl', txt) := trailingText hl' (.tactics i s e hl', txt) | .span i hl' => let (hl', txt) := trailingText hl' (.span i hl', txt) | .text txt | .unparsed txt => (.empty, txt) private def commentContents (s : String) : Option (String × String) := let s := s.trimAsciiStart if s.startsWith "--" then let s := s.dropWhile (· == '-') |>.trimAsciiStart let ws := s.takeEndWhile (·.isWhitespace) some (s.dropEnd ws.positions.count |>.copy, ws.copy) else none /-- Extracts a trailing comment from code, if present. Returns the code along with the comment and its trailing whitespace. -/ def trailingComment (hl : Highlighted) : Highlighted × Option (String × String) := let x := trailingText hl match commentContents x.2 with | some txt' => (x.1, some txt') | none => (hl, none) section inductive ShowProofStates where | none | named (which : Array String) | all structure LeanConfig where checkOutput : Bool suppressNamespaces : Option String allowVisible : Bool showProofStates : ShowProofStates «show» : Bool variable [Monad m] [MonadError m ] [MonadLiftT CoreM m] instance : FromArgVal ShowProofStates m where fromArgVal := { description := "`all`, `none`, or a string literal", signature := .String ∪ .Ident get := fun | .name x => match x.getId with | `all => pure .all | `none => pure .none | _ => do let h ← MessageData.hint m!"Use 'all', 'none', or a string" #["all", "none", "NAME1 NAME2 ...".quote] (ref? 
:= x)
    throwErrorAt x m!"Expected 'all' or 'none' or a string literal\n{h}"
  | .str s => pure <| .named <| (s.getString.splitOn " ").toArray
  | .num n => do
    let h ← MessageData.hint m!"Use 'all', 'none', or a string"
      #["all", "none", "NAME1 NAME2 ...".quote] (ref? := n)
    throwErrorAt n m!"Expected 'all' or 'none' or a string literal\n{h}"
}

-- Named-argument parser for the `lean` code block; defaults match the common case
-- (output checking on, visibility markup allowed, code shown).
instance : FromArgs LeanConfig m where
  fromArgs := LeanConfig.mk <$>
    .namedD `check .bool true <*>
    .named `suppressNamespaces .string true <*>
    .namedD `allowVisible .bool true <*>
    .namedD' `showProofStates .none <*>
    .namedD' `show .true

/-- Configuration for the `save` code block: an optional name plus namespaces to suppress. -/
structure SavedLeanConfig where
  name : Option Ident
  suppressNamespaces : Option String

instance : FromArgs SavedLeanConfig m where
  fromArgs := SavedLeanConfig.mk <$>
    (some <$> .positional `name .ident <|> pure none) <*>
    .named `suppressNamespaces .string true

end

/-- Checks whether `hl` consists of exactly one newline, ignoring empty segments. -/
def isNewline (hl : Highlighted) : Bool :=
  match hl with
  | .text str | .unparsed str => str == "\n"
  | .token .. => false
  | .seq xs => Id.run do
    for h : i in [0:xs.size] do
      if xs[i].isEmpty then continue
      else if isNewline xs[i] then
        -- The newline must be the last nonempty segment of the sequence
        return xs.extract (i+1) |>.all (·.isEmpty)
      else return false
    return false
  | .tactics _ _ _ x | .span _ x => isNewline x
  | .point .. => false

open SubVerso.Module in
/--
Leading anchor comments are always incorrect. They probably result from Lean placing them
with the _next_ command, so we should move them back up before processing them.
-/
def fixupAnchorComments (items : Array ModuleItem) : Array ModuleItem := Id.run do
  let mut out := #[]
  let mut prev? : Option ModuleItem := none
  for i in items do
    let mut i := i
    if prev?.isSome then
      -- Move leading newlines and anchor/proof-state comment lines onto the previous item
      let mut lines := i.code.lines
      while h : lines.size > 0 do
        if isNewline lines[0] || (proofState? lines[0].toString |>.isOk) then
          prev? := prev?.map (fun i => {i with code := i.code ++ lines[0]})
          lines := lines.drop 1
        else break
      i := {i with code := .seq lines}
    if let some prev := prev? then
      out := out.push prev
    prev? := some i
  if let some prev := prev?
then out := out.push prev return out private def showGoals (goals : Array (Highlighted.Goal Highlighted)) : MessageData := Id.run do if goals.isEmpty then return m!"No goals" let mut out := m!"" for g in goals do if let some n := g.name then out := out ++ m!"case {n}\n" for ⟨xs, h⟩ in g.hypotheses do let xs := " ".intercalate (xs.toList.map (fun ⟨_, x⟩ => x)) out := out ++ m!"{xs} : {h.toString}\n" out := out ++ m!" {g.goalPrefix} {g.conclusion.toString}\n\n" pure out @[code_block_expander lean] def lean : CodeBlockExpander | args, code => do let {checkOutput, suppressNamespaces, allowVisible, showProofStates, «show»} ← parseThe LeanConfig args let mut showProofStates := showProofStates let codeStr := code.getString let contents ← extractFile codeStr suppressNamespaces let contents := contents.filter (!·.code.isEmpty) let (pre, mid, post) := splitExample' contents let mid := fixupAnchorComments mid let pre : Option Highlighted := pre.map fun p => p.foldl (init := .empty) fun acc c => acc ++ c.code let mut toShow : Array ExampleItem := #[] let mut visibility : HighlightHtmlM.VisibleProofStates := match showProofStates with | .none => .none | .all => .all | .named _ => .states #[] for item in mid do let code ← match item.code.anchored (textAnchors := false) with | .ok a => for (k, v) in a.proofStates.toArray do if let .tactics goals start stop hl := v then logSilentInfo m!"Proof state {k} on `{v.toString}`:\n{showGoals goals}" modifyEnv (proofStatesExt.modifyState · (·.insert k ⟨goals, start, stop, hl⟩)) if let (.states ss, .named xs) := (visibility, showProofStates) then if k ∈ xs then visibility := .states (ss.push (start, stop)) showProofStates := .named (xs.filter (· ≠ k)) else throwError "Unexpected syntax for proof state '{k}':{indentD <| repr v}" pure a.code | .error e => throwError "Error while extracting proof states:{indentD e}" let item := {item with code} match item.kind with | ``Lean.Parser.Command.check | ``Lean.Parser.Command.eval | ``Lean.reduceCmd | 
``Lean.Parser.Command.check_failure | ``Lean.Parser.Command.print | ``Lean.Parser.Command.printAxioms | ``Lean.Parser.Command.printEqns | ``Lean.guardMsgsCmd => let info? : Option Highlighted.Message := allInfo item.code |>.firstM fun (msg, hl?) => if hl? matches some (.token ⟨.keyword .., _⟩) then some msg else none if let some msg := info? then if let (code, some (comment, ws)) := trailingComment item.code then let txt := msg.toString if checkOutput && !eqMessages comment txt then throwError "Mismatch! Expected {comment} but got {txt}" toShow := toShow.push ⟨code, some msg, dropOneNl ws⟩ else let (code', ws) := trailingText item.code toShow := toShow.push ⟨code', some msg, dropOneNl ws⟩ else toShow := toShow.push ⟨item.code, none, ""⟩ | _ => toShow := toShow.push ⟨item.code, none, ""⟩ let post : Option Highlighted := post.map fun p => p.foldl (init := .empty) fun acc c => acc ++ c.code let visible := .seq <| toShow.map (·.1) saveBackref visible for (msg, _) in allInfo visible do logSilentInfo msg.toString if let .named xs := showProofStates then unless xs.isEmpty do logWarning m!"Unused proof state names: {m!", ".joinSep (xs.map (m!"'{·}'")).toList}" if «show» then return #[← ``(Block.other (Block.lean $(quote allowVisible) $(quote pre) $(quote toShow) $(quote post) $(quote visibility)) #[])] else return #[] where eqMessages (s1 s2 : String) := SubVerso.Examples.Messages.messagesMatch (s1.replace "\n" " ") (s2.replace "\n" " ") dropOneNl (s : String) : String := if s.back == '\n' then (s.dropEnd 1).copy else s @[code_block_expander save] def save : CodeBlockExpander | args, code => do let {name, suppressNamespaces} ← parseThe SavedLeanConfig args let codeStr := code.getString let contents ← extractFile codeStr suppressNamespaces let contents : Highlighted := .seq <| contents.map (·.code) match contents.anchored with | .error e => throwError s!"Error extracting anchors: {e}" | .ok {anchors, proofStates, code} => let codeStr := code.toString for (k, v) in 
anchors.toArray do logSilentInfo m!"Anchor {k}:\n{v.toString}" modifyEnv (savedAnchorExt.modifyState · (·.insert k (v, codeStr))) for (k, v) in proofStates.toArray do if let .tactics goals start stop hl := v then logSilentInfo m!"Proof state {k} on `{v.toString}`:\n{showGoals goals}" modifyEnv (proofStatesExt.modifyState · (·.insert k ⟨goals, start, stop, hl⟩)) if let some x := name then modifyEnv (savedAnchorExt.modifyState · (·.insert x.getId.toString (code, codeStr))) pure #[] @[code_block_expander savedAnchor] def savedAnchor : CodeBlockExpander | args, code => do let name ← ArgParse.run (.positional `name .ident) args let env ← getEnv let some (hl, complete) := (savedAnchorExt.getState env)[name.getId.toString]? | throwErrorAt name m!"Not found: '{name.getId}'" discard <| ExpectString.expectString "code" code hl.toString saveBackref hl return #[← ``(Block.other (Block.leanAnchor $(quote hl) $(quote complete)) #[])] section structure ProofStateConfig where name : StrLit structure GoalConfig where name : StrLit variable [Monad m] [MonadError m ] [MonadLiftT CoreM m] private def strOrName : ValDesc m StrLit where description := "identifier or string literal" signature := .Ident ∪ .String get | .name x => pure <| Syntax.mkStrLit x.getId.toString (info := x.raw.getHeadInfo) | .str s => pure s | .num n => throwErrorAt n "Expected identifier or string literal" instance : FromArgs ProofStateConfig m where fromArgs := ProofStateConfig.mk <$> .positional `name strOrName instance : FromArgs GoalConfig m where fromArgs := GoalConfig.mk <$> .positional `name strOrName end @[code_block_expander proofState] def proofState : CodeBlockExpander | args, code => do let {name} ← parseThe ProofStateConfig args let some {goals, ..} := (proofStatesExt.getState (← getEnv))[name.getString]? 
| -- Unknown proof state: report an error, suggesting the known state names
  let allStates := (proofStatesExt.getState (← getEnv)).keys
  let h ←
    if allStates.isEmpty then
      pure <| MessageData.hint' "Name a proof state with a suitable PROOF_STATE: comment"
    else
      MessageData.hint "Use a proof state name:"
        (allStates.toArray.map ({suggestion := .string ·})) (ref? := some name)
  logErrorAt name m!"Not found: {name.getString}\n{h}"
  return #[← ``(sorry)]
-- Render the goals as text and check that the code block's contents match them
let mut goalView := ""
for g in goals do
  goalView := goalView ++ g.toString ++ "\n\n"
goalView := goalView.trimAsciiEnd.copy ++ "\n"
_ ← ExpectString.expectString "proof" code goalView
return #[← ``(Block.other (Block.goals $(quote goals)) #[])]

/--
The `goal` role: looks up a named case within a recorded proof state and renders it inline.
The positional argument names the proof state; the code element names the case.
-/
@[role_expander goal]
def goal : RoleExpander
  | args, inls => do
    let {name} ← parseThe GoalConfig args
    let caseTag ← oneCodeStr inls
    let some {goals, ..} := (proofStatesExt.getState (← getEnv))[name.getString]?
      | logErrorAt name m!"Not found: {name.getString}"
        return #[← ``(sorry)]
    let goal? := goals.find? fun
      | {name := some x, ..} => caseTag.getString == x
      | _ => false
    let some goal := goal?
      | let validTags := goals.filterMap (·.name)
        let h ←
          if validTags.isEmpty then
            -- Previously this hint had an empty message, rendering as a blank hint
            pure <| MessageData.hint' m!"This proof state contains no labeled cases"
          else
            MessageData.hint m!"Use a case label:"
              (validTags.map ({suggestion := .string ·})) (ref? := some caseTag)
        logErrorAt caseTag m!"Not found: {caseTag.getString}\n{h}"
        return #[← ``(sorry)]
    return #[← ``(Inline.other (Inline.goal $(quote goal)) #[])]

/--
An interface to a `subverso-helper` process that can highlight terms, commands,
signatures, and names in the context of a setup module.
-/
structure Helper where
  highlight (term : String) (type? : Option String) : IO Highlighted
  command (cmd : String) : IO Highlighted
  signature (code : String) : IO Highlighted
  name (code : String) : IO Highlighted

open System in
open SubVerso.Helper in
def Helper.fromModule (setup : String) : IO Helper := do
  -- The module name is derived from a hash of the setup code, so identical setups share it
  let codeHash := hash setup
  let modBase := "Interact" ++ hashString codeHash
  let filename := modBase ++ ".lean"
  let mod := "Examples."
++ modBase -- Validate that the path is really a Lean project let lakefile := projectDir / "lakefile.lean" let lakefile' := projectDir / "lakefile.toml" if !(← lakefile.pathExists) && !(← lakefile'.pathExists) then throw <| .userError s!"Neither {lakefile} nor {lakefile'} exist, couldn't load project" let toolchainfile := projectDir / "lean-toolchain" let toolchain ← do if !(← toolchainfile.pathExists) then throw <| .userError s!"File {toolchainfile} doesn't exist, couldn't load project" pure (← IO.FS.readFile toolchainfile).trimAscii.copy IO.FS.writeFile (projectDir / "Examples" / filename) setup -- Kludge: remove variables introduced by Lake. Clearing out DYLD_LIBRARY_PATH and -- LD_LIBRARY_PATH is useful so the version selected by Elan doesn't get the wrong shared -- libraries. let lakeVars := #["LAKE", "LAKE_HOME", "LAKE_PKG_URL_MAP", "LEAN_SYSROOT", "LEAN_AR", "LEAN_PATH", "LEAN_SRC_PATH", "LEAN_GITHASH", "ELAN_TOOLCHAIN", "DYLD_LIBRARY_PATH", "LD_LIBRARY_PATH"] let cmd := "elan" let toolchainFile ← IO.FS.Handle.mk toolchainfile .read toolchainFile.lock (exclusive := true) try let args := #["run", "--install", toolchain, "lake", "build", "subverso-helper"] let res ← IO.Process.output { cmd, args, cwd := projectDir -- Unset Lake's environment variables env := lakeVars.map (·, none) } if res.exitCode != 0 then reportFail projectDir cmd args res finally toolchainFile.unlock let setupFile ← IO.FS.Handle.mk (projectDir / "Examples" / filename) .read setupFile.lock (exclusive := true) try let args := #["run", "--install", toolchain, "lake", "env", "subverso-helper", mod] let (hlTm, hlCmd, hlSig, hlName) ← do let (procIn, proc) ← do let proc ← IO.Process.spawn { cmd, args, cwd := projectDir -- Unset Lake's environment variables env := lakeVars.map (·, none) stdin := .piped stdout := .piped stderr := .inherit } proc.takeStdin let mutex ← Std.Mutex.new (IO.FS.Stream.ofHandle procIn, IO.FS.Stream.ofHandle proc.stdout) let hlTm := fun (tm : String) (ty? 
: Option String) => show IO Highlighted from do mutex.atomically do let (procIn, procOut) ← get if let some code ← proc.tryWait then throw <| .userError s!"Process terminated: {code}" send procIn (Request.term tm ty?) match (← receiveThe Response procOut) with | some (.result (.highlighted hl)) => pure hl | some (.error code e more) => let mut msg := s!"{e} ({code})." if let some details := more then msg := msg ++ s!" Details:\n {details}" throw <| .userError msg | none => throw <| .userError "Helper process no longer running" let hlCmd := fun (cmd : String) => show IO Highlighted from do mutex.atomically do let (procIn, procOut) ← get if let some code ← proc.tryWait then throw <| .userError s!"Process terminated: {code}" send procIn (Request.command cmd) match (← receiveThe Response procOut) with | some (.result (.highlighted hl)) => pure hl | some (.error code e more) => let mut msg := s!"{e} ({code})." if let some details := more then msg := msg ++ s!" Details:\n {details}" throw <| .userError msg | none => throw <| .userError "Helper process no longer running" let hlSig := fun (cmd : String) => show IO Highlighted from do mutex.atomically do let (procIn, procOut) ← get if let some code ← proc.tryWait then throw <| .userError s!"Process terminated: {code}" send procIn (Request.signature cmd) match (← receiveThe Response procOut) with | some (.result (.highlighted hl)) => pure hl | some (.error code e more) => let mut msg := s!"{e} ({code})." if let some details := more then msg := msg ++ s!" 
Details:\n {details}" throw <| .userError msg | none => throw <| .userError "Helper process no longer running" let hlName := fun (cmd : String) => show IO Highlighted from do mutex.atomically do let (procIn, procOut) ← get if let some code ← proc.tryWait then throw <| .userError s!"Process terminated: {code}" send procIn (Request.name cmd) match (← receiveThe Response procOut) with | some (.result (.highlighted hl)) => pure hl | some (.error code e more) => let mut msg := s!"{e} ({code})." if let some details := more then msg := msg ++ s!" Details:\n {details}" throw <| .userError msg | none => throw <| .userError "Helper process no longer running" pure (hlTm, hlCmd, hlSig, hlName) return Helper.mk hlTm hlCmd hlSig hlName finally setupFile.unlock where decorateOut (name : String) (out : String) : String := if out.isEmpty then "" else s!"\n{name}:\n{out}\n" reportFail {α} (projectDir : FilePath) (cmd : String) (args : Array String) (res : IO.Process.Output) : IO α := do IO.eprintln <| "Build process failed." ++ "\nCWD: " ++ projectDir.toString ++ "\nCommand: " ++ cmd ++ "\nArgs: " ++ repr args ++ "\nExit code: " ++ toString res.exitCode ++ "\nstdout: " ++ res.stdout ++ "\nstderr: " ++ res.stderr throw <| .userError <| "Build process failed." 
++ decorateOut "stdout" res.stdout
    ++ decorateOut "stderr" res.stderr

-- Helper installed by the `setup` directive, scoped to the directive's contents
initialize helperExt : EnvExtension (Option Helper) ← registerEnvExtension (pure none)

-- Fallback helper installed by the `setup` code block, used when no directive is active
initialize defaultHelperExt : EnvExtension (Option Helper) ← registerEnvExtension (pure none)

/--
The `setup` directive: its first (undecorated) code block becomes the context in which
the remaining blocks' Lean code is highlighted.
-/
@[directive_expander setup]
def setup : DirectiveExpander
  | args, contents => do
    ArgParse.done.run args
    if h : contents.size < 1 then
      throwError "Expected a setup code block"
    else
      let first := contents[0]
      let contents := contents.extract 1 contents.size
      let `(block|``` | $setupCode ```) := first
        | throwErrorAt first "Expected undecorated code block"
      if helperExt.getState (← getEnv) |>.isSome then
        throwError "Already highlighting Lean"
      let helper ← Helper.fromModule setupCode.getString
      modifyEnv fun env => helperExt.setState env (some helper)
      try
        contents.mapM elabBlock
      finally
        -- Always clear the helper, even when elaboration of the contents fails
        modifyEnv fun env => helperExt.setState env none

/--
Runs `act` on the prioritized elements first, then on the rest, returning the results in
the elements' original order.
-/
def prioritizedElab [Monad m] (prioritize : α → m Bool) (act : α → m β) (xs : Array α) : m (Array β) := do
  let mut out := #[]
  let mut later := #[]
  for h:i in [0:xs.size] do
    let x := xs[i]
    if ← prioritize x then
      out := out.push (i, (← act x))
    else
      later := later.push (i, x)
  for (i, x) in later do
    out := out.push (i, (← act x))
  -- Restore the original ordering by index
  out := out.qsort (fun (i, _) (j, _) => i < j)
  return out.map (·.2)

open Lean Elab in
/-- Checks whether a block is a Lean code block (`lean`, `signature`, or `savedAnchor`). -/
def isLeanBlock : TSyntax `block → CoreM Bool
  | `(block|```$nameStx:ident $_args*|$_contents:str```) => do
    let name ← realizeGlobalConstNoOverloadWithInfo nameStx
    return name == ``TPiL.lean || name == `TPiL.signature || name == `TPiL.savedAnchor
  | _ => pure false

/--
Elaborates Lean blocks first, maintaining the order of blocks. This makes their code
available for `leanRef`.
-/ @[directive_expander leanFirst] def leanFirst : DirectiveExpander | args, contents => do ArgParse.done.run args -- Elaborate Lean blocks first, so inlines in prior blocks can refer to them prioritizedElab (isLeanBlock ·) elabBlock contents @[code_block_expander setup] def setupCode : CodeBlockExpander | args, code => do ArgParse.done.run args let helper ← Helper.fromModule code.getString modifyEnv fun env => defaultHelperExt.setState env (some helper) return #[] @[directive_expander comment] def comment : DirectiveExpander | _, _ => pure #[] @[directive_expander TODO] def TODO : DirectiveExpander | _, _ => pure #[] @[role_expander TODO] def TODOinline : RoleExpander | _, _ => pure #[] @[role_expander kw] def kw : RoleExpander | args, inls => do ArgParse.done.run args let kw ← oneCodeStr inls let hl : Highlighted := .token ⟨.keyword none none none, kw.getString⟩ -- TODO kw xref return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote kw.getString)])] @[role_expander attr] def attr : RoleExpander | args, inls => do ArgParse.done.run args let kw ← oneCodeStr inls let hl : Highlighted := .token ⟨.keyword none none none, kw.getString⟩ -- TODO attr xref return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote kw.getString)])] @[role_expander tactic] def tactic : RoleExpander | args, inls => do ArgParse.done.run args let kw ← oneCodeStr inls let hl : Highlighted := .token ⟨.keyword none none none, kw.getString⟩ -- TODO kw xref return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote kw.getString)])] @[role_expander kbd] def kbd : RoleExpander | args, inls => do ArgParse.done.run args let kbd ← codeStrs inls return #[← ``(Inline.other (Inline.kbd $(quote <| kbd.map (·.getString))) #[])] @[role_expander option] def option : RoleExpander | args, inls => do ArgParse.done.run args let kw ← oneCodeStr inls return #[← ``(Inline.code $(quote kw.getString))] def currentHelper : DocElabM Helper := do if let some h := 
helperExt.getState (← getEnv) then pure h else if let some h := defaultHelperExt.getState (← getEnv) then pure h else let helper ← Helper.fromModule s!"-- EMPTY for {← getMainModule}\n" modifyEnv fun env => defaultHelperExt.setState env (some helper) pure helper def multiVar? (str : String) : Option (Array String × String) := do let mut out := #[] let mut str := str.trimAscii repeat let pref1 := str.takeWhile alpha let length1 := pref1.positions.count if length1 < 1 then failure str := str.drop length1 let pref2 := str.takeWhile (fun c => alpha c || c.isDigit) let length2 := pref2.positions.count str := str.drop length2 let pref := pref1.copy ++ pref2.copy let c := str.startPos.get? if pref.length > 0 && (c.isEqSome ' ' || c.isEqSome ':') then out := out.push pref str := str.dropWhile (· == ' ') else failure if str.startPos.get? |>.isEqSome ':' then str := str.drop 1 str := str.dropWhile (· == ' ') if str.isEmpty then failure return (out, str.copy) failure where alpha c := c.isAlpha || c ∈ ['α', 'β', 'γ'] def highlightInline (code : String) (type? : Option String := none) : DocElabM Highlighted := do let helper ← currentHelper try if type?.isSome then throwError "failed" let some (vars, type) := multiVar? code | throwError "failed" let mut out : Highlighted := .empty for v in vars do out := out ++ (← helper.highlight v (some type)) ++ .text " " out := out ++ .text ": " out := out ++ (← helper.highlight type none) pure out catch e1 => try let codeStr := "(\n" ++ code ++ "\n)" let hl ← helper.highlight codeStr type? pure (hl.lines.extract 1 (hl.lines.size - 1) |> Highlighted.seq) catch e2 => throwError "Failed to highlight code. 
Errors:{indentD e1.toMessageData}\nand:{indentD e2.toMessageData}" def highlightCommand (code : String) : DocElabM Highlighted := do let helper ← currentHelper helper.command code def highlightSignature (code : String) : DocElabM Highlighted := do let helper ← currentHelper helper.signature code def highlightName (code : String) : DocElabM Highlighted := do let helper ← currentHelper helper.name code @[role_expander lean] def leanInline : RoleExpander | args, inls => do let type? ← ArgParse.run (.named `type .string true) args let code ← oneCodeStr inls let codeStr := code.getString try let hl ← highlightInline codeStr type? saveBackref hl return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote hl.toString)])] catch | .error refStx e => logErrorAt refStx e return #[← ``(sorry)] | e => throw e @[role_expander name] def name : RoleExpander | args, inls => do let show? ← ArgParse.run (.named `show .string true) args let code ← oneCodeStr inls let codeStr := code.getString try let hl ← highlightName codeStr let hl := if let some s := show? then if let .token ⟨k, _⟩ := hl then .token ⟨k, s⟩ else hl else hl saveBackref hl match hl with | .token ⟨k, _⟩ => match k with | .const _ sig doc? _ => Hover.addCustomHover code <| s!"```\n{sig}\n```\n" ++ (doc?.map ("\n\n***\n\n" ++ ·) |>.getD "") | .var _ sig => Hover.addCustomHover code <| s!"```\n{sig}\n```\n" | _ => pure () | _ => pure () return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote hl.toString)])] catch | .error refStx e => logErrorAt refStx e return #[← ``(sorry)] | e => throw e @[role_expander leanCommand] def leanCommand : RoleExpander | args, inls => do let type? 
← ArgParse.done.run args
    let code ← oneCodeStr inls
    let codeStr := code.getString
    try
      let hl ← highlightCommand codeStr
      saveBackref hl
      -- Record every message the command produced, for output checking
      for (msg, _) in allInfo hl do
        let k :=
          match msg.severity with
          | .info => "info"
          | .error => "error"
          | .warning => "warning"
        logSilentInfo m!"{k}: {msg.toString}"
      return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote hl.toString)])]
    catch
      | .error refStx e =>
        logErrorAt refStx e
        return #[← ``(sorry)]
      | e => throw e

/--
The `leanCommand` code block: highlights a stand-alone command in the current helper
context and records its messages for output checking.
-/
@[code_block_expander leanCommand]
def leanCommandBlock : CodeBlockExpander
  | args, code => do
    -- This expander takes no arguments (was: a misleading dead `let type? ←` binding)
    ArgParse.done.run args
    let codeStr := code.getString
    try
      let hl ← highlightCommand codeStr
      saveBackref hl
      for (msg, _) in allInfo hl do
        let k :=
          match msg.severity with
          | .info => "info"
          | .error => "error"
          | .warning => "warning"
        logSilentInfo m!"{k}: {msg.toString}"
      return #[← ``(Block.other (Block.lean false none #[ExampleItem.mk $(quote hl) none ""] none) #[])]
    catch
      | .error refStx e =>
        logErrorAt refStx e
        return #[← ``(sorry)]
      | e => throw e

/--
The `signature` code block: highlights a declaration signature in the current helper
context and records its messages.
-/
@[code_block_expander signature]
def signature : CodeBlockExpander
  | args, code => do
    -- This expander takes no arguments (was: a misleading dead `let type? ←` binding)
    ArgParse.done.run args
    let codeStr := code.getString
    try
      let hl ← highlightSignature codeStr
      saveBackref hl
      for (msg, _) in allInfo hl do
        let k :=
          match msg.severity with
          | .info => "info"
          | .error => "error"
          | .warning => "warning"
        logSilentInfo m!"{k}: {msg.toString}"
      return #[← ``(Block.other (Block.lean false none #[ExampleItem.mk $(quote hl) none ""] none) #[])]
    catch
      | .error refStx e =>
        logErrorAt refStx e
        return #[← ``(sorry)]
      | e => throw e

/--
The `leanRef` role: refers back to code shown in a recent example, optionally narrowed by
the `in` argument to a containing expression.
-/
@[role_expander leanRef]
def leanRef : RoleExpander
  | args, inls => do
    let in? ← ArgParse.run (.named `in .string true) args
    let code ← oneCodeStr inls
    let codeStr := code.getString
    for prev in recentHighlightsExt.getState (← getEnv) do
      if let some «in» := in? then
        if let some hl := prev.matchingExpr? «in» then
          if let some hl := hl.matchingExpr?
codeStr then
            return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote hl.toString)])]
          else break
      else if let some hl := prev.matchingExpr? codeStr then
        return #[← ``(Inline.other (Inline.lean $(quote hl) {}) #[Inline.code $(quote hl.toString)])]
    throwError "Not found: '{codeStr}'"

/-- The `empty` role: accepts no arguments and elaborates to nothing. -/
@[role_expander empty]
def empty : RoleExpander
  | args, _inls => do
    ArgParse.done.run args
    return #[]

-- Code elements matching one of these get a `kw`-role suggestion
private def keywords := [
  "#print", "#eval", "#print axioms", "#reduce", "#check", "noncomputable", "protected",
  "partial", "import", "export", "local", "def", "example", "instance", "macro_rules",
  "axiom", "if", "then", "else", "show", "have", "calc", "universe", "section", "end",
  "variable", "open", "set_option", "let", "fun"
]

-- Code elements matching one of these get a `tactic`-role suggestion
private def tactics := [
  "if", "then", "else", "show", "have", "calc", "simp", "rw", "let", "fun", "<;>"
]

-- Code elements matching one of these get a `lit`-role suggestion
private def leanLits := [
  "→", "->", ";", "×", ".", "_", "⟨", "⟩"
]

open MessageData (hint) in
/-- Internal detail of suggestion mechanism. -/
@[inline_expander Lean.Doc.Syntax.code]
private def suggest : InlineExpander
  | `(inline| code( $str )) => do
    let str' := str.getString
    -- unless verso.examples.suggest.get (← getOptions) do
    --   -- Delegate to the next handler
    --   Elab.throwUnsupportedSyntax
    if str' ∈ keywords then
      let h ← hint m!"Add the `kw` role:" #["{kw}`" ++ str' ++ "`"]
      logWarning <| m!"Code element could be a keyword." ++ h
    else if str'.startsWith "\\" then
      let h ← hint m!"Add the `kbd` role:" #["{kbd}`" ++ str' ++ "`"]
      logWarning <| m!"Code element could be a Unicode abbreviation." ++ h
    else if (← getOptionDecls).any (fun x _ => x.toString == str'.trimAscii) then
      let h ← hint m!"Add the `option` role:" #["{option}`" ++ str' ++ "`"]
      logWarning <| m!"Code element could be a compiler option."
++ h
    else
      let mut suggs : Array Meta.Hint.Suggestion := #[]
      let mut exns := #[]
      -- The code element might be an inline Lean term
      try
        let _ ← highlightInline str'
        suggs := suggs.push <| "{lean}`" ++ str' ++ "`"
      catch e => exns := exns.push e
      if str' ∈ tactics then
        suggs := suggs.push <| "{tactic}`" ++ str' ++ "`"
      -- It might refer back to recently shown code
      for prev in recentHighlightsExt.getState (← getEnv) do
        if let some _ := prev.matchingExpr? str' then
          suggs := suggs.push <| "{leanRef}`" ++ str' ++ "`"
          break
      if str' ∈ leanLits then
        suggs := suggs.push <| "{lit}`" ++ str' ++ "`"
      -- It might be a case label in a recorded proof state
      for (name, {goals := gs, ..}) in proofStatesExt.getState (← getEnv) do
        let name := if name.any (·.isWhitespace) then name.quote else name
        if gs.any (·.name |>.isEqSome str') then
          suggs := suggs.push <| "{goal " ++ name ++ "}`" ++ str' ++ "`"
      if suggs.isEmpty then
        let h ← hint m!"Add the `lit` role to indicate that it denotes literal characters:"
          #["{lit}`" ++ str' ++ "`"]
        logWarning <| m!"Code element is missing a role, and can't be Lean code:{m!"\nand\n".joinSep (exns.map (indentD ·.toMessageData) |>.toList)}" ++ h
      else
        let h ← hint m!"Add a `lean` role:" suggs
        logWarning <| m!"Code element could be highlighted." ++ h
    return (← ``(Inline.code $(quote str.getString)))
  | _ => Elab.throwUnsupportedSyntax
theorem_proving_in_lean4/book/TPiL/InductionAndRecursion.lean
import VersoManual import TPiL.Examples open Verso.Genre open Verso.Genre.Manual hiding tactic open TPiL #doc (Manual) "Induction and Recursion" => %%% tag := "induction-and-recursion" %%% In the previous chapter, we saw that inductive definitions provide a powerful means of introducing new types in Lean. Moreover, the constructors and the recursors provide the only means of defining functions on these types. By the {tech}[propositions-as-types] correspondence, this means that induction is the fundamental method of proof. Lean provides natural ways of defining recursive functions, performing pattern matching, and writing inductive proofs. It allows you to define a function by specifying equations that it should satisfy, and it allows you to prove a theorem by specifying how to handle various cases that can arise. Behind the scenes, these descriptions are “compiled” down to primitive recursors, using a procedure that we refer to as the “equation compiler.” The equation compiler is not part of the trusted code base; its output consists of terms that are checked independently by the kernel. # Pattern Matching %%% tag := "pattern-matching" %%% The interpretation of schematic patterns is the first step of the compilation process. We have seen that the {lit}`casesOn` recursor can be used to define functions and prove theorems by cases, according to the constructors involved in an inductively defined type. But complicated definitions may use several nested {lit}`casesOn` applications, and may be hard to read and understand. Pattern matching provides an approach that is more convenient, and familiar to users of functional programming languages. :::setup ``` open Nat variable (x : Nat) ``` Consider the inductively defined type of natural numbers. 
Every natural number is either {lean}`zero` or {lean}`succ x`, and so you can define a function from the natural numbers to an arbitrary type by specifying a value in each of those cases: ::: ```lean set_option linter.unusedVariables false -------- open Nat def sub1 : Nat → Nat | zero => zero | succ x => x def isZero : Nat → Bool | zero => true | succ x => false ``` The equations used to define these functions hold definitionally: ```lean open Nat def sub1 : Nat → Nat | zero => zero | succ x => x def isZero : Nat → Bool | zero => true | succ x => false ------ example : sub1 0 = 0 := rfl example (x : Nat) : sub1 (succ x) = x := rfl example : isZero 0 = true := rfl example (x : Nat) : isZero (succ x) = false := rfl example : sub1 7 = 6 := rfl example (x : Nat) : isZero (x + 3) = false := rfl ``` Instead of {leanRef}`zero` and {leanRef}`succ`, we can use more familiar notation: ```lean set_option linter.unusedVariables false -------- def sub1 : Nat → Nat | 0 => 0 | x + 1 => x def isZero : Nat → Bool | 0 => true | x + 1 => false ``` Because addition and the zero notation have been assigned the {attr}`[match_pattern]` attribute, they can be used in pattern matching. Lean simply normalizes these expressions until the constructors {leanRef}`zero` and {leanRef}`succ` are exposed. 
Pattern matching works with any inductive type, such as products and option types: ```lean def swap : α × β → β × α | (a, b) => (b, a) def foo : Nat × Nat → Nat | (m, n) => m + n def bar : Option Nat → Nat | some n => n + 1 | none => 0 ``` Here we use it not only to define a function, but also to carry out a proof by cases: ```lean namespace Hidden ------ def not : Bool → Bool | true => false | false => true theorem not_not : ∀ (b : Bool), not (not b) = b | true => show not (not true) = true from rfl | false => show not (not false) = false from rfl ------ end Hidden ``` Pattern matching can also be used to destruct inductively defined propositions: ```lean example (p q : Prop) : p ∧ q → q ∧ p | And.intro h₁ h₂ => And.intro h₂ h₁ example (p q : Prop) : p ∨ q → q ∨ p | Or.inl hp => Or.inr hp | Or.inr hq => Or.inl hq ``` This provides a compact way of unpacking hypotheses that make use of logical connectives. In all these examples, pattern matching was used to carry out a single case distinction. More interestingly, patterns can involve nested constructors, as in the following examples. ```lean def sub2 : Nat → Nat | 0 => 0 | 1 => 0 | x + 2 => x ``` The equation compiler first splits on cases as to whether the input is {leanRef}`zero` or of the form {leanRef}`succ x`. It then does a case split on whether {leanRef}`x` is of the form {leanRef}`zero` or {leanRef}`succ x`. It determines the necessary case splits from the patterns that are presented to it, and raises an error if the patterns fail to exhaust the cases. Once again, we can use arithmetic notation, as in the version below. In either case, the defining equations hold definitionally. 
```lean def sub2 : Nat → Nat | 0 => 0 | 1 => 0 | x+2 => x ------ example : sub2 0 = 0 := rfl example : sub2 1 = 0 := rfl example : sub2 (x+2) = x := rfl example : sub2 5 = 3 := rfl ``` :::setup ``` def sub2 : Nat → Nat | 0 => 0 | 1 => 0 | x + 2 => x ``` You can write {leanCommand}`#print sub2` to see how the function was compiled to recursors. (Lean will tell you that {leanRef}`sub2` has been defined in terms of an internal auxiliary function, {lean}`sub2.match_1`, but you can print that out too.) Lean uses these auxiliary functions to compile {kw}`match` expressions. Actually, the definition above is expanded to ::: ```lean def sub2 : Nat → Nat := fun x => match x with | 0 => 0 | 1 => 0 | x + 2 => x ``` Here are some more examples of nested pattern matching: ```lean set_option linter.unusedVariables false -------- example (p q : α → Prop) : (∃ x, p x ∨ q x) → (∃ x, p x) ∨ (∃ x, q x) | Exists.intro x (Or.inl px) => Or.inl (Exists.intro x px) | Exists.intro x (Or.inr qx) => Or.inr (Exists.intro x qx) def foo : Nat × Nat → Nat | (0, n) => 0 | (m+1, 0) => 1 | (m+1, n+1) => 2 ``` The equation compiler can process multiple arguments sequentially. For example, it would be more natural to define the previous example as a function of two arguments: ```lean set_option linter.unusedVariables false -------- def foo : Nat → Nat → Nat | 0, n => 0 | m + 1, 0 => 1 | m + 1, n + 1 => 2 ``` Here is another example: ```lean set_option linter.unusedVariables false -------- def bar : List Nat → List Nat → Nat | [], [] => 0 | a :: as, [] => a | [], b :: bs => b | a :: as, b :: bs => a + b ``` Note that the patterns are separated by commas. In each of the following examples, splitting occurs on only the first argument, even though the others are included among the list of patterns. 
```lean set_option linter.unusedVariables false namespace Hidden ------ def and : Bool → Bool → Bool | true, a => a | false, _ => false def or : Bool → Bool → Bool | true, _ => true | false, a => a def cond : Bool → α → α → α | true, x, y => x | false, x, y => y ------ end Hidden ``` Notice also that, when the value of an argument is not needed in the definition, you can use an underscore instead. This underscore is known as a _wildcard pattern_, or an _anonymous variable_. In contrast to usage outside the equation compiler, here the underscore does _not_ indicate an implicit argument. The use of underscores for wildcards is common in functional programming languages, and so Lean adopts that notation. The section on {ref "wildcards-and-overlapping-patterns"}[wildcards and overlapping patterns] expands on the notion of a wildcard, and the description of {ref "inaccessible-patterns"}[inaccessible patterns] explains how you can use implicit arguments in patterns as well. ::::setup ``` set_option linter.unusedVariables false -------- def tail : List α → List α | [] => [] | a :: as => as ``` :::leanFirst As described in {ref "inductive-types"}[Inductive Types], inductive data types can depend on parameters. The following example defines the {name}`tail` function using pattern matching. The argument {leanRef}`α : Type u` is a parameter and occurs before the colon to indicate it does not participate in the pattern matching. Lean also allows parameters to occur after the {leanRef}`:`, but pattern matching on them requires an explicit {leanRef}`match`. ```lean set_option linter.unusedVariables false -------- def tail1 {α : Type u} : List α → List α | [] => [] | a :: as => as def tail2 : {α : Type u} → List α → List α | α, [] => [] | α, a :: as => as ``` ::: :::: Despite the different placement of the parameter {leanRef}`α` in these two examples, in both cases it is treated in the same way, in that it does not participate in a case split. 
Lean can also handle more complex forms of pattern matching, in which arguments to dependent types pose additional constraints on the various cases. Such examples of _dependent pattern matching_ are considered in the section on {ref "dependent-pattern-matching"}[dependent pattern matching]. # Wildcards and Overlapping Patterns %%% tag := "wildcards-and-overlapping-patterns" %%% Consider one of the examples from the last section: ```lean set_option linter.unusedVariables false -------- def foo : Nat → Nat → Nat | 0, n => 0 | m + 1, 0 => 1 | m + 1, n + 1 => 2 ``` An alternative presentation is: ```lean set_option linter.unusedVariables false -------- def foo : Nat → Nat → Nat | 0, n => 0 | m, 0 => 1 | m, n => 2 ``` In the second presentation, the patterns overlap; for example, the pair of arguments {lit}`0, 0` matches all three cases. But Lean handles the ambiguity by using the first applicable equation, so in this example the net result is the same. In particular, the following equations hold definitionally: ```lean def foo : Nat → Nat → Nat | 0, n => 0 | m, 0 => 1 | m, n => 2 ------ example : foo 0 0 = 0 := rfl example : foo 0 (n + 1) = 0 := rfl example : foo (m + 1) 0 = 1 := rfl example : foo (m + 1) (n + 1) = 2 := rfl ``` Since the values of {leanRef (in:="m, n")}`m` and {leanRef (in:="m, n")}`n` are not needed, we can just as well use wildcard patterns instead. ```lean def foo : Nat → Nat → Nat | 0, _ => 0 | _, 0 => 1 | _, _ => 2 ``` You can check that this definition of {leanRef}`foo` satisfies the same definitional identities as before. :::setup ``` variable (α : Type u) (a : α) ``` Some functional programming languages support _incomplete patterns_. In these languages, the interpreter produces an exception or returns an arbitrary value for incomplete cases. We can simulate the arbitrary value approach using the {lean}`Inhabited` type class. 
Roughly, an element of {lean}`Inhabited α` is a witness to the fact that there is an element of {lean}`α`; in {ref "type-classes"}[the chapter on type classes] we will see that Lean can be instructed that suitable base types are inhabited, and can automatically infer that other constructed types are inhabited. On this basis, the standard library provides a default element, {lean}`default`, of any inhabited type. We can also use the type {lean}`Option α` to simulate incomplete patterns. The idea is to return {lean}`some a` for the provided patterns, and use {lean (type:="Option α")}`none` for the incomplete cases. The following example demonstrates both approaches. ::: ```lean def f1 : Nat → Nat → Nat | 0, _ => 1 | _, 0 => 2 | _, _ => default -- the "incomplete" case example : f1 0 0 = 1 := rfl example : f1 0 (a+1) = 1 := rfl example : f1 (a+1) 0 = 2 := rfl example : f1 (a+1) (b+1) = default := rfl def f2 : Nat → Nat → Option Nat | 0, _ => some 1 | _, 0 => some 2 | _, _ => none -- the "incomplete" case example : f2 0 0 = some 1 := rfl example : f2 0 (a+1) = some 1 := rfl example : f2 (a+1) 0 = some 2 := rfl example : f2 (a+1) (b+1) = none := rfl ``` The equation compiler is clever. If you leave out any of the cases in the following definition, the error message will let you know what has not been covered. ```lean def bar : Nat → List Nat → Bool → Nat | 0, _, false => 0 | 0, b :: _, _ => b | 0, [], true => 7 | a+1, [], false => a | a+1, [], true => a + 1 | a+1, b :: _, _ => a + b ``` It will also use an {kw}`if`{lit}` ... `{kw}`then`{lit}` ... `{kw}`else` instead of a {lit}`casesOn` in appropriate situations. ```lean set_option pp.proofs true ------- def foo : Char → Nat | 'A' => 1 | 'B' => 2 | _ => 3 #print foo.match_1 ``` # Structural Recursion and Induction %%% tag := "structural-recursion-and-induction" %%% What makes the equation compiler powerful is that it also supports recursive definitions. 
In the next three sections, we will describe, respectively: - structurally recursive definitions - well-founded recursive definitions - mutually recursive definitions Generally speaking, the equation compiler processes input of the following form: ``` def foo (a : α) : (b : β) → γ | [patterns₁] => t₁ ... | [patternsₙ] => tₙ ``` Here {lit}`(a : α)` is a sequence of parameters, {lit}`(b : β)` is the sequence of arguments on which pattern matching takes place, and {lit}`γ` is any type, which can depend on {lit}`a` and {lit}`b`. Each line should contain the same number of patterns, one for each element of {lit}`β`. As we have seen, a pattern is either a variable, a constructor applied to other patterns, or an expression that normalizes to something of that form (where the non-constructors are marked with the {attr}`[match_pattern]` attribute). The appearances of constructors prompt case splits, with the arguments to the constructors represented by the given variables. In the section on {ref "dependent-pattern-matching"}[dependent pattern matching], we will see that some explicit terms in patterns are forced into a particular form in order to make an expression type check, though they do not play a role in pattern matching. These are called “{deftech}[inaccessible patterns]” for that reason. But we will not need to use such inaccessible patterns before covering {ref "dependent-pattern-matching"}[dependent pattern matching]. As we saw in the last section, the terms {lit}`t₁, ..., tₙ` can make use of any of the parameters {lit}`a`, as well as any of the variables that are introduced in the corresponding patterns. What makes recursion and induction possible is that they can also involve recursive calls to {lit}`foo`. In this section, we will deal with _structural recursion_, in which the arguments to {lit}`foo` occurring on the right-hand side of the {lit}`=>` are subterms of the patterns on the left-hand side. 
The idea is that they are structurally smaller, and hence appear in the inductive type at an earlier stage. Here are some examples of structural recursion from the last chapter, now defined using the equation compiler: ```lean open Nat def add : Nat → Nat → Nat | m, zero => m | m, succ n => succ (add m n) theorem add_zero (m : Nat) : add m zero = m := rfl theorem add_succ (m n : Nat) : add m (succ n) = succ (add m n) := rfl theorem zero_add : ∀ n, add zero n = n | zero => rfl | succ n => congrArg succ (zero_add n) def mul : Nat → Nat → Nat | n, zero => zero | n, succ m => add (mul n m) n ``` The proof of {leanRef}`zero_add` makes it clear that proof by induction is really a form of recursion in Lean. The example above shows that the defining equations for {leanRef}`add` hold definitionally, and the same is true of {leanRef}`mul`. The equation compiler tries to ensure that this holds whenever possible, as is the case with straightforward structural induction. In other situations, however, reductions hold only _propositionally_, which is to say, they are equational theorems that must be applied explicitly. The equation compiler generates such theorems internally. They are not meant to be used directly by the user; rather, the {tactic}`simp` tactic is configured to use them when necessary. The following proof of {leanRef}`zero_add` works this way: ```lean open Nat def add : Nat → Nat → Nat | m, zero => m | m, succ n => succ (add m n) ----- theorem zero_add : ∀ n, add zero n = n | zero => by simp [add] | succ n => by simp [add, zero_add] ``` As with definition by pattern matching, parameters to a structural recursion or induction may appear before the colon. Such parameters are simply added to the local context before the definition is processed. For example, the definition of addition may also be written as follows: ```lean open Nat def add (m : Nat) : Nat → Nat | zero => m | succ n => succ (add m n) ``` You can also write the example above using {kw}`match`. 
```lean open Nat def add (m n : Nat) : Nat := match n with | zero => m | succ n => succ (add m n) ``` :::leanFirst A more interesting example of structural recursion is given by the Fibonacci function {leanRef}`fib`. ```lean def fib : Nat → Nat | 0 => 1 | 1 => 1 | n+2 => fib (n+1) + fib n example : fib 0 = 1 := rfl example : fib 1 = 1 := rfl example : fib (n + 2) = fib (n + 1) + fib n := rfl example : fib 7 = 21 := rfl ``` ::: :::setup ``` variable (n : Nat) open Nat ``` Here, the value of the {leanRef}`fib` function at {leanRef}`n + 2` (which is definitionally equal to {lean}`succ (succ n)`) is defined in terms of the values at {leanRef}`n + 1` (which is definitionally equivalent to {lean}`succ n`) and the value at {leanRef}`n`. This is a notoriously inefficient way of computing the Fibonacci function, however, with an execution time that is exponential in {lean}`n`. Here is a better way: ::: ```lean def fibFast (n : Nat) : Nat := (loop n).2 where loop : Nat → Nat × Nat | 0 => (0, 1) | n+1 => let p := loop n; (p.2, p.1 + p.2) #eval fibFast 100 -- 573147844013817084101 ``` Here is the same definition using a {kw}`let rec` instead of a {kw}`where`. ```lean def fibFast (n : Nat) : Nat := let rec loop : Nat → Nat × Nat | 0 => (0, 1) | n+1 => let p := loop n; (p.2, p.1 + p.2) (loop n).2 ``` In both cases, Lean generates the auxiliary function {lit}`fibFast.loop`. :::leanFirst To handle structural recursion, the equation compiler uses _course-of-values_ recursion, using constants {lit}`below` and {lit}`brecOn` that are automatically generated with each inductively defined type. 
You can get a sense of how it works by looking at the types of {leanRef}`Nat.below` and {leanRef}`Nat.brecOn`: ```lean variable (C : Nat → Type u) #check (@Nat.below C : Nat → Type u) #reduce @Nat.below C (3 : Nat) #check (@Nat.brecOn C : (n : Nat) → ((n : Nat) → @Nat.below C n → C n) → C n) ``` ::: :::setup ``` variable (C : Nat → Type u) (n : Nat) ``` The type {lean}`@Nat.below C (3 : Nat)` is a data structure that stores elements of {lean}`C 0`, {lean}`C 1`, and {lean}`C 2`. The course-of-values recursion is implemented by {name}`Nat.brecOn`. It enables us to define the value of a dependent function of type {lean}`(n : Nat) → C n` at a particular input {lean}`n` in terms of all the previous values of the function, presented as an element of {lean}`@Nat.below C n`. ::: :::leanFirst The use of course-of-values recursion is one of the techniques the equation compiler uses to justify to the Lean kernel that a function terminates. It does not affect the code generator which compiles recursive functions as other functional programming language compilers. Recall that {kw}`#eval`{lit}` ` {leanRef}`fib`{lit}` <n>` is exponential in {lit}`<n>`. On the other hand, {kw}`#reduce`{lit}` `{leanRef}`fib`{lit}` <n>` is efficient because it uses the definition sent to the kernel that is based on the {lit}`brecOn` construction. ```lean def fib : Nat → Nat | 0 => 1 | 1 => 1 | n+2 => fib (n+1) + fib n -- Slow: -- #eval fib 50 -- Fast: #reduce fib 50 #print fib ``` ::: :::leanFirst Another good example of a recursive definition is the list {leanRef}`append` function. ```lean def append : List α → List α → List α | [], bs => bs | a::as, bs => a :: append as bs example : append [1, 2, 3] [4, 5] = [1, 2, 3, 4, 5] := rfl ``` ::: Here is another: it adds elements of the first list to elements of the second list, until one of the two lists runs out. 
```lean def listAdd [Add α] : List α → List α → List α | [], _ => [] | _, [] => [] | a :: as, b :: bs => (a + b) :: listAdd as bs #eval listAdd [1, 2, 3] [4, 5, 6, 6, 9, 10] -- [5, 7, 9] ``` You are encouraged to experiment with similar examples in the exercises below. # Local recursive declarations %%% tag := "local-recursive-declarations" %%% You can define local recursive declarations using the {kw}`let rec` keyword. ```lean def replicate (n : Nat) (a : α) : List α := let rec loop : Nat → List α → List α | 0, as => as | n+1, as => loop n (a::as) loop n [] #check @replicate.loop -- @replicate.loop : {α : Type u_1} → α → Nat → List α → List α ``` Lean creates an auxiliary declaration for each {leanRef}`let rec`. In the example above, it created the declaration {leanRef}`replicate.loop` for the {leanRef}`let rec loop` occurring at {leanRef}`replicate`. Note that, Lean “closes” the declaration by adding any local variable occurring in the {leanRef}`let rec` declaration as additional parameters. For example, the local variable {leanRef}`a` occurs at {leanRef}`let rec loop`. You can also use {leanRef}`let rec` in tactic mode and for creating proofs by induction. ```lean def replicate (n : Nat) (a : α) : List α := let rec loop : Nat → List α → List α | 0, as => as | n+1, as => loop n (a::as) loop n [] ------ theorem length_replicate (n : Nat) (a : α) : (replicate n a).length = n := by let rec aux (n : Nat) (as : List α) : (replicate.loop a n as).length = n + as.length := by match n with | 0 => simp [replicate.loop] | n+1 => simp +arith [replicate.loop, aux n] exact aux n [] ``` You can also introduce auxiliary recursive declarations using {kw}`where` clause after your definition. Lean converts them into a {kw}`let rec`. 
```lean def replicate (n : Nat) (a : α) : List α := loop n [] where loop : Nat → List α → List α | 0, as => as | n+1, as => loop n (a::as) theorem length_replicate (n : Nat) (a : α) : (replicate n a).length = n := by exact aux n [] where aux (n : Nat) (as : List α) : (replicate.loop a n as).length = n + as.length := by match n with | 0 => simp [replicate.loop] | n+1 => simp +arith [replicate.loop, aux n] ``` # Well-Founded Recursion and Induction %%% tag := "well-founded-recursion-and-induction" %%% When structural recursion cannot be used, we can prove termination using well-founded recursion. We need a well-founded relation and a proof that each recursive application is decreasing with respect to this relation. Dependent type theory is powerful enough to encode and justify well-founded recursion. Let us start with the logical background that is needed to understand how it works. :::setup ``` variable (α : Type u) (a : α) (r : α → α → Prop) ``` Lean's standard library defines two predicates, {lean}`Acc r a` and {lean}`WellFounded r`, where {lean}`r` is a binary relation on a type {lean}`α`, and {lean}`a` is an element of type {lean}`α`. ::: ```lean variable (α : Sort u) variable (r : α → α → Prop) #check (Acc r : α → Prop) #check (WellFounded r : Prop) ``` ```lean (show := false) variable {α : Sort u} (x y : α) variable {r : α → α → Prop} example : Acc r x = ∀ y, r y x → Acc r y := by simp only [eq_iff_iff] constructor . intro ⟨_, hAcc⟩ assumption . intro h constructor assumption def r' : α → α → Prop := fun x y => True infix:50 " ≺ " => r' example : y ≺ x := True.intro example := WellFounded r ``` The first, {leanRef}`Acc`, is an inductively defined predicate. According to its definition, {leanRef}`Acc r x` is equivalent to {leanRef}`∀ y, r y x → Acc r y`. 
If you think of {leanRef}`r y x` as denoting a kind of order relation {leanRef}`y ≺ x`, then {leanRef}`Acc r x` says that {leanRef}`x` is accessible from below, in the sense that all its predecessors are accessible. In particular, if {leanRef}`x` has no predecessors, it is accessible. Given any type {leanRef}`α`, we should be able to assign a value to each accessible element of {leanRef}`α`, recursively, by assigning values to all its predecessors first. The statement that {leanRef}`r` is well-founded, denoted {leanRef}`WellFounded r`, is exactly the statement that every element of the type is accessible. By the above considerations, if {leanRef}`r` is a well-founded relation on a type {leanRef}`α`, we should have a principle of well-founded recursion on {leanRef}`α`, with respect to the relation {leanRef}`r`. And, indeed, we do: the standard library defines {name}`WellFounded.fix`, which serves exactly that purpose. ```lean noncomputable def f {α : Sort u} (r : α → α → Prop) (h : WellFounded r) (C : α → Sort v) (F : (x : α) → ((y : α) → r y x → C y) → C x) : (x : α) → C x := WellFounded.fix h F ``` There is a long cast of characters here, but the first block we have already seen: the type, {leanRef}`α`, the relation, {leanRef}`r`, and the assumption, {leanRef}`h`, that {leanRef}`r` is well-founded. The variable {leanRef}`C` represents the motive of the recursive definition: for each element {leanRef}`x : α`, we would like to construct an element of {leanRef}`C x`. The function {leanRef}`F` provides the inductive recipe for doing that: it tells us how to construct an element {leanRef}`C x`, given elements of {leanRef}`C y` for each predecessor {leanRef}`y` of {leanRef}`x`. :::setup ``` variable {x y : α} (C : α → Sort v) (r : α → α → Prop) ``` Note that {name}`WellFounded.fix` works equally well as an induction principle. 
It says that if {leanRef}`≺` is well-founded and you want to prove {lean}`∀ x, C x`, it suffices to show that for an arbitrary {lean}`x`, if we have {lean}`∀ y, r y x → C y`, then we have {lean}`C x`. ::: In the example above we use the modifier {leanRef}`noncomputable` because the code generator currently does not support {name}`WellFounded.fix`. The function {name}`WellFounded.fix` is another tool Lean uses to justify that a function terminates. Lean knows that the usual order {lit}`<` on the natural numbers is well founded. It also knows a number of ways of constructing new well founded orders from others, for example, using lexicographic order. Here is essentially the definition of division on the natural numbers that is found in the standard library. ```lean ------ open Nat theorem div_lemma {x y : Nat} : 0 < y ∧ y ≤ x → x - y < x := fun h => sub_lt (Nat.lt_of_lt_of_le h.left h.right) h.left def div.F (x : Nat) (f : (x₁ : Nat) → x₁ < x → Nat → Nat) (y : Nat) : Nat := if h : 0 < y ∧ y ≤ x then f (x - y) (div_lemma h) y + 1 else zero noncomputable def div := WellFounded.fix (measure id).wf div.F #reduce div 8 2 -- 4 ``` :::TODO Missing HL for example ::: The definition is somewhat inscrutable. Here the recursion is on {leanRef (in:="def div.F (x")}`x`, and {lit}`div.F x f : Nat → Nat` returns the “divide by {leanRef}`y`” function for that fixed {leanRef (in:="def div.F (x")}`x`. You have to remember that the second argument to {leanRef}`div.F`, the recipe for the recursion, is a function that is supposed to return the divide by {leanRef}`y` function for all values {leanRef}`x₁` smaller than {leanRef}`x`. The elaborator is designed to make definitions like this more convenient. 
It accepts the following: ```lean def div (x y : Nat) : Nat := if h : 0 < y ∧ y ≤ x then have : x - y < x := Nat.sub_lt (Nat.lt_of_lt_of_le h.1 h.2) h.1 div (x - y) y + 1 else 0 ``` When Lean encounters a recursive definition, it first tries structural recursion, and only when that fails, does it fall back on well-founded recursion. Lean uses the tactic {tactic}`decreasing_tactic` to show that the recursive applications are smaller. The auxiliary proposition {leanRef}`x - y < x` in the example above should be viewed as a hint for this tactic. The defining equation for {leanRef}`div` does _not_ hold definitionally, but we can unfold {leanRef}`div` using the {tactic}`unfold` tactic. We use {ref "conv"}[{tactic}`conv`] to select which {leanRef}`div` application we want to unfold. ```lean def div (x y : Nat) : Nat := if h : 0 < y ∧ y ≤ x then have : x - y < x := Nat.sub_lt (Nat.lt_of_lt_of_le h.1 h.2) h.1 div (x - y) y + 1 else 0 ------ example (x y : Nat) : div x y = if 0 < y ∧ y ≤ x then div (x - y) y + 1 else 0 := by -- unfold occurrence in the left-hand-side of the equation: conv => lhs; unfold div rfl example (x y : Nat) (h : 0 < y ∧ y ≤ x) : div x y = div (x - y) y + 1 := by conv => lhs; unfold div simp [h] ``` :::leanFirst The following example is similar: it converts any natural number to a binary expression, represented as a list of 0's and 1's. We have to provide evidence that the recursive call is decreasing, which we do here with a {leanRef}`sorry`. The {leanRef}`sorry` does not prevent the interpreter from evaluating the function successfully, but {leanRef}`#eval!` must be used instead of {kw}`#eval` when a term contains {leanRef}`sorry`. ```lean def natToBin : Nat → List Nat | 0 => [0] | 1 => [1] | n + 2 => have : (n + 2) / 2 < n + 2 := sorry natToBin ((n + 2) / 2) ++ [n % 2] #eval! 
natToBin 1234567 ``` ::: :::leanFirst As a final example, we observe that Ackermann's function can be defined directly, because it is justified by the well-foundedness of the lexicographic order on the natural numbers. The {leanRef}`termination_by` clause instructs Lean to use a lexicographic order. This clause is actually mapping the function arguments to elements of type {lean}`Nat × Nat`. Then, Lean uses typeclass resolution to synthesize an element of type {lean}`WellFoundedRelation (Nat × Nat)`. ```lean def ack : Nat → Nat → Nat | 0, y => y+1 | x+1, 0 => ack x 1 | x+1, y+1 => ack x (ack (x+1) y) termination_by x y => (x, y) ``` ::: In many cases, Lean can automatically determine an appropriate lexicographical order. Ackermann's function is one such case, so the {leanRef}`termination_by` clause is optional: ```lean def ack : Nat → Nat → Nat | 0, y => y+1 | x+1, 0 => ack x 1 | x+1, y+1 => ack x (ack (x+1) y) ``` :::setup ``` variable {α : Type u} {β : Type v} ``` Note that a lexicographic order is used in the example above because the instance {lean}`WellFoundedRelation (α × β)` uses a lexicographic order. Lean also defines the instance ```lean instance (priority := low) [SizeOf α] : WellFoundedRelation α := sizeOfWFRel ``` ::: :::leanFirst In the following example, we prove termination by showing that {leanRef}`as.size - i` is decreasing in the recursive application. ```lean def takeWhile (p : α → Bool) (as : Array α) : Array α := go 0 #[] where go (i : Nat) (r : Array α) : Array α := if h : i < as.size then let a := as[i] if p a then go (i+1) (r.push a) else r else r termination_by as.size - i ``` ::: Note that, auxiliary function {leanRef}`go` is recursive in this example, but {leanRef}`takeWhile` is not. 
Once again, Lean can automatically recognize this pattern, so the {leanRef}`termination_by` clause is unnecessary:

```lean
def takeWhile (p : α → Bool) (as : Array α) : Array α :=
  go 0 #[]
where
  go (i : Nat) (r : Array α) : Array α :=
    if h : i < as.size then
      let a := as[i]
      if p a then
        go (i+1) (r.push a)
      else
        r
    else
      r
```

:::leanFirst
By default, Lean uses the tactic {tactic}`decreasing_tactic` to prove recursive applications are decreasing. The modifier {leanRef}`decreasing_by` allows us to provide our own tactic. Here is an example.

```lean
theorem div_lemma {x y : Nat} : 0 < y ∧ y ≤ x → x - y < x :=
  fun ⟨ypos, ylex⟩ => Nat.sub_lt (Nat.lt_of_lt_of_le ypos ylex) ypos

def div (x y : Nat) : Nat :=
  if h : 0 < y ∧ y ≤ x then
    div (x - y) y + 1
  else
    0
decreasing_by apply div_lemma; assumption
```
:::

Note that {leanRef}`decreasing_by` is not a replacement for {leanRef}`termination_by`; they complement each other. {leanRef}`termination_by` is used to specify a well-founded relation, and {leanRef}`decreasing_by` is used to provide our own tactic for showing that recursive applications are decreasing. In the following example, we use both of them.

```lean
def ack : Nat → Nat → Nat
  | 0, y => y+1
  | x+1, 0 => ack x 1
  | x+1, y+1 => ack x (ack (x+1) y)
termination_by x y => (x, y)
decreasing_by
  -- unfolds well-founded recursion auxiliary definitions:
  all_goals simp_wf
  · apply Prod.Lex.left; simp +arith
  · apply Prod.Lex.right; simp +arith
  · apply Prod.Lex.left; simp +arith
```

:::leanFirst
We can use {leanRef}`decreasing_by sorry` to instruct Lean to “trust” us that the function terminates.

```lean
def natToBin : Nat → List Nat
  | 0 => [0]
  | 1 => [1]
  | n + 2 => natToBin ((n + 2) / 2) ++ [n % 2]
decreasing_by sorry

#eval! natToBin 1234567
```
:::

:::leanFirst
Recall that using {leanRef}`sorry` is equivalent to using a new axiom, and should be avoided. In the following example, we used the {leanRef}`sorry` to prove {leanRef}`False`.
The command {leanRef}`#print axioms unsound` shows that {leanRef}`unsound` depends on the unsound axiom {lean}`sorryAx` used to implement {leanRef}`sorry`. ```lean def unsound (x : Nat) : False := unsound (x + 1) decreasing_by sorry #check unsound 0 -- `unsound 0` is a proof of `False` #print axioms unsound -- 'unsound' depends on axioms: [sorryAx] ``` ::: :::setup ``` variable {α : Type w} {β : Type u} {γ : Type v} {G : Prop} ``` Summary: - If there is no {leanRef}`termination_by`, a well-founded relation is derived (if possible) by selecting an argument and then using typeclass resolution to synthesize a well-founded relation for this argument's type. - If {leanRef}`termination_by` is specified, it maps the arguments of the function to a type {lean}`α` and type class resolution is again used. Recall that, the default instance for {lean}`β × γ` is a lexicographic order based on the well-founded relations for {lean}`β` and {lean}`γ`. - The default well-founded relation instance for {lean}`Nat` is {lean (type := "Nat → Nat → Prop")}`(· < ·)`. - By default, the tactic {tactic}`decreasing_tactic` is used to show that recursive applications are smaller with respect to the selected well-founded relation. If {tactic}`decreasing_tactic` fails, the error message includes the remaining goal {lit}`... |- G`. Note that, the {tactic}`decreasing_tactic` uses {tactic}`assumption`. So, you can include a {kw}`have`-expression to prove goal {lean}`G`. You can also provide your own tactic using {kw}`decreasing_by`. ::: # Functional Induction %%% tag := "functional-induction" %%% Lean generates bespoke induction principles for recursive functions. These induction principles follow the recursive structure of the function's definition, rather than the structure of the datatype. Proofs about functions typically follow the recursive structure of the function itself, so these induction principles allow statements about the function to be proved more conveniently. 
:::leanFirst For example, using the functional induction principle for {leanRef}`ack` to prove that the result is always greater than {leanRef}`0` requires one case for each arm of the pattern match in {leanRef}`ack`: ```lean def ack : Nat → Nat → Nat | 0, y => y+1 | x+1, 0 => ack x 1 | x+1, y+1 => ack x (ack (x+1) y) theorem ack_gt_zero : ack n m > 0 := by fun_induction ack with | case1 y => -- ^ PROOF_STATE: case1 simp | case2 x ih => -- ^ PROOF_STATE: case2 exact ih | case3 x y ih1 ih2 => -- ^ PROOF_STATE: case3 simp [ack, *] ``` ::: In {goal case1}`case1`, the goal is: ```proofState case1 case case1 y : Nat ⊢ y + 1 > 0 ``` The {leanRef}`y + 1` in the goal corresponds to the value returned in the first case of {leanRef}`ack`. In {goal case2}`case2`, the goal is: ```proofState case2 case case2 x : Nat ih : ack x 1 > 0 ⊢ ack x 1 > 0 ``` The {leanRef}`ack x 1` in the goal corresponds to the value of {leanRef}`ack` applied to the pattern variables {leanRef}`x + 1` and {leanRef}`0` returned in the second case of {leanRef}`ack`. This term is automatically simplified to the right-hand side. Happily, the inductive hypothesis {leanRef}`ih : ack x 1 > 0` corresponds to the recursive call, which is exactly the answer returned in this case. In {goal case3}`case3`, the goal is: ```proofState case3 case case3 x : Nat y : Nat ih1 : ack (x + 1) y > 0 ih2 : ack x (ack (x + 1) y) > 0 ⊢ ack x (ack (x + 1) y) > 0 ``` The {leanRef}`ack x (ack (x + 1) y)` in the goal corresponds to the value returned in the third case of {leanRef}`ack`, when {leanRef}`ack` applied to {leanRef}`x + 1` and {leanRef}`y + 1` has been reduced. The inductive hypotheses {leanRef}`ih1 : ack (x + 1) y > 0` and {leanRef}`ih2 : ack x (ack (x + 1) y) > 0` correspond to the recursive calls, with {leanRef}`ih1` matching the nested recursive call. Once again, the induction hypothesis is suitable. 
Using {leanRef}`fun_induction ack` results in goals and induction hypotheses that match the recursive structure of {leanRef}`ack`. As a result, the proof can be a single line: ```lean def ack : Nat → Nat → Nat | 0, y => y+1 | x+1, 0 => ack x 1 | x+1, y+1 => ack x (ack (x+1) y) ------------- theorem ack_gt_zero : ack n m > 0 := by fun_induction ack <;> simp [*, ack] ``` :::leanFirst There is also a {leanRef}`fun_cases` tactic which is analogous to the {tactic}`cases` tactic. It generates a case for each branch in a function's control flow. Both it and {leanRef}`fun_induction` additionally provide assumptions that rule out the paths that were not taken. This function {leanRef}`f` represents a five-way Boolean disjunction: ```lean def f : Bool → Bool → Bool → Bool → Bool → Bool | true, _, _, _ , _ => true | _, true, _, _ , _ => true | _, _, true, _ , _ => true | _, _, _, true, _ => true | _, _, _, _, x => x ``` To prove that it is disjunction, the last case requires knowledge that none of the arguments are {leanRef}`true`. 
This knowledge is provided by the tactic: ```lean def f : Bool → Bool → Bool → Bool → Bool → Bool | true, _, _, _ , _ => true | _, true, _, _ , _ => true | _, _, true, _ , _ => true | _, _, _, true, _ => true | _, _, _, _, x => x ------ theorem f_or : f b1 b2 b3 b4 b5 = (b1 || b2 || b3 || b4 || b5) := by fun_cases f -- ^ PROOF_STATE: fOrAll all_goals sorry ``` ::: Each case includes an assumption that rules out the prior cases: ```proofState fOrAll case case1 b2 : Bool b3 : Bool b4 : Bool b5 : Bool ⊢ true = (true || b2 || b3 || b4 || b5) case case2 b1 : Bool b3 : Bool b4 : Bool b5 : Bool x✝ : b1 = true → False ⊢ true = (b1 || true || b3 || b4 || b5) case case3 b1 : Bool b2 : Bool b4 : Bool b5 : Bool x✝¹ : b1 = true → False x✝ : b2 = true → False ⊢ true = (b1 || b2 || true || b4 || b5) case case4 b1 : Bool b2 : Bool b3 : Bool b5 : Bool x✝² : b1 = true → False x✝¹ : b2 = true → False x✝ : b3 = true → False ⊢ true = (b1 || b2 || b3 || true || b5) case case5 b1 : Bool b2 : Bool b3 : Bool b4 : Bool b5 : Bool x✝³ : b1 = true → False x✝² : b2 = true → False x✝¹ : b3 = true → False x✝ : b4 = true → False ⊢ b5 = (b1 || b2 || b3 || b4 || b5) ``` :::leanFirst The {leanRef}`simp_all` tactic, which simplifies all the assumptions and the goal together, can dispatch all cases: ```lean def f : Bool → Bool → Bool → Bool → Bool → Bool | true, _, _, _ , _ => true | _, true, _, _ , _ => true | _, _, true, _ , _ => true | _, _, _, true, _ => true | _, _, _, _, x => x ------ theorem f_or : f b1 b2 b3 b4 b5 = (b1 || b2 || b3 || b4 || b5) := by fun_cases f <;> simp_all ``` ::: # Mutual Recursion %%% tag := "mutual-recursion" %%% Lean also supports mutual recursive definitions. The syntax is similar to that for mutual inductive types. 
Here is an example: ```lean mutual def even : Nat → Bool | 0 => true | n+1 => odd n def odd : Nat → Bool | 0 => false | n+1 => even n end example : even (a + 1) = odd a := by simp [even] example : odd (a + 1) = even a := by simp [odd] theorem even_eq_not_odd : ∀ a, even a = not (odd a) := by intro a; induction a . simp [even, odd] . simp [even, odd, *] ``` What makes this a mutual definition is that {leanRef}`even` is defined recursively in terms of {leanRef}`odd`, while {leanRef}`odd` is defined recursively in terms of {leanRef}`even`. Under the hood, this is compiled as a single recursive definition. The internally defined function takes, as argument, an element of a sum type, either an input to {leanRef}`even`, or an input to {leanRef}`odd`. It then returns an output appropriate to the input. To define that function, Lean uses a suitable well-founded measure. The internals are meant to be hidden from users; the canonical way to make use of such definitions is to use {leanRef}`simp` (or {tactic}`unfold`), as we did above. :::leanFirst Mutual recursive definitions also provide natural ways of working with mutual and nested inductive types. Recall the definition of {leanRef}`Even` and {leanRef}`Odd` as mutual inductive predicates as presented before. ```lean mutual inductive Even : Nat → Prop where | even_zero : Even 0 | even_succ : ∀ n, Odd n → Even (n + 1) inductive Odd : Nat → Prop where | odd_succ : ∀ n, Even n → Odd (n + 1) end ``` ::: :::leanFirst The constructors, {leanRef}`even_zero`, {leanRef}`even_succ`, and {leanRef}`odd_succ` provide positive means for showing that a number is even or odd. We need to use the fact that the inductive type is generated by these constructors to know that zero is not odd, and that the latter two implications reverse. As usual, the constructors are kept in a namespace that is named after the type being defined, and the command {leanRef}`open Even Odd` allows us to access them more conveniently. 
```lean mutual inductive Even : Nat → Prop where | even_zero : Even 0 | even_succ : ∀ n, Odd n → Even (n + 1) inductive Odd : Nat → Prop where | odd_succ : ∀ n, Even n → Odd (n + 1) end ------ open Even Odd theorem not_odd_zero : ¬ Odd 0 := fun h => nomatch h theorem even_of_odd_succ : ∀ n, Odd (n + 1) → Even n | _, odd_succ n h => h theorem odd_of_even_succ : ∀ n, Even (n + 1) → Odd n | _, even_succ n h => h ``` ::: For another example, suppose we use a nested inductive type to define a set of terms inductively, so that a term is either a constant (with a name given by a string), or the result of applying a constant to a list of constants. ```lean inductive Term where | const : String → Term | app : String → List Term → Term ``` We can then use a mutual recursive definition to count the number of constants occurring in a term, as well as the number occurring in a list of terms. ```lean inductive Term where | const : String → Term | app : String → List Term → Term ------ namespace Term mutual def numConsts : Term → Nat | const _ => 1 | app _ cs => numConstsLst cs def numConstsLst : List Term → Nat | [] => 0 | c :: cs => numConsts c + numConstsLst cs end def sample := app "f" [app "g" [const "x"], const "y"] #eval numConsts sample end Term ``` :::leanFirst As a final example, we define a function {leanRef}`replaceConst a b e` that replaces a constant {leanRef (in := "replaceConst a b e")}`a` with {leanRef (in := "replaceConst a b e")}`b` in a term {leanRef (in := "replaceConst a b e")}`e`, and then prove the number of constants is the same. Note that, our proof uses mutual recursion (aka induction). 
```lean inductive Term where | const : String → Term | app : String → List Term → Term namespace Term mutual def numConsts : Term → Nat | const _ => 1 | app _ cs => numConstsLst cs def numConstsLst : List Term → Nat | [] => 0 | c :: cs => numConsts c + numConstsLst cs end ------ mutual def replaceConst (a b : String) : Term → Term | const c => if a == c then const b else const c | app f cs => app f (replaceConstLst a b cs) def replaceConstLst (a b : String) : List Term → List Term | [] => [] | c :: cs => replaceConst a b c :: replaceConstLst a b cs end mutual theorem numConsts_replaceConst (a b : String) (e : Term) : numConsts (replaceConst a b e) = numConsts e := by match e with | const c => simp [replaceConst]; split <;> simp [numConsts] | app f cs => simp [replaceConst, numConsts, numConsts_replaceConstLst a b cs] theorem numConsts_replaceConstLst (a b : String) (es : List Term) : numConstsLst (replaceConstLst a b es) = numConstsLst es := by match es with | [] => simp [replaceConstLst, numConstsLst] | c :: cs => simp [replaceConstLst, numConstsLst, numConsts_replaceConst a b c, numConsts_replaceConstLst a b cs] end ``` ::: # Dependent Pattern Matching %%% tag := "dependent-pattern-matching" %%% ::::setup ``` inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) def map (f : α → β) : Vect α n → Vect β n | .nil => .nil | .cons x xs => .cons (f x) (map f xs) def zip : Vect α n → Vect β n → Vect (α × β) n | .nil, .nil => .nil | .cons x xs, .cons y ys => .cons (x, y) (zip xs ys) def unzip : Vect (α × β) n → (Vect α n × Vect β n) | .nil => (.nil, .nil) | .cons (x, y) xys => let (xs, ys) := unzip xys (.cons x xs, .cons y ys) def tail : Vect α (n + 1) → Vect α n | .cons x xs => xs variable {v : Vect α (n + 1)} open Vect ``` :::leanFirst All the examples of pattern matching we considered in the section on {ref "pattern-matching"}[pattern matching] can easily be written using {lit}`casesOn` and {lit}`recOn`. 
However, this is often not the case with indexed inductive families such as {leanRef}`Vect α n`, since case splits impose constraints on the values of the indices. Without the equation compiler, we would need a lot of boilerplate code to define very simple functions such as {lean}`map`, {lean}`zip`, and {lean}`unzip` using recursors. To understand the difficulty, consider what it would take to define a function {lean}`tail` which takes a vector {lean}`v : Vect α (n + 1)` and deletes the first element. ```lean inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n + 1) ``` ::: A first thought might be to use the {name}`Vect.casesOn` function: ```signature Vect.casesOn.{u, v} {α : Type v} {motive : (a : Nat) → Vect α a → Sort u} {a : Nat} (t : Vect α a) (nil : motive 0 nil) (cons : (a : α) → {n : Nat} → (a_1 : Vect α n) → motive (n + 1) (cons a a_1)) : motive a t ``` But what value should we return in the {name}`nil` case? Something funny is going on: if {lean}`v` has type {lean}`Vect α (n + 1)`, it _can't_ be {name}`nil`, but it is not clear how to tell that to {name}`Vect.casesOn`. :::: One solution is to define an auxiliary function: ```lean set_option linter.unusedVariables false inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) namespace Vect ------ def tailAux (v : Vect α m) : m = n + 1 → Vect α n := Vect.casesOn (motive := fun x _ => x = n + 1 → Vect α n) v (fun h : 0 = n + 1 => Nat.noConfusion h) (fun (a : α) (m : Nat) (as : Vect α m) => fun (h : m + 1 = n + 1) => Nat.noConfusion h (fun h1 : m = n => h1 ▸ as)) def tail (v : Vect α (n+1)) : Vect α n := tailAux v rfl ----- end Vect ``` In the {leanRef}`nil` case, {leanRef (in := "m = n + 1")}`m` is instantiated to {leanRef}`0`, and {leanRef}`Nat.noConfusion` makes use of the fact that {leanRef}`0 = n + 1` cannot occur. 
Otherwise, {leanRef}`v` is of the form {lit}`cons `{leanRef}`a`{lit}` `{leanRef}`as`, and we can simply return {leanRef}`as`, after casting it from a vector of length {leanRef (in := "m + 1 = n + 1")}`m` to a vector of length {leanRef (in := "m + 1= n + 1")}`n`. The difficulty in defining {leanRef}`tail` is to maintain the relationships between the indices. The hypothesis {leanRef}`m = n + 1` in {leanRef}`tailAux` is used to communicate the relationship between {leanRef (in:="m = n + 1")}`n` and the index associated with the minor premise. Moreover, the {leanRef}`0 = n + 1` case is unreachable, and the canonical way to discard such a case is to use {leanRef}`Nat.noConfusion`. :::leanFirst The {leanRef}`tail` function is, however, easy to define using recursive equations, and the equation compiler generates all the boilerplate code automatically for us. Here are a number of similar examples: ```lean set_option linter.unusedVariables false inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) namespace Vect ------ def head : {n : Nat} → Vect α (n+1) → α | n, cons a as => a def tail : {n : Nat} → Vect α (n+1) → Vect α n | n, cons a as => as theorem eta : ∀ {n : Nat} (v : Vect α (n+1)), cons (head v) (tail v) = v | n, cons a as => rfl def map (f : α → β → γ) : {n : Nat} → Vect α n → Vect β n → Vect γ n | 0, nil, nil => nil | n+1, cons a as, cons b bs => cons (f a b) (map f as bs) def zip : {n : Nat} → Vect α n → Vect β n → Vect (α × β) n | 0, nil, nil => nil | n+1, cons a as, cons b bs => cons (a, b) (zip as bs) ------ end Vect ``` ::: Note that we can omit recursive equations for “unreachable” cases such as {leanRef}`head`{lit}` `{leanRef}`nil`. The automatically generated definitions for indexed families are far from straightforward. 
For example: ```lean set_option linter.unusedVariables false inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) namespace Vect ------- def zipWith (f : α → β → γ) : {n : Nat} → Vect α n → Vect β n → Vect γ n | 0, nil, nil => nil | n+1, cons a as, cons b bs => cons (f a b) (zipWith f as bs) #print zipWith #print zipWith.match_1 ------ end Vect ``` :::setup ``` inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) ``` The {leanRef}`zipWith` function is even more tedious to define by hand than the {leanRef}`tail` function. We encourage you to try it, using {name}`Vect.recOn`, {name}`Vect.casesOn` and {name}`Vect.noConfusion`. ::: # Inaccessible Patterns %%% tag := "inaccessible-patterns" %%% Sometimes an argument in a dependent matching pattern is not essential to the definition, but nonetheless has to be included to specialize the type of the expression appropriately. Lean allows users to mark such subterms as _inaccessible_ for pattern matching. These annotations are essential, for example, when a term occurring in the left-hand side is neither a variable nor a constructor application, because these are not suitable targets for pattern matching. We can view such inaccessible patterns as “don't care” components of the patterns. You can declare a subterm inaccessible by writing {lit}`.(t)`. If the inaccessible pattern can be inferred, you can also write {lit}`_`. :::leanFirst The following example, we declare an inductive type that defines the property of “being in the image of {leanRef (in := "(f :")}`f`”. You can view an element of the type {leanRef}`ImageOf f b` as evidence that {leanRef (in := "ImageOf f b")}`b` is in the image of {leanRef (in := "ImageOf f b")}`f`, whereby the constructor {leanRef}`imf` is used to build such evidence. 
We can then define any function {leanRef (in := "inverse {f")}`f` with an “inverse” which takes anything in the image of {leanRef (in := "inverse {f")}`f` to an element that is mapped to it. The typing rules force us to write {leanRef (in := ".(f a)")}`f a` for the first argument, but this term is neither a variable nor a constructor application, and plays no role in the pattern-matching definition. To define the function {leanRef}`inverse` below, we _have to_ mark {leanRef (in := ".(f a)")}`f a` inaccessible.

```lean
inductive ImageOf {α β : Type u} (f : α → β) : β → Type u where
  | imf : (a : α) → ImageOf f (f a)

open ImageOf

def inverse {f : α → β} : (b : β) → ImageOf f b → α
  | .(f a), imf a => a

def inverse' {f : α → β} : (b : β) → ImageOf f b → α
  | _, imf a => a
```
:::

In the example above, the inaccessible annotation makes it clear that {leanRef (in := ".(f a)")}`f` is _not_ a pattern matching variable.

:::leanFirst
Inaccessible patterns can be used to clarify and control definitions that make use of dependent pattern matching. Consider the following definition of the function {leanRef}`Vect.add`, which adds two vectors of elements of a type, assuming that type has an associated addition function:

```lean
inductive Vect (α : Type u) : Nat → Type u
  | nil : Vect α 0
  | cons : α → {n : Nat} → Vect α n → Vect α (n+1)

def Vect.add [Add α] : {n : Nat} → Vect α n → Vect α n → Vect α n
  | 0, nil, nil => nil
  | n+1, cons a as, cons b bs => cons (a + b) (add as bs)
```
:::

The argument {leanRef}`{n : Nat}` appears after the colon, because it cannot be held fixed throughout the definition. When implementing this definition, the equation compiler starts with a case distinction as to whether the first argument is {leanRef}`0` or of the form {leanRef}`n+1`. This is followed by nested case splits on the next two arguments, and in each case the equation compiler rules out the cases that are not compatible with the first pattern.
But, in fact, a case split is not required on the first argument; the {lit}`casesOn` eliminator for {leanRef}`Vect` automatically abstracts this argument and replaces it by {leanRef}`0` and {leanRef}`n + 1` when we do a case split on the second argument. Using inaccessible patterns, we can prompt the equation compiler to avoid the case split on {leanRef}`n`. ```lean inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) namespace Vect ------ def add [Add α] : {n : Nat} → Vect α n → Vect α n → Vect α n | .(_), nil, nil => nil | .(_), cons a as, cons b bs => cons (a + b) (add as bs) ------- end Vect ``` Marking the position as an inaccessible pattern tells the equation compiler first, that the form of the argument should be inferred from the constraints posed by the other arguments, and, second, that the first argument should _not_ participate in pattern matching. The inaccessible pattern {leanRef}`.(_)` can be written as {lit}`_` for convenience. ```lean inductive Vect (α : Type u) : Nat → Type u | nil : Vect α 0 | cons : α → {n : Nat} → Vect α n → Vect α (n+1) namespace Vect ------ def add [Add α] : {n : Nat} → Vect α n → Vect α n → Vect α n | _, nil, nil => nil | _, cons a as, cons b bs => cons (a + b) (add as bs) ------- end Vect ``` As we mentioned above, the argument {leanRef}`{n : Nat}` is part of the pattern matching, because it cannot be held fixed throughout the definition. Rather than requiring that these discriminants be provided explicitly, Lean implicitly includes these extra discriminants automatically for us. 
```lean
inductive Vect (α : Type u) : Nat → Type u
  | nil : Vect α 0
  | cons : α → {n : Nat} → Vect α n → Vect α (n+1)
namespace Vect
------
def add [Add α] {n : Nat} : Vect α n → Vect α n → Vect α n
  | nil, nil => nil
  | cons a as, cons b bs => cons (a + b) (add as bs)
-------
end Vect
```

When combined with the _auto bound implicits_ feature, you can simplify the declaration further and write:

```lean
inductive Vect (α : Type u) : Nat → Type u
  | nil : Vect α 0
  | cons : α → {n : Nat} → Vect α n → Vect α (n+1)
namespace Vect
------
def add [Add α] : Vect α n → Vect α n → Vect α n
  | nil, nil => nil
  | cons a as, cons b bs => cons (a + b) (add as bs)
-------
end Vect
```

Using these new features, you can write the other vector functions defined in the previous sections more compactly as follows:

```lean
set_option linter.unusedVariables false
inductive Vect (α : Type u) : Nat → Type u
  | nil : Vect α 0
  | cons : α → {n : Nat} → Vect α n → Vect α (n+1)
namespace Vect
------
def head : Vect α (n+1) → α
  | cons a as => a

def tail : Vect α (n+1) → Vect α n
  | cons a as => as

theorem eta : (v : Vect α (n+1)) → cons (head v) (tail v) = v
  | cons a as => rfl

def map (f : α → β → γ) : Vect α n → Vect β n → Vect γ n
  | nil, nil => nil
  | cons a as, cons b bs => cons (f a b) (map f as bs)

def zip : Vect α n → Vect β n → Vect (α × β) n
  | nil, nil => nil
  | cons a as, cons b bs => cons (a, b) (zip as bs)
-------
end Vect
```

# Match Expressions
%%%
tag := "match-expressions"
%%%

Lean also provides a compiler for {kw}`match`-{kw}`with` expressions found in many functional languages:

```lean
set_option linter.unusedVariables false
------
def isNotZero (m : Nat) : Bool :=
  match m with
  | 0 => false
  | n + 1 => true
```

This does not look very different from an ordinary pattern matching definition, but the point is that a {kw}`match` can be used anywhere in an expression, and with arbitrary arguments.
```lean set_option linter.unusedVariables false ------- def isNotZero (m : Nat) : Bool := match m with | 0 => false | n + 1 => true def filter (p : α → Bool) : List α → List α | [] => [] | a :: as => match p a with | true => a :: filter p as | false => filter p as example : filter isNotZero [1, 0, 0, 3, 0] = [1, 3] := rfl ``` Here is another example: ```lean def foo (n : Nat) (b c : Bool) := 5 + match n - 5, b && c with | 0, true => 0 | m + 1, true => m + 7 | 0, false => 5 | m + 1, false => m + 3 #eval foo 7 true false example : foo 7 true false = 9 := rfl ``` Lean uses the {kw}`match` construct internally to implement pattern-matching in all parts of the system. Thus, all four of these definitions have the same net effect: ```lean def bar₁ : Nat × Nat → Nat | (m, n) => m + n def bar₂ (p : Nat × Nat) : Nat := match p with | (m, n) => m + n def bar₃ : Nat × Nat → Nat := fun (m, n) => m + n def bar₄ (p : Nat × Nat) : Nat := let (m, n) := p; m + n ``` These variations are equally useful for destructing propositions: ```lean variable (p q : Nat → Prop) example : (∃ x, p x) → (∃ y, q y) → ∃ x y, p x ∧ q y | ⟨x, px⟩, ⟨y, qy⟩ => ⟨x, y, px, qy⟩ example (h₀ : ∃ x, p x) (h₁ : ∃ y, q y) : ∃ x y, p x ∧ q y := match h₀, h₁ with | ⟨x, px⟩, ⟨y, qy⟩ => ⟨x, y, px, qy⟩ example : (∃ x, p x) → (∃ y, q y) → ∃ x y, p x ∧ q y := fun ⟨x, px⟩ ⟨y, qy⟩ => ⟨x, y, px, qy⟩ example (h₀ : ∃ x, p x) (h₁ : ∃ y, q y) : ∃ x y, p x ∧ q y := let ⟨x, px⟩ := h₀ let ⟨y, qy⟩ := h₁ ⟨x, y, px, qy⟩ ``` # Exercises %%% tag := none %%% ```setup open List variable {xs : List α} {n : Nat} ``` 1. Open a namespace {lit}`Hidden` to avoid naming conflicts, and use the equation compiler to define addition, multiplication, and exponentiation on the natural numbers. Then use the equation compiler to derive some of their basic properties. 2. 
Similarly, use the equation compiler to define some basic operations on lists (like the {lean}`reverse` function) and prove theorems about lists by induction (such as the fact that {lean}`reverse (reverse xs) = xs` for any list {lean}`xs`). 3. Define your own function to carry out course-of-value recursion on the natural numbers. Similarly, see if you can figure out how to define {name}`WellFounded.fix` on your own. 4. Following the examples in the section on {ref "dependent-pattern-matching"}[dependent pattern matching], define a function that will append two vectors. This is tricky; you will have to define an auxiliary function. 5. :::leanFirst Consider the following type of arithmetic expressions. The idea is that {leanRef}`var`{lit}` `{lean}`n` is a variable, {lit}`vₙ`, and {leanRef}`const`{lit}` `{lean}`n` is the constant whose value is {lean}`n`. ```lean inductive Expr where | const : Nat → Expr | var : Nat → Expr | plus : Expr → Expr → Expr | times : Expr → Expr → Expr deriving Repr open Expr def sampleExpr : Expr := plus (times (var 0) (const 7)) (times (const 2) (var 1)) ``` ::: Here {leanRef}`sampleExpr` represents {lit}`(v₀ * 7) + (2 * v₁)`. :::leanFirst Write a function that evaluates such an expression, evaluating each {leanRef}`var n` to {leanRef}`v n`. ```lean inductive Expr where | const : Nat → Expr | var : Nat → Expr | plus : Expr → Expr → Expr | times : Expr → Expr → Expr deriving Repr open Expr def sampleExpr : Expr := plus (times (var 0) (const 7)) (times (const 2) (var 1)) ------ def eval (v : Nat → Nat) : Expr → Nat | const n => sorry | var n => v n | plus e₁ e₂ => sorry | times e₁ e₂ => sorry def sampleVal : Nat → Nat | 0 => 5 | 1 => 6 | _ => 0 -- Try it out. You should get 47 here. -- #eval eval sampleVal sampleExpr ``` ::: :::leanFirst Implement “constant fusion,” a procedure that simplifies subterms like {lean}`5 + 7` to {lean}`12`. 
Using the auxiliary function {leanRef}`simpConst`, define a function “fuse”: to simplify a plus or a times, first simplify the arguments recursively, and then apply {leanRef}`simpConst` to try to simplify the result. ```lean inductive Expr where | const : Nat → Expr | var : Nat → Expr | plus : Expr → Expr → Expr | times : Expr → Expr → Expr deriving Repr open Expr def eval (v : Nat → Nat) : Expr → Nat | const n => sorry | var n => v n | plus e₁ e₂ => sorry | times e₁ e₂ => sorry ------ def simpConst : Expr → Expr | plus (const n₁) (const n₂) => const (n₁ + n₂) | times (const n₁) (const n₂) => const (n₁ * n₂) | e => e def fuse : Expr → Expr := sorry theorem simpConst_eq (v : Nat → Nat) : ∀ e : Expr, eval v (simpConst e) = eval v e := sorry theorem fuse_eq (v : Nat → Nat) : ∀ e : Expr, eval v (fuse e) = eval v e := sorry ``` ::: The last two theorems show that the definitions preserve the value.
theorem_proving_in_lean4/book/TPiL/InteractingWithLean.lean
import VersoManual import TPiL.Examples open Verso.Genre Manual open TPiL #doc (Manual) "Interacting with Lean" => You are now familiar with the fundamentals of dependent type theory, both as a language for defining mathematical objects and a language for constructing proofs. The one thing you are missing is a mechanism for defining new data types. We will fill this gap in the next chapter, which introduces the notion of an _inductive data type_. But first, in this chapter, we take a break from the mechanics of type theory to explore some pragmatic aspects of interacting with Lean. Not all of the information found here will be useful to you right away. We recommend skimming this section to get a sense of Lean's features, and then returning to it as necessary. # Messages %%% tag := "messages" %%% Lean produces three kinds of messages: : Errors Errors are produced when an inconsistency in the code means that it can't be processed. Examples include syntax errors (e.g. a missing {lit}`)`) and type errors such as attempting to add a natural number to a function. : Warnings Warnings describe potential problems with the code, such as the presence of {lean}`sorry`. Unlike with errors, the code is not meaningless; however, warnings deserve careful attention. : Information Information doesn't indicate any problem with the code, and includes output from commands such as {kw}`#check` and {kw}`#eval`. Lean can check that a command produces the expected messages. If the messages match, then any errors are disregarded; this can be used to ensure that the right errors occur. If they don't, an error is produced. You can use the {kw}`#guard_msgs` command to indicate which messages are expected. 
Here is an example: ```lean /-- error: Type mismatch "Not a number" has type String but is expected to have type Nat -/ #guard_msgs in def x : Nat := "Not a number" ``` :::leanFirst Including a message category in parentheses after {leanRef}`#guard_msgs` causes it to check only the specified category, letting others through. In this example, {leanRef}`#eval` issues an error due to the presence of {lean}`sorry`, but the warning that is always issued for {lean}`sorry` is displayed as usual: ```lean /-- error: aborting evaluation since the expression depends on the 'sorry' axiom, which can lead to runtime instability and crashes. To attempt to evaluate anyway despite the risks, use the '#eval!' command. -/ #guard_msgs(error) in #eval (sorry : Nat) ``` ::: Without the configuration, both messages are captured: ```lean /-- error: aborting evaluation since the expression depends on the 'sorry' axiom, which can lead to runtime instability and crashes. To attempt to evaluate anyway despite the risks, use the '#eval!' command. --- warning: declaration uses 'sorry' -/ #guard_msgs in #eval (sorry : Nat) ``` Some examples in this book use {leanRef}`#guard_msgs` to indicate expected errors. # Importing Files %%% tag := "importing-files" %%% The goal of Lean's front end is to interpret user input, construct formal expressions, and check that they are well-formed and type-correct. Lean also supports the use of various editors, which provide continuous checking and feedback. More information can be found on the Lean [documentation pages](https://lean-lang.org/documentation/). The definitions and theorems in Lean's standard library are spread across multiple files. Users may also wish to make use of additional libraries, or develop their own projects across multiple files. When Lean starts, it automatically imports the contents of the library {lit}`Init` folder, which includes a number of fundamental definitions and constructions. 
As a result, most of the examples we present here work “out of the box.” If you want to use additional files, however, they need to be imported manually, via an {kw}`import` statement at the beginning of a file. The command > {kw}`import`{lit}` Bar.Baz.Blah` imports the file {lit}`Bar/Baz/Blah.olean`, where the descriptions are interpreted relative to the Lean _search path_. Information as to how the search path is determined can be found on the [documentation pages](https://lean-lang.org/documentation/). By default, it includes the standard library directory, and (in some contexts) the root of the user's local project. Importing is transitive. In other words, if you import {lit}`Foo` and {lit}`Foo` imports {lit}`Bar`, then you also have access to the contents of {lit}`Bar`, and do not need to import it explicitly. # More on Sections %%% tag := "more-on-sections" %%% Lean provides various sectioning mechanisms to help structure a theory. You saw in {ref "variables-and-sections"}[Variables and Sections] that the {kw}`section` command makes it possible not only to group together elements of a theory that go together, but also to declare variables that are inserted as arguments to theorems and definitions, as necessary. Remember that the point of the {kw}`variable` command is to declare variables for use in theorems, as in the following example: ```lean section variable (x y : Nat) def double := x + x #check double y #check double (2 * x) attribute [local simp] Nat.add_assoc Nat.add_comm Nat.add_left_comm theorem t1 : double (x + y) = double x + double y := by simp [double] #check t1 y #check t1 (2 * x) theorem t2 : double (x * y) = double x * y := by simp [double, Nat.add_mul] end ``` The definition of {leanRef}`double` does not have to declare {leanRef}`x` as an argument; Lean detects the dependence and inserts it automatically. Similarly, Lean detects the occurrence of {leanRef}`x` in {leanRef}`t1` and {leanRef}`t2`, and inserts it automatically there, too. 
Note that {leanRef}`double` does _not_ have {leanRef}`y` as argument. Variables are only included in declarations where they are actually used. # More on Namespaces %%% tag := "more-on-namespaces" %%% In Lean, identifiers are given by hierarchical _names_ like {lit}`Foo.Bar.baz`. We saw in {ref "namespaces"}[Namespaces] that Lean provides mechanisms for working with hierarchical names. The command {kw}`namespace`{lit}` Foo` causes {lit}`Foo` to be prepended to the name of each definition and theorem until {kw}`end`{lit}` Foo` is encountered. The command {kw}`open`{lit}` Foo` then creates temporary _aliases_ to definitions and theorems that begin with prefix {lit}`Foo`. ```lean namespace Foo def bar : Nat := 1 end Foo open Foo #check bar #check Foo.bar ``` The following definition ```lean def Foo.bar : Nat := 1 ``` is treated as a macro, and expands to ```lean namespace Foo def bar : Nat := 1 end Foo ``` Although the names of theorems and definitions have to be unique, the aliases that identify them do not. When we open a namespace, an identifier may be ambiguous. Lean tries to use type information to disambiguate the meaning in context, but you can always disambiguate by giving the full name. To that end, the string {lit}`_root_` is an explicit description of the empty prefix. 
```lean def String.add (a b : String) : String := a ++ b def Bool.add (a b : Bool) : Bool := a != b def add (α β : Type) : Type := Sum α β open Bool open String -- This reference is ambiguous: -- #check add #check String.add -- String.add (a b : String) : String #check Bool.add -- Bool.add (a b : Bool) : Bool #check _root_.add -- _root_.add (α β : Type) : Type #check add "hello" "world" -- "hello".add "world" : String #check add true false -- true.add false : Bool #check add Nat Nat -- _root_.add Nat Nat : Type ``` We can prevent the shorter alias from being created by using the {kw}`protected` keyword: ```lean protected def Foo.bar : Nat := 1 open Foo /-- error: Unknown identifier `bar` -/ #guard_msgs in #check bar -- error #check Foo.bar ``` This is often used for names like {name}`Nat.rec` and {name}`Nat.recOn`, to prevent overloading of common names. The {leanRef}`open` command admits variations. The command ```lean open Nat (succ zero gcd) #check zero -- Nat.zero : Nat #eval gcd 15 6 -- 3 ``` creates aliases for only the identifiers listed. The command ```lean open Nat hiding succ gcd #check zero -- Nat.zero : Nat /-- error: Unknown identifier `gcd` -/ #guard_msgs in #eval gcd 15 6 -- error #eval Nat.gcd 15 6 -- 3 ``` creates aliases for everything in the {lit}`Nat` namespace _except_ the identifiers listed. ```lean open Nat renaming mul → times, add → plus #eval plus (times 2 2) 3 -- 7 ``` creates aliases renaming {lean}`Nat.mul` to {leanRef}`times` and {lean}`Nat.add` to {leanRef}`plus`. It is sometimes useful to {kw}`export` aliases from one namespace to another, or to the top level. The command ```lean export Nat (succ add sub) ``` creates aliases for {leanRef}`succ`, {leanRef}`add`, and {leanRef}`sub` in the current namespace, so that whenever the namespace is open, these aliases are available. If this command is used outside a namespace, the aliases are exported to the top level. 
# Attributes %%% tag := "attributes" %%% The main function of Lean is to translate user input to formal expressions that are checked by the kernel for correctness and then stored in the environment for later use. But some commands have other effects on the environment, either assigning attributes to objects in the environment, defining notation, or declaring instances of type classes, as described in the chapter on {ref "type-classes"}[type classes]. Most of these commands have global effects, which is to say, they remain in effect not only in the current file, but also in any file that imports it. However, such commands often support the {kw}`local` modifier, which indicates that they only have effect until the current {kw}`section` or {leanRef}`namespace` is closed, or until the end of the current file. In {ref "using-the-simplifier"}[Using the Simplifier], we saw that theorems can be annotated with the {attr}`[simp]` attribute, which makes them available for use by the simplifier. The following example defines the prefix relation on lists, proves that this relation is reflexive, and assigns the {attr}`[simp]` attribute to that theorem. ```lean def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ @[simp] theorem List.isPrefix_self (as : List α) : isPrefix as as := ⟨[], by simp⟩ example : isPrefix [1, 2, 3] [1, 2, 3] := by simp ``` The simplifier then proves {leanRef}`isPrefix [1, 2, 3] [1, 2, 3]` by rewriting it to {lean}`True`. One can also assign the attribute any time after the definition takes place: ```lean def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ ------ theorem List.isPrefix_self (as : List α) : isPrefix as as := ⟨[], by simp⟩ attribute [simp] List.isPrefix_self ``` In all these cases, the attribute remains in effect in any file that imports the one in which the declaration occurs. 
Adding the {kw}`local` modifier restricts the scope: ```lean def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ ------ section theorem List.isPrefix_self (as : List α) : isPrefix as as := ⟨[], by simp⟩ attribute [local simp] List.isPrefix_self example : isPrefix [1, 2, 3] [1, 2, 3] := by simp end /-- error: `simp` made no progress -/ #guard_msgs in example : isPrefix [1, 2, 3] [1, 2, 3] := by simp ``` :::leanFirst For another example, we can use the {kw}`instance` command to assign the notation {lit}`≤` to the {leanRef}`isPrefix` relation. That command, which will be explained in the chapter on {ref "type-classes"}[type classes], works by assigning an {attr}`[instance]` attribute to the associated definition. ```lean def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ instance : LE (List α) where le := isPrefix theorem List.isPrefix_self (as : List α) : as ≤ as := ⟨[], by simp⟩ ``` ::: That assignment can also be made local: ```lean def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ ------ def instLe : LE (List α) := { le := isPrefix } section attribute [local instance] instLe example (as : List α) : as ≤ as := ⟨[], by simp⟩ end /-- error: failed to synthesize LE (List α) Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. -/ #guard_msgs in example (as : List α) : as ≤ as := ⟨[], by simp⟩ ``` In {ref "notation"}[Notation] below, we will discuss Lean's mechanisms for defining notation, and see that they also support the {kw}`local` modifier. However, in {ref "setting-options"}[Setting Options], we will discuss Lean's mechanisms for setting options, which does _not_ follow this pattern: options can _only_ be set locally, which is to say, their scope is always restricted to the current section or current file. 
# More on Implicit Arguments %%% tag := "more-on-implicit-arguments" %%% :::setup ``` variable (α : Type u) (β : α → Type v) (t : {x : α} → β x) ``` In {ref "implicit-arguments"}[Implicit Arguments], we saw that if Lean displays the type of a term {lean}`t` as {lean}`{x : α} → β x`, then the curly brackets indicate that {leanRef}`x` has been marked as an _implicit argument_ to {lean}`t`. This means that whenever you write {lean}`t`, a placeholder, or “hole,” is inserted, so that {lean}`t` is replaced by {lean}`@t _`. If you don't want that to happen, you have to write {lean}`@t` instead. ::: :::setup ``` def f (x : Nat) {y : Nat} (z : Nat) : Nat := x + y + z -- Equivalent: example := f 7 example := @f 7 _ ``` Notice that implicit arguments are inserted eagerly. Suppose we define a function {lean}`f : (x : Nat) → {y : Nat} → (z : Nat) → Nat`. Then, when we write the expression {lean}`f 7` without further arguments, it is parsed as {lean}`@f 7 _`. ::: :::setup ``` def f (x : Nat) {{y : Nat}} (z : Nat) : Nat := x + y + z -- Just f 7 example := f 7 -- These are equivalent: example := @f 7 _ 3 example := f 7 3 -- Alternative syntax: def f' (x : Nat) ⦃y : Nat⦄ (z : Nat) : Nat := x + y + z ``` Lean offers a weaker annotation which specifies that a placeholder should only be added _before_ a subsequent explicit argument. It can be written with double braces, so the type of {lean}`f` would be {lean}`f : (x : Nat) → {{y : Nat}} → (z : Nat) → Nat`. With this annotation, the expression {lean}`f 7` would be parsed as is, whereas {lean}`f 7 3` would be parsed as {lean}`@f 7 _ 3`, just as it would be with the strong annotation. This annotation can also be written as {lit}`⦃y : Nat⦄`, where the Unicode brackets are entered as {kbd}`\{{` and {kbd}`\}}`, respectively. ::: To illustrate the difference, consider the following example, which shows that a reflexive euclidean relation is both symmetric and transitive. 
```lean def reflexive {α : Type u} (r : α → α → Prop) : Prop := ∀ (a : α), r a a def symmetric {α : Type u} (r : α → α → Prop) : Prop := ∀ {a b : α}, r a b → r b a def transitive {α : Type u} (r : α → α → Prop) : Prop := ∀ {a b c : α}, r a b → r b c → r a c def Euclidean {α : Type u} (r : α → α → Prop) : Prop := ∀ {a b c : α}, r a b → r a c → r b c theorem th1 {α : Type u} {r : α → α → Prop} (reflr : reflexive r) (euclr : Euclidean r) : symmetric r := fun {a b : α} => fun (h : r a b) => show r b a from euclr h (reflr _) theorem th2 {α : Type u} {r : α → α → Prop} (symmr : symmetric r) (euclr : Euclidean r) : transitive r := fun {a b c : α} => fun (rab : r a b) (rbc : r b c) => euclr (symmr rab) rbc theorem th3 {α : Type u} {r : α → α → Prop} (reflr : reflexive r) (euclr : Euclidean r) : transitive r := th2 (th1 reflr @euclr) @euclr variable (r : α → α → Prop) variable (euclr : Euclidean r) #check euclr ``` The results are broken down into small steps: {leanRef}`th1` shows that a relation that is reflexive and euclidean is symmetric, and {leanRef}`th2` shows that a relation that is symmetric and euclidean is transitive. Then {leanRef}`th3` combines the two results. But notice that we have to manually disable the implicit arguments in {leanRef}`euclr`, because otherwise too many implicit arguments are inserted. 
The problem goes away if we use weak implicit arguments: ```lean def reflexive {α : Type u} (r : α → α → Prop) : Prop := ∀ (a : α), r a a def symmetric {α : Type u} (r : α → α → Prop) : Prop := ∀ {{a b : α}}, r a b → r b a def transitive {α : Type u} (r : α → α → Prop) : Prop := ∀ {{a b c : α}}, r a b → r b c → r a c def Euclidean {α : Type u} (r : α → α → Prop) : Prop := ∀ {{a b c : α}}, r a b → r a c → r b c theorem th1 {α : Type u} {r : α → α → Prop} (reflr : reflexive r) (euclr : Euclidean r) : symmetric r := fun {a b : α} => fun (h : r a b) => show r b a from euclr h (reflr _) theorem th2 {α : Type u} {r : α → α → Prop} (symmr : symmetric r) (euclr : Euclidean r) : transitive r := fun {a b c : α} => fun (rab : r a b) (rbc : r b c) => euclr (symmr rab) rbc theorem th3 {α : Type u} {r : α → α → Prop} (reflr : reflexive r) (euclr : Euclidean r) : transitive r := th2 (th1 reflr euclr) euclr variable (r : α → α → Prop) variable (euclr : Euclidean r) #check euclr -- euclr : Euclidean r ``` There is a third kind of implicit argument that is denoted with square brackets, {lit}`[` and {lit}`]`. These are used for type classes, as explained in the chapter on {ref "type-classes"}[type classes]. # Notation %%% tag := "notation" %%% Identifiers in Lean can include any alphanumeric characters, including Greek characters (other than ∀ , Σ , and λ , which, as we have seen, have a special meaning in the dependent type theory). They can also include subscripts, which can be entered by typing {kbd}`\_` followed by the desired subscripted character. Lean's parser is extensible, which is to say, we can define new notation. Lean's syntax can be extended and customized by users at every level, ranging from basic “mixfix” notations to custom elaborators. In fact, all builtin syntax is parsed and processed using the same mechanisms and APIs open to users. In this section, we will describe and explain the various extension points. 
While introducing new notations is a relatively rare feature in programming languages and sometimes even frowned upon because of its potential to obscure code, it is an invaluable tool in formalization for expressing established conventions and notations of the respective field succinctly in code. Going beyond basic notations, Lean's ability to factor out common boilerplate code into (well-behaved) macros and to embed entire custom domain specific languages (DSLs) to textually encode subproblems efficiently and readably can be of great benefit to both programmers and proof engineers alike. ## Notations and Precedence %%% tag := "notations-and-precedence" %%% The most basic syntax extension commands allow introducing new (or overloading existing) prefix, infix, and postfix operators. ```lean infixl:65 " + " => HAdd.hAdd -- left-associative infix:50 " = " => Eq -- non-associative infixr:80 " ^ " => HPow.hPow -- right-associative prefix:100 "-" => Neg.neg postfix:max "⁻¹" => Inv.inv ``` After the initial command name describing the operator kind (its “{deftech}[fixity]”), we give the _parsing precedence_ of the operator preceded by a colon {lit}`:`, then a new or existing token surrounded by double quotes (the whitespace is used for pretty printing), then the function this operator should be translated to after the arrow {lit}`=>`. The precedence is a natural number describing how “tightly” an operator binds to its arguments, encoding the order of operations. 
We can make this more precise by looking at the commands the above unfold to: ```lean notation:65 lhs:65 " + " rhs:66 => HAdd.hAdd lhs rhs notation:50 lhs:51 " = " rhs:51 => Eq lhs rhs notation:80 lhs:81 " ^ " rhs:80 => HPow.hPow lhs rhs notation:100 "-" arg:100 => Neg.neg arg -- `max` is a shorthand for precedence 1024: notation:1024 arg:1024 "⁻¹" => Inv.inv arg ``` :::setup ``` variable {p : Nat} {a b c : α} [Add α] [Pow α α] ``` It turns out that all commands from the first code block are in fact command _macros_ translating to the more general {leanRef}`notation` command. We will learn about writing such macros below. Instead of a single token, the {leanRef}`notation` command accepts a mixed sequence of tokens and named term placeholders with precedences, which can be referenced on the right-hand side of {lit}`=>` and will be replaced by the respective term parsed at that position. A placeholder with precedence {lean}`p` accepts only notations with precedence at least {lean}`p` in that place. Thus the string {lean}`a + b + c` cannot be parsed as the equivalent of {lean}`a + (b + c)` because the right-hand side operand of an {leanRef}`infixl` notation has precedence one greater than the notation itself. In contrast, {leanRef}`infixr` reuses the notation's precedence for the right-hand side operand, so {lean}`a ^ b ^ c` _can_ be parsed as {lean}`a ^ (b ^ c)`. Note that if we used {leanRef}`notation` directly to introduce an infix notation like ::: ```lean def wobble : α → β → γ := sorry ------ notation:65 lhs:65 " ~ " rhs:65 => wobble lhs rhs ``` :::setup ``` variable (a : α) (b : β) (c : γ) def wobble : α → β → γ := sorry notation:65 lhs:65 " ~ " rhs:65 => wobble lhs rhs ``` where the precedences do not sufficiently determine associativity, Lean's parser will default to right associativity. 
More precisely, Lean's parser follows a local _longest parse_ rule in the presence of ambiguous grammars: when parsing the right-hand side of {lit}`a ~` in {lean}`a ~ b ~ c`, it will continue parsing as long as possible (as the current precedence allows), not stopping after {leanRef}`b` but parsing {leanRef}`~ c` as well. Thus the term is equivalent to {lean}`a ~ (b ~ c)`. ::: As mentioned above, the {leanRef}`notation` command allows us to define arbitrary _mixfix_ syntax freely mixing tokens and placeholders. ```lean set_option quotPrecheck false ------ notation:max "(" e ")" => e notation:10 Γ " ⊢ " e " : " τ => Typing Γ e τ ``` Placeholders without precedence default to {lit}`0`, i.e. they accept notations of any precedence in their place. If two notations overlap, we again apply the longest parse rule: ```lean notation:65 a " + " b:66 " + " c:66 => a + b - c #eval 1 + 2 + 3 -- 0 ``` The new notation is preferred to the binary notation since the latter, before chaining, would stop parsing after {leanRef}`1 + 2`. If there are multiple notations accepting the same longest parse, the choice will be delayed until elaboration, which will fail unless exactly one overload is type-correct. # Coercions %%% tag := "coercions" %%% In Lean, the type of natural numbers, {lean}`Nat`, is different from the type of integers, {lean}`Int`. But there is a function {lean}`Int.ofNat` that embeds the natural numbers in the integers, meaning that we can view any natural number as an integer, when needed. Lean has mechanisms to detect and insert _coercions_ of this sort. Coercions can be explicitly requested using the overloaded {lit}`↑` operator. 
```lean variable (m n : Nat) variable (i j : Int) #check i + m -- i + ↑m : Int #check i + m + j -- i + ↑m + j : Int #check i + m + n -- i + ↑m + ↑n : Int ``` # Displaying Information %%% tag := "displaying-information" %%% There are a number of ways in which you can query Lean for information about its current state and the objects and theorems that are available in the current context. You have already seen two of the most common ones, {kw}`#check` and {kw}`#eval`. Remember that {kw}`#check` is often used in conjunction with the {lit}`@` operator, which makes all of the arguments to a theorem or definition explicit. In addition, you can use the {kw}`#print` command to get information about any identifier. If the identifier denotes a definition or theorem, Lean prints the type of the symbol, and its definition. If it is a constant or an axiom, Lean indicates that fact, and shows the type. ```lean -- examples with equality #check Eq #check @Eq #check Eq.symm #check @Eq.symm #print Eq.symm -- examples with And #check And #check And.intro #check @And.intro -- a user-defined function def foo {α : Type u} (x : α) : α := x #check foo #check @foo #print foo ``` # Setting Options %%% tag := "setting-options" %%% Lean maintains a number of internal variables that can be set by users to control its behavior. The syntax for doing so is as follows: {kw}`set_option`{lit}` <name> <value>` One very useful family of options controls the way Lean's _pretty printer_ displays terms. 
The following options take an input of true or false: ``` pp.explicit : display implicit arguments pp.universes : display hidden universe parameters pp.notation : display output using defined notations ``` As an example, the following settings yield much longer output: ```lean set_option pp.explicit true set_option pp.universes true set_option pp.notation false #check 2 + 2 = 4 #reduce (fun x => x + 2) = (fun x => x + 3) #check (fun x => x + 1) 1 ``` The command {leanCommand}`set_option pp.all true` carries out these settings all at once, whereas {leanCommand}`set_option pp.all false` reverts to the previous values. Pretty printing additional information is often very useful when you are debugging a proof, or trying to understand a cryptic error message. Too much information can be overwhelming, though, and Lean's defaults are generally sufficient for ordinary interactions. :::comment ``` <!-- # Elaboration Hints When you ask Lean to process an expression like `λ x y z, f (x + y) z`, you are leaving information implicit. For example, the types of `x`, `y`, and `z` have to be inferred from the context, the notation `+` may be overloaded, and there may be implicit arguments to `f` that need to be filled in as well. Moreover, we will see in :numref:`Chapter %s <type_classes>` that some implicit arguments are synthesized by a process known as _type class resolution_. And we have also already seen in the last chapter that some parts of an expression can be constructed by the tactic framework. Inferring some implicit arguments is straightforward. For example, suppose a function `f` has type `Π {α : Type*}, α → α → α` and Lean is trying to parse the expression `f n`, where `n` can be inferred to have type `nat`. Then it is clear that the implicit argument `α` has to be `nat`. However, some inference problems are _higher order_. For example, the substitution operation for equality, `eq.subst`, has the following type: .. 
code-block:: text eq.subst : ∀ {α : Sort u} {p : α → Prop} {a b : α}, a = b → p a → p b Now suppose we are given `a b : ℕ` and `h₁ : a = b` and `h₂ : a * b > a`. Then, in the expression `eq.subst h₁ h₂`, `P` could be any of the following: - `λ x, x * b > x` - `λ x, x * b > a` - `λ x, a * b > x` - `λ x, a * b > a` In other words, our intent may be to replace either the first or second `a` in `h₂`, or both, or neither. Similar ambiguities arise in inferring induction predicates, or inferring function arguments. Even second-order unification is known to be undecidable. Lean therefore relies on heuristics to fill in such arguments, and when it fails to guess the right ones, they need to be provided explicitly. To make matters worse, sometimes definitions need to be unfolded, and sometimes expressions need to be reduced according to the computational rules of the underlying logical framework. Once again, Lean has to rely on heuristics to determine what to unfold or reduce, and when. There are attributes, however, that can be used to provide hints to the elaborator. One class of attributes determines how eagerly definitions are unfolded: constants can be marked with the attribute `[reducible]`, `[semireducible]`, or `[irreducible]`. Definitions are marked `[semireducible]` by default. A definition with the `[reducible]` attribute is unfolded eagerly; if you think of a definition as serving as an abbreviation, this attribute would be appropriate. The elaborator avoids unfolding definitions with the `[irreducible]` attribute. Theorems are marked `[irreducible]` by default, because typically proofs are not relevant to the elaboration process. It is worth emphasizing that these attributes are only hints to the elaborator. When checking an elaborated term for correctness, Lean's kernel will unfold whatever definitions it needs to unfold. 
As with other attributes, the ones above can be assigned with the `local` modifier, so that they are in effect only in the current section or file. Lean also has a family of attributes that control the elaboration strategy. A definition or theorem can be marked `[elab_with_expected_type]`, `[elab_simple]`. or `[elab_as_eliminator]`. When applied to a definition `f`, these bear on elaboration of an expression `f a b c ...` in which `f` is applied to arguments. With the default attribute, `[elab_with_expected_type]`, the arguments `a`, `b`, `c`, ... are elaborating using information about their expected type, inferred from `f` and the previous arguments. In contrast, with `[elab_simple]`, the arguments are elaborated from left to right without propagating information about their types. The last attribute, `[elab_as_eliminator]`, is commonly used for eliminators like recursors, induction principles, and `eq.subst`. It uses a separate heuristic to infer higher-order parameters. We will consider such operations in more detail in the next chapter. Once again, these attributes can be assigned and reassigned after an object is defined, and you can use the `local` modifier to limit their scope. Moreover, using the `@` symbol in front of an identifier in an expression instructs the elaborator to use the `[elab_simple]` strategy; the idea is that, when you provide the tricky parameters explicitly, you want the elaborator to weigh that information heavily. In fact, Lean offers an alternative annotation, `@@`, which leaves parameters before the first higher-order parameter implicit. For example, `@@eq.subst` leaves the type of the equation implicit, but makes the context of the substitution explicit. --> ``` ::: # Using the Library %%% tag := "using-the-library" %%% To use Lean effectively you will inevitably need to make use of definitions and theorems in the library. 
Recall that the {kw}`import` command at the beginning of a file imports previously compiled results from other files, and that importing is transitive; if you import {lit}`Foo` and {lit}`Foo` imports {lit}`Bar`, then the definitions and theorems from {lit}`Bar` are available to you as well. But the act of opening a namespace, which provides shorter names, does not carry over. In each file, you need to open the namespaces you wish to use. In general, it is important for you to be familiar with the library and its contents, so you know what theorems, definitions, notations, and resources are available to you. Below we will see that Lean's editor modes can also help you find things you need, but studying the contents of the library directly is often unavoidable. Lean's standard library can be found online, on GitHub: - [https://github.com/leanprover/lean4/tree/master/src/Init](https://github.com/leanprover/lean4/tree/master/src/Init) - [https://github.com/leanprover/lean4/tree/master/src/Std](https://github.com/leanprover/lean4/tree/master/src/Std) You can see the contents of these directories and files using GitHub's browser interface. If you have installed Lean on your own computer, you can find the library in the {lit}`lean` folder, and explore it with your file manager. Comment headers at the top of each file provide additional information. Lean's library developers follow general naming guidelines to make it easier to guess the name of a theorem you need, or to find it using tab completion in editors with a Lean mode that supports this, which is discussed in the next section. Identifiers are generally {lit}`camelCase`, and types are {lit}`CamelCase`. For theorem names, we rely on descriptive names where the different components are separated by {lit}`_`s. 
Often the name of a theorem simply describes the conclusion: ```lean #check Nat.succ_ne_zero #check Nat.zero_add #check Nat.mul_one #check Nat.le_of_succ_le_succ ``` :::setup ``` open Nat ``` Remember that identifiers in Lean can be organized into hierarchical namespaces. For example, the theorem named {lean}`le_of_succ_le_succ` in the namespace {lit}`Nat` has full name {lean}`Nat.le_of_succ_le_succ`, but the shorter name is made available by the command {kw}`open`{lit}` Nat` (for names not marked as {kw}`protected`). We will see in the chapters on {ref "inductive-types"}[inductive types] and {ref "structures-and-records"}[structures and records] that defining structures and inductive data types in Lean generates associated operations, and these are stored in a namespace with the same name as the type under definition. For example, the product type comes with the following operations: ::: ```lean #check @Prod.mk #check @Prod.fst #check @Prod.snd #check @Prod.rec ``` The first is used to construct a pair, whereas the next two, {leanRef}`Prod.fst` and {leanRef}`Prod.snd`, project the two elements. The last, {leanRef}`Prod.rec`, provides another mechanism for defining functions on a product in terms of a function on the two components. Names like {leanRef}`Prod.rec` are _protected_, which means that one has to use the full name even when the {lit}`Prod` namespace is open. With the propositions as types correspondence, logical connectives are also instances of inductive types, and so we tend to use dot notation for them as well: ```lean #check @And.intro #check @And.casesOn #check @And.left #check @And.right #check @Or.inl #check @Or.inr #check @Or.elim #check @Exists.intro #check @Exists.elim #check @Eq.refl #check @Eq.subst ``` # Auto Bound Implicit Arguments %%% tag := "auto-bound-implicit-arguments" %%% :::leanFirst In the previous section, we have shown how implicit arguments make functions more convenient to use. 
However, functions such as {leanRef}`compose` are still quite verbose to define. Note that the universe polymorphic {leanRef}`compose` is even more verbose than the one previously defined. ```lean universe u v w def compose {α : Type u} {β : Type v} {γ : Type w} (g : β → γ) (f : α → β) (x : α) : γ := g (f x) ``` ::: :::leanFirst You can avoid the {kw}`universe` command by providing the universe parameters when defining {leanRef}`compose`. ```lean def compose.{u, v, w} {α : Type u} {β : Type v} {γ : Type w} (g : β → γ) (f : α → β) (x : α) : γ := g (f x) ``` ::: ::::leanFirst Lean 4 supports a new feature called _auto bound implicit arguments_. It makes functions such as {leanRef}`compose` much more convenient to write. When Lean processes the header of a declaration, any unbound identifier is automatically added as an implicit argument. With this feature we can write {leanRef}`compose` as :::TODO Update and check details ::: ```lean def compose (g : β → γ) (f : α → β) (x : α) : γ := g (f x) #check @compose -- @compose : {β : Sort u_1} → {γ : Sort u_2} → {α : Sort u_3} → (β → γ) → (α → β) → α → γ ``` Note that Lean inferred a more general type using {lean}`Sort` instead of {leanRef}`Type`. :::: Although we love this feature and use it extensively when implementing Lean, we realize some users may feel uncomfortable with it. Thus, you can disable it using the command {leanCommand}`set_option autoImplicit false`. 
```lean set_option autoImplicit false /-- error: Unknown identifier `β` --- error: Unknown identifier `γ` --- error: Unknown identifier `α` --- error: Unknown identifier `β` --- error: Unknown identifier `α` --- error: Unknown identifier `γ` -/ #guard_msgs in def compose (g : β → γ) (f : α → β) (x : α) : γ := g (f x) ``` # Implicit Lambdas %%% tag := "implicit-lambdas" %%% :::TODO Update this text after archaeology ::: :::leanFirst When the expected type of an expression is a function that is awaiting implicit arguments, the elaborator automatically introduces the corresponding lambdas. For example, {leanRef}`pure`'s type states that the first argument is an implicit type {leanRef}`α`, but {leanRef}`ReaderT.pure`'s first argument is the reader monad's context type {leanRef}`ρ`. It is automatically surrounded with a {kw}`fun`{lit}` {α} => ...`, which allows the elaborator to correctly fill in the implicit arguments in the body. ```lean variable (ρ : Type) (m : Type → Type) [Monad m] ------ instance : Monad (ReaderT ρ m) where pure := ReaderT.pure bind := ReaderT.bind ``` ::: Users can disable the implicit lambda feature by using {lit}`@` or writing a lambda expression with {lit}`{}` or {lit}`[]` binder annotations. 
Here are a few examples ```lean set_option linter.unusedVariables false namespace Ex2 ------ def id1 : {α : Type} → α → α := fun x => x def listId : List ({α : Type} → α → α) := (fun x => x) :: [] -- In this example, implicit lambda introduction has been disabled because -- we use `@` before {kw}`fun` def id2 : {α : Type} → α → α := @fun α (x : α) => id1 x def id3 : {α : Type} → α → α := @fun α x => id1 x def id4 : {α : Type} → α → α := fun x => id1 x -- In this example, implicit lambda introduction has been disabled -- because we used the binder annotation `{...}` def id5 : {α : Type} → α → α := fun {α} x => id1 x ------ end Ex2 ``` # Sugar for Simple Functions %%% tag := "sugar-for-simple-functions" %%% Lean includes a notation for describing simple functions using anonymous placeholders rather than {kw}`fun`. When {lit}`·` occurs as part of a term, the nearest enclosing parentheses become a function with the {lit}`·` as its argument. If the parentheses include multiple placeholders without other intervening parentheses, then they are made into arguments from left to right. Here are a few examples: ```lean namespace Ex3 ------ #check (· + 1) -- fun x => x + 1 : Nat → Nat #check (2 - ·) -- fun x => 2 - x : Nat → Nat #eval [1, 2, 3, 4, 5].foldl (· * ·) 1 -- 120 def f (x y z : Nat) := x + y + z #check (f · 1 ·) -- fun x1 x2 => f x1 1 x2 : Nat → Nat → Nat #eval [(1, 2), (3, 4), (5, 6)].map (·.1) -- [1, 3, 5] ------ end Ex3 ``` Nested parentheses introduce new functions. In the following example, two different lambda expressions are created: ```lean #check (Prod.mk · (· + 1)) -- fun x => (x, fun x => x + 1) : ?m.2 → ?m.2 × (Nat → Nat) ``` # Named Arguments %%% tag := "named-arguments" %%% Named arguments enable you to specify an argument for a parameter by matching the argument with its name rather than with its position in the parameter list. If you don't remember the order of the parameters but know their names, you can send the arguments in any order. 
You may also provide the value for an implicit parameter when Lean fails to infer it. Named arguments also improve the readability of your code by identifying what each argument represents. ```lean def sum (xs : List Nat) := xs.foldl (init := 0) (·+·) #eval sum [1, 2, 3, 4] -- 10 example {a b : Nat} {p : Nat → Nat → Nat → Prop} (h₁ : p a b b) (h₂ : b = a) : p a a b := Eq.subst (motive := fun x => p a x b) h₂ h₁ ``` In the following examples, we illustrate the interaction between named and default arguments. ```lean def f (x : Nat) (y : Nat := 1) (w : Nat := 2) (z : Nat) := x + y + w - z example (x z : Nat) : f (z := z) x = x + 1 + 2 - z := rfl example (x z : Nat) : f x (z := z) = x + 1 + 2 - z := rfl example (x y : Nat) : f x y = fun z => x + y + 2 - z := rfl example : f = (fun x z => x + 1 + 2 - z) := rfl example (x : Nat) : f x = fun z => x + 1 + 2 - z := rfl example (y : Nat) : f (y := 5) = fun x z => x + 5 + 2 - z := rfl def g {α} [Add α] (a : α) (b? : Option α := none) (c : α) : α := match b? with | none => a + c | some b => a + b + c variable {α} [Add α] example : g = fun (a c : α) => a + c := rfl example (x : α) : g (c := x) = fun (a : α) => a + x := rfl example (x : α) : g (b? := some x) = fun (a c : α) => a + x + c := rfl example (x : α) : g x = fun (c : α) => x + c := rfl example (x y : α) : g x y = fun (c : α) => x + y + c := rfl ``` You can use {lit}`..` to provide missing explicit arguments as {lit}`_`. This feature combined with named arguments is useful for writing patterns. Here is an example: ```lean inductive Term where | var (name : String) | num (val : Nat) | app (fn : Term) (arg : Term) | lambda (name : String) (type : Term) (body : Term) def getBinderName : Term → Option String | Term.lambda (name := n) .. => some n | _ => none def getBinderType : Term → Option Term | Term.lambda (type := t) .. 
=> some t | _ => none ``` Ellipses are also useful when explicit arguments can be automatically inferred by Lean, and we want to avoid a sequence of {lit}`_`s. ```lean example (f : Nat → Nat) (a b c : Nat) : f (a + b + c) = f (a + (b + c)) := congrArg f (Nat.add_assoc ..) ```
theorem_proving_in_lean4/book/static/fonts/source-serif/LICENSE.md
Copyright 2014 - 2023 Adobe (http://www.adobe.com/), with Reserved Font Name ‘Source’. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
theorem_proving_in_lean4/book/static/fonts/noto-sans-mono/OFL.txt
Copyright 2022 The Noto Project Authors (https://github.com/notofonts/latin-greek-cyrillic) This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: https://openfontlicense.org ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). "Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. 
"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. TERMINATION This license becomes null and void if any of the above conditions are not met. 
DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
theorem_proving_in_lean4/book/static/fonts/source-code-pro/LICENSE.md
© 2023 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
theorem_proving_in_lean4/book/static/fonts/source-sans/LICENSE.md
Copyright 2010-2024 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
mathematics_in_lean/MIL.lean
import MIL.C01_Introduction.S01_Getting_Started import MIL.C01_Introduction.S02_Overview import MIL.C02_Basics.S01_Calculating import MIL.C02_Basics.S02_Proving_Identities_in_Algebraic_Structures import MIL.C02_Basics.S03_Using_Theorems_and_Lemmas import MIL.C02_Basics.S04_More_on_Order_and_Divisibility import MIL.C02_Basics.S05_Proving_Facts_about_Algebraic_Structures import MIL.C03_Logic.S01_Implication_and_the_Universal_Quantifier import MIL.C03_Logic.S02_The_Existential_Quantifier import MIL.C03_Logic.S03_Negation import MIL.C03_Logic.S04_Conjunction_and_Iff import MIL.C03_Logic.S05_Disjunction import MIL.C03_Logic.S06_Sequences_and_Convergence import MIL.C04_Sets_and_Functions.S01_Sets import MIL.C04_Sets_and_Functions.S02_Functions import MIL.C04_Sets_and_Functions.S03_The_Schroeder_Bernstein_Theorem import MIL.C05_Elementary_Number_Theory.S01_Irrational_Roots import MIL.C05_Elementary_Number_Theory.S02_Induction_and_Recursion import MIL.C05_Elementary_Number_Theory.S03_Infinitely_Many_Primes import MIL.C05_Elementary_Number_Theory.S04_More_Induction import MIL.C06_Discrete_Mathematics.S01_Finsets_and_Fintypes import MIL.C06_Discrete_Mathematics.S02_Counting_Arguments import MIL.C06_Discrete_Mathematics.S03_Inductive_Structures import MIL.C07_Structures.S01_Structures import MIL.C07_Structures.S02_Algebraic_Structures import MIL.C07_Structures.S03_Building_the_Gaussian_Integers import MIL.C08_Hierarchies.S01_Basics import MIL.C08_Hierarchies.S02_Morphisms import MIL.C08_Hierarchies.S03_Subobjects import MIL.C09_Groups_and_Rings.S01_Groups import MIL.C09_Groups_and_Rings.S02_Rings import MIL.C10_Linear_Algebra.S01_Vector_Spaces import MIL.C10_Linear_Algebra.S02_Subspaces import MIL.C10_Linear_Algebra.S03_Endomorphisms import MIL.C10_Linear_Algebra.S04_Bases import MIL.C11_Topology.S01_Filters import MIL.C11_Topology.S02_Metric_Spaces import MIL.C11_Topology.S03_Topological_Spaces import MIL.C12_Differential_Calculus.S01_Elementary_Differential_Calculus import 
MIL.C12_Differential_Calculus.S02_Differential_Calculus_in_Normed_Spaces import MIL.C13_Integration_and_Measure_Theory.S01_Elementary_Integration import MIL.C13_Integration_and_Measure_Theory.S02_Measure_Theory import MIL.C13_Integration_and_Measure_Theory.S03_Integration import MIL.Common
mathematics_in_lean/README.md
# Mathematics in Lean This tutorial depends on Lean 4, VS Code, and Mathlib. You can find the textbook both online and in this repository in [html format](https://leanprover-community.github.io/mathematics_in_lean/) or as a [pdf document](https://leanprover-community.github.io/mathematics_in_lean/mathematics_in_lean.pdf). The book is designed to be read as you work through examples and exercises, using a copy of this repository on your computer. Alternatively, you can use Github Codespaces or Gitpod to run Lean and VS Code in the cloud. This version of *Mathematics in Lean* is designed for [Lean 4](https://leanprover.github.io/) and [Mathlib](https://github.com/leanprover-community/mathlib4). For the Lean 3 version, see [https://github.com/leanprover-community/mathematics_in_lean3](https://github.com/leanprover-community/mathematics_in_lean3). ## To use this repository on your computer Do the following: 1. Install Lean 4 and VS Code following these [instructions](https://leanprover-community.github.io/get_started.html). 2. Make sure you have [git](https://git-scm.com/) installed. 3. Follow these [instructions](https://leanprover-community.github.io/install/project.html#working-on-an-existing-project) to fetch the `mathematics_in_lean` repository and open it up in VS Code. 4. Each section in the textbook has an associated Lean file with examples and exercises. You can find them in the folder `MIL`, organized by chapter. We strongly recommend making a copy of that folder and experimenting and doing the exercises in that copy. This leaves the originals intact, and it also makes it easier to update the repository as it changes (see below). You can call the copy `my_files` or whatever you want and use it to create your own Lean files as well. 
At that point, you can open the textbook in a web browser at [https://leanprover-community.github.io/mathematics_in_lean/](https://leanprover-community.github.io/mathematics_in_lean/) and start reading and doing the exercises in VS Code. The textbook and this repository are still a work in progress. You can update the repository by typing `git pull` followed by `lake exe cache get` inside the `mathematics_in_lean` folder. (This assumes that you have not changed the contents of the `MIL` folder, which is why we suggested making a copy.) ## To use this repository with Github Codespaces If you have trouble installing Lean, you can use Lean directly in your browser using Github Codespaces. This requires a Github account. If you are signed in to Github, click here: <a href='https://codespaces.new/leanprover-community/mathematics_in_lean' target="_blank" rel="noreferrer noopener"><img src='https://github.com/codespaces/badge.svg' alt='Open in GitHub Codespaces' style='max-width: 100%;'></a> Make sure the Machine type is `4-core`, and then press `Create codespace` (this might take a few minutes). This creates a virtual machine in the cloud, and installs Lean and Mathlib. Opening any `.lean` file in the MIL folder will start Lean, though this may also take a little while. We suggest making a copy of the `MIL` directory, as described in the instructions above for using MIL on your computer. You can update the repository by opening a terminal in the browser and typing `git pull` followed by `lake exe cache get` as above. Codespaces offers a certain number of free hours per month. When you are done working, press `Ctrl/Cmd+Shift+P` on your keyboard, start typing `stop current codespace`, and then select `Codespaces: Stop Current Codespace` from the list of options. If you forget, don't worry: the virtual machine will stop itself after a certain amount of time of inactivity. To restart a previous workspace, visit <https://github.com/codespaces>. 
## To use this repository with Gitpod Gitpod is an alternative to Github Codespaces, but is a little less convenient, since it requires you to verify your phone number. If you have a Gitpod account or are willing to sign up for one, point your browser to [https://gitpod.io/#/https://github.com/leanprover-community/mathematics_in_lean](https://gitpod.io/#/https://github.com/leanprover-community/mathematics_in_lean). This creates a virtual machine in the cloud, and installs Lean and Mathlib. It then presents you with a VS Code window, running in a virtual copy of the repository. We suggest making a copy of the `MIL` directory, as described in the instructions above for using MIL on your computer. You can update the repository by opening a terminal in the browser and typing `git pull` followed by `lake exe cache get` as above. Gitpod gives you 50 free hours every month. When you are done working, choose `Stop workspace` from the menu on the left. The workspace should also stop automatically 30 minutes after the last interaction or 3 minutes after closing the tab. To restart a previous workspace, go to [https://gitpod.io/workspaces/](https://gitpod.io/workspaces/). If you change the filter from Active to All, you will see all your recent workspaces. You can pin a workspace to keep it on the list of active ones. ## Contributing PRs and issues should be opened at the upstream [source repository](https://github.com/avigad/mathematics_in_lean_source).
mathematics_in_lean/MIL_solutions.lean
import MIL.C01_Introduction.solutions.Solutions_S01_Getting_Started import MIL.C01_Introduction.solutions.Solutions_S02_Overview import MIL.C02_Basics.solutions.Solutions_S01_Calculating import MIL.C02_Basics.solutions.Solutions_S02_Proving_Identities_in_Algebraic_Structures import MIL.C02_Basics.solutions.Solutions_S03_Using_Theorems_and_Lemmas import MIL.C02_Basics.solutions.Solutions_S04_More_on_Order_and_Divisibility import MIL.C02_Basics.solutions.Solutions_S05_Proving_Facts_about_Algebraic_Structures import MIL.C03_Logic.solutions.Solutions_S01_Implication_and_the_Universal_Quantifier import MIL.C03_Logic.solutions.Solutions_S02_The_Existential_Quantifier import MIL.C03_Logic.solutions.Solutions_S03_Negation import MIL.C03_Logic.solutions.Solutions_S04_Conjunction_and_Iff import MIL.C03_Logic.solutions.Solutions_S05_Disjunction import MIL.C03_Logic.solutions.Solutions_S06_Sequences_and_Convergence import MIL.C04_Sets_and_Functions.solutions.Solutions_S01_Sets import MIL.C04_Sets_and_Functions.solutions.Solutions_S02_Functions import MIL.C04_Sets_and_Functions.solutions.Solutions_S03_The_Schroeder_Bernstein_Theorem import MIL.C05_Elementary_Number_Theory.solutions.Solutions_S01_Irrational_Roots import MIL.C05_Elementary_Number_Theory.solutions.Solutions_S02_Induction_and_Recursion import MIL.C05_Elementary_Number_Theory.solutions.Solutions_S03_Infinitely_Many_Primes import MIL.C05_Elementary_Number_Theory.solutions.Solutions_S04_More_Induction import MIL.C06_Discrete_Mathematics.solutions.Solutions_S01_Finsets_and_Fintypes import MIL.C06_Discrete_Mathematics.solutions.Solutions_S02_Counting_Arguments import MIL.C06_Discrete_Mathematics.solutions.Solutions_S03_Inductive_Structures import MIL.C07_Structures.solutions.Solutions_S01_Structures import MIL.C07_Structures.solutions.Solutions_S02_Algebraic_Structures import MIL.C07_Structures.solutions.Solutions_S03_Building_the_Gaussian_Integers import MIL.C08_Hierarchies.solutions.Solutions_S01_Basics import 
MIL.C08_Hierarchies.solutions.Solutions_S02_Morphisms import MIL.C08_Hierarchies.solutions.Solutions_S03_Subobjects import MIL.C09_Groups_and_Rings.solutions.Solutions_S01_Groups import MIL.C09_Groups_and_Rings.solutions.Solutions_S02_Rings import MIL.C10_Linear_Algebra.solutions.Solutions_S01_Vector_Spaces import MIL.C10_Linear_Algebra.solutions.Solutions_S02_Subspaces import MIL.C10_Linear_Algebra.solutions.Solutions_S03_Endomorphisms import MIL.C10_Linear_Algebra.solutions.Solutions_S04_Bases import MIL.C11_Topology.solutions.Solutions_S01_Filters import MIL.C11_Topology.solutions.Solutions_S02_Metric_Spaces import MIL.C11_Topology.solutions.Solutions_S03_Topological_Spaces import MIL.C12_Differential_Calculus.solutions.Solutions_S01_Elementary_Differential_Calculus import MIL.C12_Differential_Calculus.solutions.Solutions_S02_Differential_Calculus_in_Normed_Spaces import MIL.C13_Integration_and_Measure_Theory.solutions.Solutions_S01_Elementary_Integration import MIL.C13_Integration_and_Measure_Theory.solutions.Solutions_S02_Measure_Theory import MIL.C13_Integration_and_Measure_Theory.solutions.Solutions_S03_Integration import MIL.Common
mathematics_in_lean/html/_sources/C08_Hierarchies.rst.txt
.. _hierarchies: Hierarchies =========== We have seen in :numref:`Chapter %s <structures>` how to define the class of groups and build instances of this class, and then how to build an instance of the commutative ring class. But of course there is a hierarchy here: a commutative ring is in particular an additive group. In this chapter we will study how to build such hierarchies. They appear in all branches of mathematics but in this chapter the emphasis will be on algebraic examples. It may seem premature to discuss how to build hierarchies before more discussions about using existing hierarchies. But some understanding of the technology underlying hierarchies is required to use them. So you should probably still read this chapter, but without trying too hard to remember everything on your first read, then read the following chapters and come back here for a second reading. In this chapter, we will redefine (simpler versions of) many things that appear in Mathlib so we will use indices to distinguish our versions. For instance we will have ``Ring₁`` as our version of ``Ring``. Since we will gradually explain more powerful ways of formalizing structures, those indices will sometimes grow beyond one. .. include:: C08_Hierarchies/S01_Basics.inc .. include:: C08_Hierarchies/S02_Morphisms.inc .. include:: C08_Hierarchies/S03_Subobjects.inc
mathematics_in_lean/html/_sources/C06_Discrete_Mathematics.rst.txt
.. _discrete_mathematics: Discrete Mathematics ==================== *Discrete Mathematics* is the study of finite sets, objects, and structures. We can count the elements of a finite set, we can compute finite sums or products over its elements, we can compute maximums and minimums, and so on. We can also study objects that are generated by finitely many applications of certain generating functions, we can define functions by structural recursion, and we can prove theorems by structural induction. This chapter describes parts of Mathlib that support these activities. .. include:: C06_Discrete_Mathematics/S01_Finsets_and_Fintypes.inc .. include:: C06_Discrete_Mathematics/S02_Counting_Arguments.inc .. include:: C06_Discrete_Mathematics/S03_Inductive_Structures.inc
mathematics_in_lean/html/_sources/C03_Logic.rst.txt
.. _logic: Logic ===== In the last chapter, we dealt with equations, inequalities, and basic mathematical statements like ":math:`x` divides :math:`y`." Complex mathematical statements are built up from simple ones like these using logical terms like "and," "or," "not," and "if ... then," "every," and "some." In this chapter, we show you how to work with statements that are built up in this way. .. include:: C03_Logic/S01_Implication_and_the_Universal_Quantifier.inc .. include:: C03_Logic/S02_The_Existential_Quantifier.inc .. include:: C03_Logic/S03_Negation.inc .. include:: C03_Logic/S04_Conjunction_and_Iff.inc .. include:: C03_Logic/S05_Disjunction.inc .. include:: C03_Logic/S06_Sequences_and_Convergence.inc
mathematics_in_lean/html/_sources/C05_Elementary_Number_Theory.rst.txt
.. _number_theory: Elementary Number Theory ======================== In this chapter, we show you how to formalize some elementary results in number theory. As we deal with more substantive mathematical content, the proofs will get longer and more involved, building on the skills you have already mastered. .. include:: C05_Elementary_Number_Theory/S01_Irrational_Roots.inc .. include:: C05_Elementary_Number_Theory/S02_Induction_and_Recursion.inc .. include:: C05_Elementary_Number_Theory/S03_Infinitely_Many_Primes.inc .. include:: C05_Elementary_Number_Theory/S04_More_Induction.inc
mathematics_in_lean/html/_sources/C04_Sets_and_Functions.rst.txt
.. _sets_and_functions: Sets and Functions ================== The vocabulary of sets, relations, and functions provides a uniform language for carrying out constructions in all the branches of mathematics. Since functions and relations can be defined in terms of sets, axiomatic set theory can be used as a foundation for mathematics. Lean's foundation is based instead on the primitive notion of a *type*, and it includes ways of defining functions between types. Every expression in Lean has a type: there are natural numbers, real numbers, functions from reals to reals, groups, vector spaces, and so on. Some expressions *are* types, which is to say, their type is ``Type``. Lean and Mathlib provide ways of defining new types, and ways of defining objects of those types. Conceptually, you can think of a type as just a set of objects. Requiring every object to have a type has some advantages. For example, it makes it possible to overload notation like ``+``, and it sometimes makes input less verbose because Lean can infer a lot of information from an object's type. The type system also enables Lean to flag errors when you apply a function to the wrong number of arguments, or apply a function to arguments of the wrong type. Lean's library does define elementary set-theoretic notions. In contrast to set theory, in Lean a set is always a set of objects of some type, such as a set of natural numbers or a set of functions from real numbers to real numbers. The distinction between types and sets takes some getting used to, but this chapter will take you through the essentials. .. include:: C04_Sets_and_Functions/S01_Sets.inc .. include:: C04_Sets_and_Functions/S02_Functions.inc .. include:: C04_Sets_and_Functions/S03_The_Schroeder_Bernstein_Theorem.inc
mathematics_in_lean/html/_sources/genindex.rst.txt
Index =====
mathematics_in_lean/html/_sources/C02_Basics.rst.txt
.. _basics: Basics ====== This chapter is designed to introduce you to the nuts and bolts of mathematical reasoning in Lean: calculating, applying lemmas and theorems, and reasoning about generic structures. .. include:: C02_Basics/S01_Calculating.inc .. include:: C02_Basics/S02_Proving_Identities_in_Algebraic_Structures.inc .. include:: C02_Basics/S03_Using_Theorems_and_Lemmas.inc .. include:: C02_Basics/S04_More_on_Order_and_Divisibility.inc .. include:: C02_Basics/S05_Proving_Facts_about_Algebraic_Structures.inc
mathematics_in_lean/html/_sources/C07_Structures.rst.txt
.. _structures: Structures ========== Modern mathematics makes essential use of algebraic structures, which encapsulate patterns that can be instantiated in multiple settings. The subject provides various ways of defining such structures and constructing particular instances. Lean therefore provides corresponding ways of defining structures formally and working with them. You have already seen examples of algebraic structures in Lean, such as rings and lattices, which were discussed in :numref:`Chapter %s <basics>`. This chapter will explain the mysterious square bracket annotations that you saw there, ``[Ring α]`` and ``[Lattice α]``. It will also show you how to define and use algebraic structures on your own. For more technical detail, you can consult `Theorem Proving in Lean <https://leanprover.github.io/theorem_proving_in_lean/>`_, and a paper by Anne Baanen, `Use and abuse of instance parameters in the Lean mathematical library <https://arxiv.org/abs/2202.01629>`_. .. include:: C07_Structures/S01_Structures.inc .. include:: C07_Structures/S02_Algebraic_Structures.inc .. include:: C07_Structures/S03_Building_the_Gaussian_Integers.inc
mathematics_in_lean/html/_sources/C11_Topology.rst.txt
.. _topology: .. index:: topology Topology ======== Calculus is based on the concept of a function, which is used to model quantities that depend on one another. For example, it is common to study quantities that change over time. The notion of a *limit* is also fundamental. We may say that the limit of a function :math:`f(x)` is a value :math:`b` as :math:`x` approaches a value :math:`a`, or that :math:`f(x)` *converges to* :math:`b` as :math:`x` approaches :math:`a`. Equivalently, we may say that :math:`f(x)` approaches :math:`b` as :math:`x` approaches a value :math:`a`, or that it *tends to* :math:`b` as :math:`x` tends to :math:`a`. We have already begun to consider such notions in :numref:`sequences_and_convergence`. *Topology* is the abstract study of limits and continuity. Having covered the essentials of formalization in Chapters :numref:`%s <basics>` to :numref:`%s <structures>`, in this chapter, we will explain how topological notions are formalized in Mathlib. Not only do topological abstractions apply in much greater generality, but they also, somewhat paradoxically, make it easier to reason about limits and continuity in concrete instances. Topological notions build on quite a few layers of mathematical structure. The first layer is naive set theory, as described in :numref:`Chapter %s <sets_and_functions>`. The next layer is the theory of *filters*, which we will describe in :numref:`filters`. On top of that, we layer the theories of *topological spaces*, *metric spaces*, and a slightly more exotic intermediate notion called a *uniform space*. Whereas previous chapters relied on mathematical notions that were likely familiar to you, the notion of a filter is less well known, even to many working mathematicians. The notion is essential, however, for formalizing mathematics effectively. Let us explain why. Let ``f : ℝ → ℝ`` be any function. 
We can consider the limit of ``f x`` as ``x`` approaches some value ``x₀``, but we can also consider the limit of ``f x`` as ``x`` approaches infinity or negative infinity. We can moreover consider the limit of ``f x`` as ``x`` approaches ``x₀`` from the right, conventionally written ``x₀⁺``, or from the left, written ``x₀⁻``. There are variations where ``x`` approaches ``x₀`` or ``x₀⁺`` or ``x₀⁻`` but is not allowed to take on the value ``x₀`` itself. This results in at least eight ways that ``x`` can approach something. We can also restrict to rational values of ``x`` or place other constraints on the domain, but let's stick to those 8 cases. We have a similar variety of options on the codomain: we can specify that ``f x`` approaches a value from the left or right, or that it approaches positive or negative infinity, and so on. For example, we may wish to say that ``f x`` tends to ``+∞`` when ``x`` tends to ``x₀`` from the right without being equal to ``x₀``. This results in 64 different kinds of limit statements, and we haven't even begun to deal with limits of sequences, as we did in :numref:`sequences_and_convergence`. The problem is compounded even further when it comes to the supporting lemmas. For instance, limits compose: if ``f x`` tends to ``y₀`` when ``x`` tends to ``x₀`` and ``g y`` tends to ``z₀`` when ``y`` tends to ``y₀`` then ``g ∘ f x`` tends to ``z₀`` when ``x`` tends to ``x₀``. There are three notions of "tends to" at play here, each of which can be instantiated in any of the eight ways described in the previous paragraph. This results in 512 lemmas, a lot to have to add to a library! Informally, mathematicians generally prove two or three of these and simply note that the rest can be proved "in the same way." Formalizing mathematics requires making the relevant notion of "sameness" fully explicit, and that is exactly what Bourbaki's theory of filters manages to do. .. include:: C11_Topology/S01_Filters.inc .. 
include:: C11_Topology/S02_Metric_Spaces.inc .. include:: C11_Topology/S03_Topological_Spaces.inc
mathematics_in_lean/html/_sources/C12_Differential_Calculus.rst.txt
.. _differential_calculus: .. index:: differential calculus Differential Calculus ===================== We now consider the formalization of notions from *analysis*, starting with differentiation in this chapter and turning to integration and measure theory in the next. In :numref:`elementary_differential_calculus`, we stick with the setting of functions from the real numbers to the real numbers, which is familiar from any introductory calculus class. In :numref:`normed_spaces`, we then consider the notion of a derivative in a much broader setting. .. include:: C12_Differential_Calculus/S01_Elementary_Differential_Calculus.inc .. include:: C12_Differential_Calculus/S02_Differential_Calculus_in_Normed_Spaces.inc
mathematics_in_lean/html/_sources/C10_Linear_Algebra.rst.txt
.. _linear_algebra: Linear algebra ============== .. include:: C10_Linear_Algebra/S01_Vector_Spaces.inc .. include:: C10_Linear_Algebra/S02_Subspaces.inc .. include:: C10_Linear_Algebra/S03_Endomorphisms.inc .. include:: C10_Linear_Algebra/S04_Bases.inc
mathematics_in_lean/html/_sources/C13_Integration_and_Measure_Theory.rst.txt
.. _integration_and_measure_theory: .. index:: integration Integration and Measure Theory ============================== .. include:: C13_Integration_and_Measure_Theory/S01_Elementary_Integration.inc .. include:: C13_Integration_and_Measure_Theory/S02_Measure_Theory.inc .. include:: C13_Integration_and_Measure_Theory/S03_Integration.inc