path
stringlengths
11
71
content
stringlengths
75
124k
CategoryTheory\Sites\Over.lean
/- Copyright (c) 2023 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import Mathlib.CategoryTheory.Sites.CoverLifting import Mathlib.CategoryTheory.Sites.CoverPreserving /-! Localization In this file, given a Grothendieck topology `J` on a category `C` and `X : C`, we construct a Grothendieck topology `J.over X` on the category `Over X`. In order to do this, we first construct a bijection `Sieve.overEquiv Y : Sieve Y ≃ Sieve Y.left` for all `Y : Over X`. Then, as it is stated in SGA 4 III 5.2.1, a sieve of `Y : Over X` is covering for `J.over X` if and only if the corresponding sieve of `Y.left` is covering for `J`. As a result, the forgetful functor `Over.forget X : Over X ⥤ X` is both cover-preserving and cover-lifting. -/ universe v' v u' u namespace CategoryTheory open Category variable {C : Type u} [Category.{v} C] namespace Sieve /-- The equivalence `Sieve Y ≃ Sieve Y.left` for all `Y : Over X`. -/ def overEquiv {X : C} (Y : Over X) : Sieve Y ≃ Sieve Y.left where toFun S := Sieve.functorPushforward (Over.forget X) S invFun S' := Sieve.functorPullback (Over.forget X) S' left_inv S := by ext Z g dsimp [Presieve.functorPullback, Presieve.functorPushforward] constructor · rintro ⟨W, a, b, h, w⟩ let c : Z ⟶ W := Over.homMk b (by rw [← Over.w g, w, assoc, Over.w a]) rw [show g = c ≫ a by ext; exact w] exact S.downward_closed h _ · intro h exact ⟨Z, g, 𝟙 _, h, by simp⟩ right_inv S := by ext Z g dsimp [Presieve.functorPullback, Presieve.functorPushforward] constructor · rintro ⟨W, a, b, h, rfl⟩ exact S.downward_closed h _ · intro h exact ⟨Over.mk ((g ≫ Y.hom)), Over.homMk g, 𝟙 _, h, by simp⟩ @[simp] lemma overEquiv_top {X : C} (Y : Over X) : overEquiv Y ⊤ = ⊤ := by ext Z g simp only [top_apply, iff_true] dsimp [overEquiv, Presieve.functorPushforward] exact ⟨Y, 𝟙 Y, g, by simp, by simp⟩ @[simp] lemma overEquiv_symm_top {X : C} (Y : Over X) : (overEquiv Y).symm ⊤ = ⊤ := (overEquiv Y).injective 
(by simp) lemma overEquiv_pullback {X : C} {Y₁ Y₂ : Over X} (f : Y₁ ⟶ Y₂) (S : Sieve Y₂) : overEquiv _ (S.pullback f) = (overEquiv _ S).pullback f.left := by ext Z g dsimp [overEquiv, Presieve.functorPushforward] constructor · rintro ⟨W, a, b, h, rfl⟩ exact ⟨W, a ≫ f, b, h, by simp⟩ · rintro ⟨W, a, b, h, w⟩ let T := Over.mk (b ≫ W.hom) let c : T ⟶ Y₁ := Over.homMk g (by dsimp [T]; rw [← Over.w a, ← reassoc_of% w, Over.w f]) let d : T ⟶ W := Over.homMk b refine ⟨T, c, 𝟙 Z, ?_, by simp [c]⟩ rw [show c ≫ f = d ≫ a by ext; exact w] exact S.downward_closed h _ @[simp] lemma overEquiv_symm_iff {X : C} {Y : Over X} (S : Sieve Y.left) {Z : Over X} (f : Z ⟶ Y) : (overEquiv Y).symm S f ↔ S f.left := by rfl lemma overEquiv_iff {X : C} {Y : Over X} (S : Sieve Y) {Z : C} (f : Z ⟶ Y.left) : overEquiv Y S f ↔ S (Over.homMk f : Over.mk (f ≫ Y.hom) ⟶ Y) := by obtain ⟨S, rfl⟩ := (overEquiv Y).symm.surjective S simp @[simp] lemma functorPushforward_over_map {X Y : C} (f : X ⟶ Y) (Z : Over X) (S : Sieve Z.left) : Sieve.functorPushforward (Over.map f) ((Sieve.overEquiv Z).symm S) = (Sieve.overEquiv ((Over.map f).obj Z)).symm S := by ext W g constructor · rintro ⟨T, a, b, ha, rfl⟩ exact S.downward_closed ha _ · intro hg exact ⟨Over.mk (g.left ≫ Z.hom), Over.homMk g.left, Over.homMk (𝟙 _) (by simpa using Over.w g), hg, by aesop_cat⟩ end Sieve variable (J : GrothendieckTopology C) namespace GrothendieckTopology /-- The Grothendieck topology on the category `Over X` for any `X : C` that is induced by a Grothendieck topology on `C`. 
-/ def over (X : C) : GrothendieckTopology (Over X) where sieves Y S := Sieve.overEquiv Y S ∈ J Y.left top_mem' Y := by change _ ∈ J Y.left simp pullback_stable' Y₁ Y₂ S₁ f h₁ := by change _ ∈ J _ at h₁ ⊢ rw [Sieve.overEquiv_pullback] exact J.pullback_stable _ h₁ transitive' Y S (hS : _ ∈ J _) R hR := J.transitive hS _ (fun Z f hf => by have hf' : _ ∈ J _ := hR ((Sieve.overEquiv_iff _ _).1 hf) rw [Sieve.overEquiv_pullback] at hf' exact hf') lemma mem_over_iff {X : C} {Y : Over X} (S : Sieve Y) : S ∈ (J.over X) Y ↔ Sieve.overEquiv _ S ∈ J Y.left := by rfl lemma overEquiv_symm_mem_over {X : C} (Y : Over X) (S : Sieve Y.left) (hS : S ∈ J Y.left) : (Sieve.overEquiv Y).symm S ∈ (J.over X) Y := by simpa only [mem_over_iff, Equiv.apply_symm_apply] using hS lemma over_forget_coverPreserving (X : C) : CoverPreserving (J.over X) J (Over.forget X) where cover_preserve hS := hS lemma over_forget_compatiblePreserving (X : C) : CompatiblePreserving J (Over.forget X) where compatible {F Z T x hx Y₁ Y₂ W f₁ f₂ g₁ g₂ hg₁ hg₂ h} := by let W' : Over X := Over.mk (f₁ ≫ Y₁.hom) let g₁' : W' ⟶ Y₁ := Over.homMk f₁ let g₂' : W' ⟶ Y₂ := Over.homMk f₂ (by simpa using h.symm =≫ Z.hom) exact hx g₁' g₂' hg₁ hg₂ (by ext; exact h) instance (X : C) : (Over.forget X).IsCocontinuous (J.over X) J where cover_lift hS := J.overEquiv_symm_mem_over _ _ hS instance (X : C) : (Over.forget X).IsContinuous (J.over X) J := Functor.isContinuous_of_coverPreserving (over_forget_compatiblePreserving J X) (over_forget_coverPreserving J X) /-- The pullback functor `Sheaf J A ⥤ Sheaf (J.over X) A` -/ abbrev overPullback (A : Type u') [Category.{v'} A] (X : C) : Sheaf J A ⥤ Sheaf (J.over X) A := (Over.forget X).sheafPushforwardContinuous _ _ _ lemma over_map_coverPreserving {X Y : C} (f : X ⟶ Y) : CoverPreserving (J.over X) (J.over Y) (Over.map f) where cover_preserve {U S} hS := by obtain ⟨S, rfl⟩ := (Sieve.overEquiv U).symm.surjective S rw [Sieve.functorPushforward_over_map] apply overEquiv_symm_mem_over simpa 
[mem_over_iff] using hS lemma over_map_compatiblePreserving {X Y : C} (f : X ⟶ Y) : CompatiblePreserving (J.over Y) (Over.map f) where compatible {F Z T x hx Y₁ Y₂ W f₁ f₂ g₁ g₂ hg₁ hg₂ h} := by let W' : Over X := Over.mk (f₁.left ≫ Y₁.hom) let g₁' : W' ⟶ Y₁ := Over.homMk f₁.left let g₂' : W' ⟶ Y₂ := Over.homMk f₂.left (by simpa using (Over.forget _).congr_map h.symm =≫ Z.hom) let e : (Over.map f).obj W' ≅ W := Over.isoMk (Iso.refl _) (by simpa [W'] using (Over.w f₁).symm) convert congr_arg (F.val.map e.inv.op) (hx g₁' g₂' hg₁ hg₂ (by ext; exact (Over.forget _).congr_map h)) using 1 all_goals dsimp [e, W', g₁', g₂'] rw [← FunctorToTypes.map_comp_apply] apply congr_fun congr 1 rw [← op_comp] congr 1 ext simp instance {X Y : C} (f : X ⟶ Y) : (Over.map f).IsContinuous (J.over X) (J.over Y) := Functor.isContinuous_of_coverPreserving (over_map_compatiblePreserving J f) (over_map_coverPreserving J f) /-- The pullback functor `Sheaf (J.over Y) A ⥤ Sheaf (J.over X) A` induced by a morphism `f : X ⟶ Y`. -/ abbrev overMapPullback (A : Type u') [Category.{v'} A] {X Y : C} (f : X ⟶ Y) : Sheaf (J.over Y) A ⥤ Sheaf (J.over X) A := (Over.map f).sheafPushforwardContinuous _ _ _ end GrothendieckTopology variable {J} /-- Given `F : Sheaf J A` and `X : C`, this is the pullback of `F` on `J.over X`. -/ abbrev Sheaf.over {A : Type u'} [Category.{v'} A] (F : Sheaf J A) (X : C) : Sheaf (J.over X) A := (J.overPullback A X).obj F end CategoryTheory
CategoryTheory\Sites\Plus.lean
/- Copyright (c) 2021 Adam Topaz. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Adam Topaz -/ import Mathlib.CategoryTheory.Sites.Sheaf /-! # The plus construction for presheaves. This file contains the construction of `P⁺`, for a presheaf `P : Cᵒᵖ ⥤ D` where `C` is endowed with a grothendieck topology `J`. See <https://stacks.math.columbia.edu/tag/00W1> for details. -/ namespace CategoryTheory.GrothendieckTopology open CategoryTheory open CategoryTheory.Limits open Opposite universe w v u variable {C : Type u} [Category.{v} C] (J : GrothendieckTopology C) variable {D : Type w} [Category.{max v u} D] noncomputable section variable [∀ (P : Cᵒᵖ ⥤ D) (X : C) (S : J.Cover X), HasMultiequalizer (S.index P)] variable (P : Cᵒᵖ ⥤ D) /-- The diagram whose colimit defines the values of `plus`. -/ @[simps] def diagram (X : C) : (J.Cover X)ᵒᵖ ⥤ D where obj S := multiequalizer (S.unop.index P) map {S T} f := Multiequalizer.lift _ _ (fun I => Multiequalizer.ι (S.unop.index P) (I.map f.unop)) (fun I => Multiequalizer.condition (S.unop.index P) (Cover.Relation.mk' (I.r.map f.unop))) /-- A helper definition used to define the morphisms for `plus`. -/ @[simps] def diagramPullback {X Y : C} (f : X ⟶ Y) : J.diagram P Y ⟶ (J.pullback f).op ⋙ J.diagram P X where app S := Multiequalizer.lift _ _ (fun I => Multiequalizer.ι (S.unop.index P) I.base) fun I => Multiequalizer.condition (S.unop.index P) (Cover.Relation.mk' I.r.base) naturality S T f := Multiequalizer.hom_ext _ _ _ (fun I => by dsimp; simp; rfl) /-- A natural transformation `P ⟶ Q` induces a natural transformation between diagrams whose colimits define the values of `plus`. 
-/ @[simps] def diagramNatTrans {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (X : C) : J.diagram P X ⟶ J.diagram Q X where app W := Multiequalizer.lift _ _ (fun i => Multiequalizer.ι _ _ ≫ η.app _) (fun i => by dsimp only erw [Category.assoc, Category.assoc, ← η.naturality, ← η.naturality, Multiequalizer.condition_assoc] rfl) @[simp] theorem diagramNatTrans_id (X : C) (P : Cᵒᵖ ⥤ D) : J.diagramNatTrans (𝟙 P) X = 𝟙 (J.diagram P X) := by ext : 2 refine Multiequalizer.hom_ext _ _ _ (fun i => ?_) dsimp simp only [limit.lift_π, Multifork.ofι_pt, Multifork.ofι_π_app, Category.id_comp] erw [Category.comp_id] @[simp] theorem diagramNatTrans_zero [Preadditive D] (X : C) (P Q : Cᵒᵖ ⥤ D) : J.diagramNatTrans (0 : P ⟶ Q) X = 0 := by ext : 2 refine Multiequalizer.hom_ext _ _ _ (fun i => ?_) dsimp rw [zero_comp, Multiequalizer.lift_ι, comp_zero] @[simp] theorem diagramNatTrans_comp {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) (X : C) : J.diagramNatTrans (η ≫ γ) X = J.diagramNatTrans η X ≫ J.diagramNatTrans γ X := by ext : 2 refine Multiequalizer.hom_ext _ _ _ (fun i => ?_) dsimp simp variable (D) /-- `J.diagram P`, as a functor in `P`. -/ @[simps] def diagramFunctor (X : C) : (Cᵒᵖ ⥤ D) ⥤ (J.Cover X)ᵒᵖ ⥤ D where obj P := J.diagram P X map η := J.diagramNatTrans η X variable {D} variable [∀ X : C, HasColimitsOfShape (J.Cover X)ᵒᵖ D] /-- The plus construction, associating a presheaf to any presheaf. See `plusFunctor` below for a functorial version. 
-/ def plusObj : Cᵒᵖ ⥤ D where obj X := colimit (J.diagram P X.unop) map f := colimMap (J.diagramPullback P f.unop) ≫ colimit.pre _ _ map_id := by intro X refine colimit.hom_ext (fun S => ?_) dsimp simp only [diagramPullback_app, colimit.ι_pre, ι_colimMap_assoc, Category.comp_id] let e := S.unop.pullbackId dsimp only [Functor.op, pullback_obj] erw [← colimit.w _ e.inv.op, ← Category.assoc] convert Category.id_comp (colimit.ι (diagram J P (unop X)) S) refine Multiequalizer.hom_ext _ _ _ (fun I => ?_) dsimp simp only [Multiequalizer.lift_ι, Category.id_comp, Category.assoc] dsimp [Cover.Arrow.map, Cover.Arrow.base] cases I congr simp map_comp := by intro X Y Z f g refine colimit.hom_ext (fun S => ?_) dsimp simp only [diagramPullback_app, colimit.ι_pre_assoc, colimit.ι_pre, ι_colimMap_assoc, Category.assoc] let e := S.unop.pullbackComp g.unop f.unop dsimp only [Functor.op, pullback_obj] erw [← colimit.w _ e.inv.op, ← Category.assoc, ← Category.assoc] congr 1 refine Multiequalizer.hom_ext _ _ _ (fun I => ?_) dsimp simp only [Multiequalizer.lift_ι, Category.assoc] cases I dsimp only [Cover.Arrow.base, Cover.Arrow.map] congr 2 simp /-- An auxiliary definition used in `plus` below. 
-/ def plusMap {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : J.plusObj P ⟶ J.plusObj Q where app X := colimMap (J.diagramNatTrans η X.unop) naturality := by intro X Y f dsimp [plusObj] ext simp only [diagramPullback_app, ι_colimMap, colimit.ι_pre_assoc, colimit.ι_pre, ι_colimMap_assoc, Category.assoc] simp_rw [← Category.assoc] congr 1 exact Multiequalizer.hom_ext _ _ _ (fun I => by dsimp; simp) @[simp] theorem plusMap_id (P : Cᵒᵖ ⥤ D) : J.plusMap (𝟙 P) = 𝟙 _ := by ext : 2 dsimp only [plusMap, plusObj] rw [J.diagramNatTrans_id, NatTrans.id_app] ext dsimp simp @[simp] theorem plusMap_zero [Preadditive D] (P Q : Cᵒᵖ ⥤ D) : J.plusMap (0 : P ⟶ Q) = 0 := by ext : 2 refine colimit.hom_ext (fun S => ?_) erw [comp_zero, colimit.ι_map, J.diagramNatTrans_zero, zero_comp] @[simp, reassoc] theorem plusMap_comp {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) : J.plusMap (η ≫ γ) = J.plusMap η ≫ J.plusMap γ := by ext : 2 refine colimit.hom_ext (fun S => ?_) simp [plusMap, J.diagramNatTrans_comp] variable (D) /-- The plus construction, a functor sending `P` to `J.plusObj P`. -/ @[simps] def plusFunctor : (Cᵒᵖ ⥤ D) ⥤ Cᵒᵖ ⥤ D where obj P := J.plusObj P map η := J.plusMap η variable {D} /-- The canonical map from `P` to `J.plusObj P`. See `toPlusNatTrans` for a functorial version. 
-/ def toPlus : P ⟶ J.plusObj P where app X := Cover.toMultiequalizer (⊤ : J.Cover X.unop) P ≫ colimit.ι (J.diagram P X.unop) (op ⊤) naturality := by intro X Y f dsimp [plusObj] delta Cover.toMultiequalizer simp only [diagramPullback_app, colimit.ι_pre, ι_colimMap_assoc, Category.assoc] dsimp only [Functor.op, unop_op] let e : (J.pullback f.unop).obj ⊤ ⟶ ⊤ := homOfLE (OrderTop.le_top _) rw [← colimit.w _ e.op, ← Category.assoc, ← Category.assoc, ← Category.assoc] congr 1 refine Multiequalizer.hom_ext _ _ _ (fun I => ?_) simp only [Multiequalizer.lift_ι, Category.assoc] dsimp [Cover.Arrow.base] simp @[reassoc (attr := simp)] theorem toPlus_naturality {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : η ≫ J.toPlus Q = J.toPlus _ ≫ J.plusMap η := by ext dsimp [toPlus, plusMap] delta Cover.toMultiequalizer simp only [ι_colimMap, Category.assoc] simp_rw [← Category.assoc] congr 1 exact Multiequalizer.hom_ext _ _ _ (fun I => by dsimp; simp) variable (D) /-- The natural transformation from the identity functor to `plus`. 
-/ @[simps] def toPlusNatTrans : 𝟭 (Cᵒᵖ ⥤ D) ⟶ J.plusFunctor D where app P := J.toPlus P variable {D} /-- `(P ⟶ P⁺)⁺ = P⁺ ⟶ P⁺⁺` -/ @[simp] theorem plusMap_toPlus : J.plusMap (J.toPlus P) = J.toPlus (J.plusObj P) := by ext X : 2 refine colimit.hom_ext (fun S => ?_) dsimp only [plusMap, toPlus] let e : S.unop ⟶ ⊤ := homOfLE (OrderTop.le_top _) rw [ι_colimMap, ← colimit.w _ e.op, ← Category.assoc, ← Category.assoc] congr 1 refine Multiequalizer.hom_ext _ _ _ (fun I => ?_) erw [Multiequalizer.lift_ι] simp only [unop_op, op_unop, diagram_map, Category.assoc, limit.lift_π, Multifork.ofι_π_app] let ee : (J.pullback (I.map e).f).obj S.unop ⟶ ⊤ := homOfLE (OrderTop.le_top _) erw [← colimit.w _ ee.op, ι_colimMap_assoc, colimit.ι_pre, diagramPullback_app, ← Category.assoc, ← Category.assoc] congr 1 refine Multiequalizer.hom_ext _ _ _ (fun II => ?_) convert Multiequalizer.condition (S.unop.index P) (Cover.Relation.mk I II.base { g₁ := II.f, g₂ := 𝟙 _ }) using 1 all_goals dsimp; simp theorem isIso_toPlus_of_isSheaf (hP : Presheaf.IsSheaf J P) : IsIso (J.toPlus P) := by rw [Presheaf.isSheaf_iff_multiequalizer] at hP suffices ∀ X, IsIso ((J.toPlus P).app X) from NatIso.isIso_of_isIso_app _ intro X suffices IsIso (colimit.ι (J.diagram P X.unop) (op ⊤)) from IsIso.comp_isIso suffices ∀ (S T : (J.Cover X.unop)ᵒᵖ) (f : S ⟶ T), IsIso ((J.diagram P X.unop).map f) from isIso_ι_of_isInitial (initialOpOfTerminal isTerminalTop) _ intro S T e have : S.unop.toMultiequalizer P ≫ (J.diagram P X.unop).map e = T.unop.toMultiequalizer P := Multiequalizer.hom_ext _ _ _ (fun II => by dsimp; simp) have : (J.diagram P X.unop).map e = inv (S.unop.toMultiequalizer P) ≫ T.unop.toMultiequalizer P := by simp [← this] rw [this] infer_instance /-- The natural isomorphism between `P` and `P⁺` when `P` is a sheaf. 
-/ def isoToPlus (hP : Presheaf.IsSheaf J P) : P ≅ J.plusObj P := letI := isIso_toPlus_of_isSheaf J P hP asIso (J.toPlus P) @[simp] theorem isoToPlus_hom (hP : Presheaf.IsSheaf J P) : (J.isoToPlus P hP).hom = J.toPlus P := rfl /-- Lift a morphism `P ⟶ Q` to `P⁺ ⟶ Q` when `Q` is a sheaf. -/ def plusLift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) : J.plusObj P ⟶ Q := J.plusMap η ≫ (J.isoToPlus Q hQ).inv @[reassoc (attr := simp)] theorem toPlus_plusLift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) : J.toPlus P ≫ J.plusLift η hQ = η := by dsimp [plusLift] rw [← Category.assoc] rw [Iso.comp_inv_eq] dsimp only [isoToPlus, asIso] rw [toPlus_naturality] theorem plusLift_unique {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) (γ : J.plusObj P ⟶ Q) (hγ : J.toPlus P ≫ γ = η) : γ = J.plusLift η hQ := by dsimp only [plusLift] rw [Iso.eq_comp_inv, ← hγ, plusMap_comp] simp theorem plus_hom_ext {P Q : Cᵒᵖ ⥤ D} (η γ : J.plusObj P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) (h : J.toPlus P ≫ η = J.toPlus P ≫ γ) : η = γ := by have : γ = J.plusLift (J.toPlus P ≫ γ) hQ := by apply plusLift_unique rfl rw [this] apply plusLift_unique exact h @[simp] theorem isoToPlus_inv (hP : Presheaf.IsSheaf J P) : (J.isoToPlus P hP).inv = J.plusLift (𝟙 _) hP := by apply J.plusLift_unique rw [Iso.comp_inv_eq, Category.id_comp] rfl @[simp] theorem plusMap_plusLift {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) (hR : Presheaf.IsSheaf J R) : J.plusMap η ≫ J.plusLift γ hR = J.plusLift (η ≫ γ) hR := by apply J.plusLift_unique rw [← Category.assoc, ← J.toPlus_naturality, Category.assoc, J.toPlus_plusLift] instance plusFunctor_preservesZeroMorphisms [Preadditive D] : (plusFunctor J D).PreservesZeroMorphisms where map_zero F G := by ext dsimp rw [J.plusMap_zero, NatTrans.app_zero] end end CategoryTheory.GrothendieckTopology
CategoryTheory\Sites\Preserves.lean
/- Copyright (c) 2023 Dagur Asgeirsson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Dagur Asgeirsson -/ import Mathlib.CategoryTheory.Limits.Preserves.Shapes.Products import Mathlib.CategoryTheory.Limits.Shapes.Pullback.CommSq import Mathlib.CategoryTheory.Sites.EqualizerSheafCondition /-! # Sheaves preserve products We prove that a presheaf which satisfies the sheaf condition with respect to certain presieves preserve "the corresponding products". ## Main results More precisely, given a presheaf `F : Cᵒᵖ ⥤ Type*`, we have: * If `F` satisfies the sheaf condition with respect to the empty sieve on the initial object of `C`, then `F` preserves terminal objects. See `preservesTerminalOfIsSheafForEmpty`. * If `F` furthermore satisfies the sheaf condition with respect to the presieve consisting of the inclusion arrows in a coproduct in `C`, then `F` preserves the corresponding product. See `preservesProductOfIsSheafFor`. * If `F` preserves a product, then it satisfies the sheaf condition with respect to the corresponding presieve of arrows. See `isSheafFor_of_preservesProduct`. -/ universe v u w namespace CategoryTheory.Presieve variable {C : Type u} [Category.{v} C] {I : C} (F : Cᵒᵖ ⥤ Type w) open Limits Opposite variable (hF : (ofArrows (X := I) Empty.elim instIsEmptyEmpty.elim).IsSheafFor F) section Terminal variable (I) in /-- If `F` is a presheaf which satisfies the sheaf condition with respect to the empty presieve on any object, then `F` takes that object to the terminal object. -/ noncomputable def isTerminal_of_isSheafFor_empty_presieve : IsTerminal (F.obj (op I)) := by refine @IsTerminal.ofUnique _ _ _ fun Y ↦ ?_ choose t h using hF (by tauto) (by tauto) exact ⟨⟨fun _ ↦ t⟩, fun a ↦ by ext; exact h.2 _ (by tauto)⟩ /-- If `F` is a presheaf which satisfies the sheaf condition with respect to the empty presieve on the initial object, then `F` preserves terminal objects. 
-/ noncomputable def preservesTerminalOfIsSheafForEmpty (hI : IsInitial I) : PreservesLimit (Functor.empty Cᵒᵖ) F := have := hI.hasInitial (preservesTerminalOfIso F ((F.mapIso (terminalIsoIsTerminal (terminalOpOfInitial initialIsInitial)) ≪≫ (F.mapIso (initialIsoIsInitial hI).symm.op) ≪≫ (terminalIsoIsTerminal (isTerminal_of_isSheafFor_empty_presieve I F hF)).symm))) end Terminal section Product variable (hI : IsInitial I) -- This is the data of a particular disjoint coproduct in `C`. variable {α : Type} {X : α → C} (c : Cofan X) (hc : IsColimit c) [(ofArrows X c.inj).hasPullbacks] [HasInitial C] [∀ i, Mono (c.inj i)] (hd : Pairwise fun i j => IsPullback (initial.to _) (initial.to _) (c.inj i) (c.inj j)) /-- The two parallel maps in the equalizer diagram for the sheaf condition corresponding to the inclusion maps in a disjoint coproduct are equal. -/ theorem firstMap_eq_secondMap : Equalizer.Presieve.Arrows.firstMap F X c.inj = Equalizer.Presieve.Arrows.secondMap F X c.inj := by ext a ⟨i, j⟩ simp only [Equalizer.Presieve.Arrows.firstMap, Types.pi_lift_π_apply, types_comp_apply, Equalizer.Presieve.Arrows.secondMap] by_cases hi : i = j · rw [hi, Mono.right_cancellation _ _ pullback.condition] · have := preservesTerminalOfIsSheafForEmpty F hF hI apply_fun (F.mapIso ((hd hi).isoPullback).op ≪≫ F.mapIso (terminalIsoIsTerminal (terminalOpOfInitial initialIsInitial)).symm ≪≫ (PreservesTerminal.iso F)).hom using injective_of_mono _ ext ⟨i⟩ exact i.elim theorem piComparison_fac : have : HasCoproduct X := ⟨⟨c, hc⟩⟩ piComparison F (fun x ↦ op (X x)) = F.map (opCoproductIsoProduct' hc (productIsProduct _)).inv ≫ Equalizer.Presieve.Arrows.forkMap F X c.inj := by have : HasCoproduct X := ⟨⟨c, hc⟩⟩ dsimp only [Equalizer.Presieve.Arrows.forkMap] have h : Pi.lift (fun i ↦ F.map (c.inj i).op) = F.map (Pi.lift (fun i ↦ (c.inj i).op)) ≫ piComparison F _ := by simp rw [h, ← Category.assoc, ← Functor.map_comp] have hh : Pi.lift (fun i ↦ (c.inj i).op) = (productIsProduct (op <| X 
·)).lift c.op := by simp [Pi.lift, productIsProduct] rw [hh, ← desc_op_comp_opCoproductIsoProduct'_hom hc] simp /-- If `F` is a presheaf which `IsSheafFor` a presieve of arrows and the empty presieve, then it preserves the product corresponding to the presieve of arrows. -/ noncomputable def preservesProductOfIsSheafFor (hF' : (ofArrows X c.inj).IsSheafFor F) : PreservesLimit (Discrete.functor (fun x ↦ op (X x))) F := by have : HasCoproduct X := ⟨⟨c, hc⟩⟩ refine @PreservesProduct.ofIsoComparison _ _ _ _ F _ (fun x ↦ op (X x)) _ _ ?_ rw [piComparison_fac (hc := hc)] refine @IsIso.comp_isIso _ _ _ _ _ _ _ inferInstance ?_ rw [isIso_iff_bijective, Function.bijective_iff_existsUnique] rw [Equalizer.Presieve.Arrows.sheaf_condition, Limits.Types.type_equalizer_iff_unique] at hF' exact fun b ↦ hF' b (congr_fun (firstMap_eq_secondMap F hF hI c hd) b) /-- If `F` preserves a particular product, then it `IsSheafFor` the corresponging presieve of arrows. -/ theorem isSheafFor_of_preservesProduct [PreservesLimit (Discrete.functor (fun x ↦ op (X x))) F] : (ofArrows X c.inj).IsSheafFor F := by rw [Equalizer.Presieve.Arrows.sheaf_condition, Limits.Types.type_equalizer_iff_unique] have : HasCoproduct X := ⟨⟨c, hc⟩⟩ have hi : IsIso (piComparison F (fun x ↦ op (X x))) := inferInstance rw [piComparison_fac (hc := hc), isIso_iff_bijective, Function.bijective_iff_existsUnique] at hi intro b _ obtain ⟨t, ht₁, ht₂⟩ := hi b refine ⟨F.map ((opCoproductIsoProduct' hc (productIsProduct _)).inv) t, ht₁, fun y hy ↦ ?_⟩ apply_fun F.map ((opCoproductIsoProduct' hc (productIsProduct _)).hom) using injective_of_mono _ simp only [← FunctorToTypes.map_comp_apply, Iso.op, Category.assoc] rw [ht₂ (F.map ((opCoproductIsoProduct' hc (productIsProduct _)).hom) y) (by simp [← hy])] change (𝟙 (F.obj (∏ᶜ fun x ↦ op (X x)))) t = _ rw [← Functor.map_id] refine congrFun ?_ t congr simp [Iso.eq_inv_comp, ← Category.assoc, ← op_comp, eq_comm, ← Iso.eq_comp_inv] theorem isSheafFor_iff_preservesProduct : (ofArrows 
X c.inj).IsSheafFor F ↔ Nonempty (PreservesLimit (Discrete.functor (fun x ↦ op (X x))) F) := by refine ⟨fun hF' ↦ ⟨preservesProductOfIsSheafFor _ hF hI c hc hd hF'⟩, fun hF' ↦ ?_⟩ let _ := hF'.some exact isSheafFor_of_preservesProduct F c hc end Product end CategoryTheory.Presieve
CategoryTheory\Sites\PreservesLocallyBijective.lean
/- Copyright (c) 2024 Dagur Asgeirsson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Dagur Asgeirsson -/ import Mathlib.CategoryTheory.Sites.DenseSubsite import Mathlib.CategoryTheory.Sites.LocallySurjective /-! # Preserving and reflecting local injectivity and surjectivity This file proves that precomposition with a cocontinuous functor preserves local injectivity and surjectivity of morphisms of presheaves, and that precomposition with a cover preserving and cover dense functor reflects the same properties. -/ open CategoryTheory Functor variable {C D A : Type*} [Category C] [Category D] [Category A] (J : GrothendieckTopology C) (K : GrothendieckTopology D) (H : C ⥤ D) {F G : Dᵒᵖ ⥤ A} (f : F ⟶ G) namespace CategoryTheory namespace Presheaf variable [ConcreteCategory A] lemma isLocallyInjective_whisker [H.IsCocontinuous J K] [IsLocallyInjective K f] : IsLocallyInjective J (whiskerLeft H.op f) where equalizerSieve_mem x y h := H.cover_lift J K (equalizerSieve_mem K f x y h) lemma isLocallyInjective_of_whisker (hH : CoverPreserving J K H) [H.IsCoverDense K] [IsLocallyInjective J (whiskerLeft H.op f)] : IsLocallyInjective K f where equalizerSieve_mem {X} a b h := by apply K.transitive (H.is_cover_of_isCoverDense K X.unop) intro Y g ⟨⟨Z, lift, map, fac⟩⟩ rw [← fac, Sieve.pullback_comp] apply K.pullback_stable refine K.superset_covering (Sieve.functorPullback_pushforward_le H _) ?_ refine K.superset_covering (Sieve.functorPushforward_monotone H _ ?_) (hH.cover_preserve <| equalizerSieve_mem J (whiskerLeft H.op f) ((forget A).map (F.map map.op) a) ((forget A).map (F.map map.op) b) ?_) · intro W q hq simpa using hq · simp only [comp_obj, op_obj, whiskerLeft_app, Opposite.op_unop] erw [NatTrans.naturality_apply, NatTrans.naturality_apply, h] lemma isLocallyInjective_whisker_iff (hH : CoverPreserving J K H) [H.IsCocontinuous J K] [H.IsCoverDense K] : IsLocallyInjective J (whiskerLeft H.op f) ↔ IsLocallyInjective K f := 
⟨fun _ ↦ isLocallyInjective_of_whisker J K H f hH, fun _ ↦ isLocallyInjective_whisker J K H f⟩ lemma isLocallySurjective_whisker [H.IsCocontinuous J K] [IsLocallySurjective K f] : IsLocallySurjective J (whiskerLeft H.op f) where imageSieve_mem a := H.cover_lift J K (imageSieve_mem K f a) lemma isLocallySurjective_of_whisker (hH : CoverPreserving J K H) [H.IsCoverDense K] [IsLocallySurjective J (whiskerLeft H.op f)] : IsLocallySurjective K f where imageSieve_mem {X} a := by apply K.transitive (H.is_cover_of_isCoverDense K X) intro Y g ⟨⟨Z, lift, map, fac⟩⟩ rw [← fac, Sieve.pullback_comp] apply K.pullback_stable have hh := hH.cover_preserve <| imageSieve_mem J (whiskerLeft H.op f) ((forget A).map (G.map map.op) a) refine K.superset_covering (Sieve.functorPullback_pushforward_le H _) ?_ refine K.superset_covering (Sieve.functorPushforward_monotone H _ ?_) hh intro W q ⟨x, h⟩ simp only [Sieve.functorPullback_apply, Presieve.functorPullback_mem, Sieve.pullback_apply] exact ⟨x, by simpa using h⟩ lemma isLocallySurjective_whisker_iff (hH : CoverPreserving J K H) [H.IsCocontinuous J K] [H.IsCoverDense K] : IsLocallySurjective J (whiskerLeft H.op f) ↔ IsLocallySurjective K f := ⟨fun _ ↦ isLocallySurjective_of_whisker J K H f hH, fun _ ↦ isLocallySurjective_whisker J K H f⟩ end Presheaf end CategoryTheory
CategoryTheory\Sites\PreservesSheafification.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import Mathlib.CategoryTheory.Sites.Localization import Mathlib.CategoryTheory.Sites.CompatibleSheafification import Mathlib.CategoryTheory.Sites.Whiskering import Mathlib.CategoryTheory.Sites.Sheafification /-! # Functors which preserves sheafification In this file, given a Grothendieck topology `J` on `C` and `F : A ⥤ B`, we define a type class `J.PreservesSheafification F`. We say that `F` preserves the sheafification if whenever a morphism of presheaves `P₁ ⟶ P₂` induces an isomorphism on the associated sheaves, then the induced map `P₁ ⋙ F ⟶ P₂ ⋙ F` also induces an isomorphism on the associated sheaves. (Note: it suffices to check this property for the map from any presheaf `P` to its associated sheaf, see `GrothendieckTopology.preservesSheafification_iff_of_adjunctions`). In general, we define `Sheaf.composeAndSheafify J F : Sheaf J A ⥤ Sheaf J B` as the functor which sends a sheaf `G` to the sheafification of the composition `G.val ⋙ F`. It `J.PreservesSheafification F`, we show that this functor can also be thought as the localization of the functor `_ ⋙ F` on presheaves: we construct an isomorphism `presheafToSheafCompComposeAndSheafifyIso` between `presheafToSheaf J A ⋙ Sheaf.composeAndSheafify J F` and `(whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B`. Moreover, if we assume `J.HasSheafCompose F`, we obtain an isomorphism `sheafifyComposeIso J F P : sheafify J (P ⋙ F) ≅ sheafify J P ⋙ F`. We show that under suitable assumptions, the forget functor from a concrete category preserves sheafification; this holds more generally for functors between such concrete categories which commute both with suitable limits and colimits. 
## TODO
* construct an isomorphism `Sheaf.composeAndSheafify J F ≅ sheafCompose J F` -/

universe v u

namespace CategoryTheory

open CategoryTheory Category Limits

variable {C : Type u} [Category.{v} C] (J : GrothendieckTopology C)
  {A B : Type*} [Category A] [Category B] (F : A ⥤ B)

namespace GrothendieckTopology

/-- A functor `F : A ⥤ B` preserves the sheafification for the Grothendieck topology `J`
on a category `C` if whenever a morphism of presheaves `f : P₁ ⟶ P₂` in `Cᵒᵖ ⥤ A`
becomes an iso after sheafification, then it is also the case of
`whiskerRight f F : P₁ ⋙ F ⟶ P₂ ⋙ F`. -/
class PreservesSheafification : Prop where
  /-- Any morphism inverted by sheafification in `A` is still inverted after composing with `F`. -/
  le : J.W ≤ J.W.inverseImage ((whiskeringRight Cᵒᵖ A B).obj F)

variable [PreservesSheafification J F]

/-- If `f` is inverted by sheafification, so is `whiskerRight f F`. -/
lemma W_of_preservesSheafification
    {P₁ P₂ : Cᵒᵖ ⥤ A} (f : P₁ ⟶ P₂) (hf : J.W f) :
    J.W (whiskerRight f F) :=
  PreservesSheafification.le _ hf

variable [HasWeakSheafify J B]

/-- Composition with `F` followed by sheafification in `B` inverts all of `J.W`. -/
lemma W_isInvertedBy_whiskeringRight_presheafToSheaf :
    J.W.IsInvertedBy (((whiskeringRight Cᵒᵖ A B).obj F) ⋙ presheafToSheaf J B) := by
  intro P₁ P₂ f hf
  dsimp
  rw [← W_iff]
  exact J.W_of_preservesSheafification F _ hf

end GrothendieckTopology

section

variable [HasWeakSheafify J B]

/-- This is the functor sending a sheaf `X : Sheaf J A` to the sheafification
of `X.val ⋙ F`. -/
noncomputable abbrev Sheaf.composeAndSheafify : Sheaf J A ⥤ Sheaf J B :=
  sheafToPresheaf J A ⋙ (whiskeringRight _ _ _).obj F ⋙ presheafToSheaf J B

variable [HasWeakSheafify J A]

/-- The canonical natural transformation from
`(whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B` to
`presheafToSheaf J A ⋙ Sheaf.composeAndSheafify J F`. -/
@[simps!]
noncomputable def toPresheafToSheafCompComposeAndSheafify :
    (whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B ⟶
      presheafToSheaf J A ⋙ Sheaf.composeAndSheafify J F :=
  whiskerRight (sheafificationAdjunction J A).unit
    ((whiskeringRight _ _ _).obj F ⋙ presheafToSheaf J B)

variable [J.PreservesSheafification F]

instance : IsIso (toPresheafToSheafCompComposeAndSheafify J F) := by
  have : J.PreservesSheafification F := inferInstance
  rw [NatTrans.isIso_iff_isIso_app]
  intro X
  dsimp
  simpa only [← J.W_iff] using J.W_of_preservesSheafification F _ (J.W_toSheafify X)

/-- The canonical isomorphism between `presheafToSheaf J A ⋙ Sheaf.composeAndSheafify J F`
and `(whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B` when `F : A ⥤ B`
preserves sheafification. -/
@[simps! inv_app]
noncomputable def presheafToSheafCompComposeAndSheafifyIso :
    presheafToSheaf J A ⋙ Sheaf.composeAndSheafify J F ≅
      (whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B :=
  (asIso (toPresheafToSheafCompComposeAndSheafify J F)).symm

noncomputable instance :
    Localization.Lifting (presheafToSheaf J A) J.W
      ((whiskeringRight Cᵒᵖ A B).obj F ⋙ presheafToSheaf J B)
      (Sheaf.composeAndSheafify J F) :=
  ⟨presheafToSheafCompComposeAndSheafifyIso J F⟩

end

section

variable {G₁ : (Cᵒᵖ ⥤ A) ⥤ Sheaf J A} (adj₁ : G₁ ⊣ sheafToPresheaf J A)
  {G₂ : (Cᵒᵖ ⥤ B) ⥤ Sheaf J B}

/-- Preserving sheafification can be tested on the units of arbitrary sheafification
adjunctions `adj₁` and `adj₂`. -/
lemma GrothendieckTopology.preservesSheafification_iff_of_adjunctions
    (adj₂ : G₂ ⊣ sheafToPresheaf J B) :
    J.PreservesSheafification F ↔
      ∀ (P : Cᵒᵖ ⥤ A), IsIso (G₂.map (whiskerRight (adj₁.unit.app P) F)) := by
  simp only [← J.W_iff_isIso_map_of_adjunction adj₂]
  constructor
  · intro _ P
    apply W_of_preservesSheafification
    rw [J.W_iff_isIso_map_of_adjunction adj₁]
    infer_instance
  · intro h
    constructor
    intro P₁ P₂ f hf
    rw [J.W_iff_isIso_map_of_adjunction adj₁] at hf
    dsimp [MorphismProperty.inverseImage]
    rw [← MorphismProperty.postcomp_iff _ _ _ (h P₂), ← whiskerRight_comp]
    erw [adj₁.unit.naturality f]
    dsimp only [Functor.comp_map]
    rw [whiskerRight_comp, MorphismProperty.precomp_iff _ _ _ (h P₁)]
    apply Localization.LeftBousfield.W_of_isIso

section HasSheafCompose

variable (adj₂ : G₂ ⊣ sheafToPresheaf J B) [J.HasSheafCompose F]

/-- The canonical natural transformation
`(whiskeringRight Cᵒᵖ A B).obj F ⋙ G₂ ⟶ G₁ ⋙ sheafCompose J F` when `F : A ⥤ B`
is such that `J.HasSheafCompose F`, and that `G₁` and `G₂` are
left adjoints to the forget functors `sheafToPresheaf`. -/
def sheafComposeNatTrans :
    (whiskeringRight Cᵒᵖ A B).obj F ⋙ G₂ ⟶ G₁ ⋙ sheafCompose J F where
  app P := (adj₂.homEquiv _ _).symm (whiskerRight (adj₁.unit.app P) F)
  naturality {P Q} f := by
    dsimp
    erw [← adj₂.homEquiv_naturality_left_symm, ← adj₂.homEquiv_naturality_right_symm]
    dsimp
    rw [← whiskerRight_comp, ← whiskerRight_comp]
    erw [adj₁.unit.naturality f]
    rfl

/-- `sheafComposeNatTrans` is compatible with the adjunction units. -/
lemma sheafComposeNatTrans_fac (P : Cᵒᵖ ⥤ A) :
    adj₂.unit.app (P ⋙ F) ≫
      (sheafToPresheaf J B).map ((sheafComposeNatTrans J F adj₁ adj₂).app P) =
      whiskerRight (adj₁.unit.app P) F := by
  dsimp only [sheafComposeNatTrans]
  erw [Adjunction.homEquiv_counit, Adjunction.unit_naturality_assoc,
    adj₂.right_triangle_components, comp_id]

/-- The compatibility of `sheafComposeNatTrans_fac` characterises
`(sheafComposeNatTrans J F adj₁ adj₂).app P` uniquely. -/
lemma sheafComposeNatTrans_app_uniq (P : Cᵒᵖ ⥤ A)
    (α : G₂.obj (P ⋙ F) ⟶ (sheafCompose J F).obj (G₁.obj P))
    (hα : adj₂.unit.app (P ⋙ F) ≫ (sheafToPresheaf J B).map α =
      whiskerRight (adj₁.unit.app P) F) :
    α = (sheafComposeNatTrans J F adj₁ adj₂).app P := by
  apply (adj₂.homEquiv _ _).injective
  dsimp [sheafComposeNatTrans]
  erw [Equiv.apply_symm_apply]
  rw [← hα]
  apply adj₂.homEquiv_unit

/-- Preserving sheafification is equivalent to `sheafComposeNatTrans` being an iso. -/
lemma GrothendieckTopology.preservesSheafification_iff_of_adjunctions_of_hasSheafCompose :
    J.PreservesSheafification F ↔ IsIso (sheafComposeNatTrans J F adj₁ adj₂) := by
  rw [J.preservesSheafification_iff_of_adjunctions F adj₁ adj₂,
    NatTrans.isIso_iff_isIso_app]
  apply forall_congr'
  intro P
  rw [← J.W_iff_isIso_map_of_adjunction adj₂, ← J.W_sheafToPreheaf_map_iff_isIso,
    ← sheafComposeNatTrans_fac J F adj₁ adj₂,
    MorphismProperty.precomp_iff _ _ _ (J.W_adj_unit_app adj₂ (P ⋙ F))]

variable [J.PreservesSheafification F]

instance : IsIso (sheafComposeNatTrans J F adj₁ adj₂) := by
  rw [← J.preservesSheafification_iff_of_adjunctions_of_hasSheafCompose]
  infer_instance

/-- The canonical natural isomorphism
`(whiskeringRight Cᵒᵖ A B).obj F ⋙ G₂ ≅ G₁ ⋙ sheafCompose J F` when `F : A ⥤ B`
preserves sheafification, and that `G₁` and `G₂` are left adjoints to the forget
functors `sheafToPresheaf`. -/
noncomputable def sheafComposeNatIso :
    (whiskeringRight Cᵒᵖ A B).obj F ⋙ G₂ ≅ G₁ ⋙ sheafCompose J F :=
  asIso (sheafComposeNatTrans J F adj₁ adj₂)

end HasSheafCompose

end

section HasSheafCompose

variable [HasWeakSheafify J A] [HasWeakSheafify J B]
  [J.HasSheafCompose F] [J.PreservesSheafification F] (P : Cᵒᵖ ⥤ A)

/-- The canonical isomorphism `sheafify J (P ⋙ F) ≅ sheafify J P ⋙ F` when `F`
preserves the sheafification. -/
noncomputable def sheafifyComposeIso :
    sheafify J (P ⋙ F) ≅ sheafify J P ⋙ F :=
  (sheafToPresheaf J B).mapIso
    ((sheafComposeNatIso J F (sheafificationAdjunction J A)
      (sheafificationAdjunction J B)).app P)

@[reassoc (attr := simp)]
lemma sheafComposeIso_hom_fac :
    toSheafify J (P ⋙ F) ≫ (sheafifyComposeIso J F P).hom =
      whiskerRight (toSheafify J P) F :=
  sheafComposeNatTrans_fac J F (sheafificationAdjunction J A)
    (sheafificationAdjunction J B) P

@[reassoc (attr := simp)]
lemma sheafComposeIso_inv_fac :
    whiskerRight (toSheafify J P) F ≫ (sheafifyComposeIso J F P).inv =
      toSheafify J (P ⋙ F) := by
  rw [← sheafComposeIso_hom_fac, assoc, Iso.hom_inv_id, comp_id]

end HasSheafCompose

namespace GrothendieckTopology

section

-- The hypotheses below are those of the plus-plus sheafification construction:
-- concrete categories with enough (co)limits preserved by `forget` and by `F`.
variable {D E : Type*} [Category.{max v u} D] [Category.{max v u} E] (F : D ⥤ E)
  [∀ (α β : Type max v u) (fst snd : β → α),
    HasLimitsOfShape (WalkingMulticospan fst snd) D]
  [∀ (α β : Type max v u) (fst snd : β → α),
    HasLimitsOfShape (WalkingMulticospan fst snd) E]
  [∀ X : C, HasColimitsOfShape (J.Cover X)ᵒᵖ D]
  [∀ X : C, HasColimitsOfShape (J.Cover X)ᵒᵖ E]
  [∀ X : C, PreservesColimitsOfShape (J.Cover X)ᵒᵖ F]
  [∀ (X : C) (W : J.Cover X) (P : Cᵒᵖ ⥤ D),
    PreservesLimit (W.index P).multicospan F]
  [ConcreteCategory D] [ConcreteCategory E]
  [∀ X, PreservesColimitsOfShape (Cover J X)ᵒᵖ (forget D)]
  [∀ X, PreservesColimitsOfShape (Cover J X)ᵒᵖ (forget E)]
  [PreservesLimits (forget D)] [PreservesLimits (forget E)]
  [(forget D).ReflectsIsomorphisms] [(forget E).ReflectsIsomorphisms]

/-- For the plus-plus adjunctions, the component of `sheafComposeNatTrans` is
(the inverse of) the isomorphism `sheafifyCompIso`. -/
lemma sheafToPresheaf_map_sheafComposeNatTrans_eq_sheafifyCompIso_inv (P : Cᵒᵖ ⥤ D) :
    (sheafToPresheaf J E).map ((sheafComposeNatTrans J F (plusPlusAdjunction J D)
      (plusPlusAdjunction J E)).app P) = (sheafifyCompIso J F P).inv := by
  suffices (sheafComposeNatTrans J F (plusPlusAdjunction J D)
      (plusPlusAdjunction J E)).app P = ⟨(sheafifyCompIso J F P).inv⟩ by
    rw [this]
    rfl
  apply ((plusPlusAdjunction J E).homEquiv _ _).injective
  convert sheafComposeNatTrans_fac J F (plusPlusAdjunction J D) (plusPlusAdjunction J E) P
  all_goals
    dsimp [plusPlusAdjunction]
    simp

instance (P : Cᵒᵖ ⥤ D) :
    IsIso ((sheafComposeNatTrans J F (plusPlusAdjunction J D)
      (plusPlusAdjunction J E)).app P) := by
  rw [← isIso_iff_of_reflects_iso _ (sheafToPresheaf J E),
    sheafToPresheaf_map_sheafComposeNatTrans_eq_sheafifyCompIso_inv]
  infer_instance

instance : IsIso (sheafComposeNatTrans J F (plusPlusAdjunction J D) (plusPlusAdjunction J E)) :=
  NatIso.isIso_of_isIso_app _

instance : PreservesSheafification J F := by
  rw [preservesSheafification_iff_of_adjunctions_of_hasSheafCompose _ _
    (plusPlusAdjunction J D) (plusPlusAdjunction J E)]
  infer_instance

end

-- Sanity check: the forgetful functor of a suitable concrete category
-- preserves sheafification.
example {D : Type*} [Category.{max v u} D] [ConcreteCategory.{max v u} D]
    [PreservesLimits (forget D)]
    [∀ X : C, HasColimitsOfShape (J.Cover X)ᵒᵖ D]
    [∀ X : C, PreservesColimitsOfShape (J.Cover X)ᵒᵖ (forget D)]
    [∀ (α β : Type max u v) (fst snd : β → α),
      Limits.HasLimitsOfShape (Limits.WalkingMulticospan fst snd) D]
    [(forget D).ReflectsIsomorphisms] :
    PreservesSheafification J (forget D) :=
  inferInstance

end GrothendieckTopology

end CategoryTheory
CategoryTheory\Sites\Pretopology.lean
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import Mathlib.CategoryTheory.Sites.Grothendieck

/-!
# Grothendieck pretopologies

Definition and lemmas about Grothendieck pretopologies. A Grothendieck pretopology for a category
`C` is a set of families of morphisms with fixed codomain, satisfying certain closure conditions.

We show that a pretopology generates a genuine Grothendieck topology, and every topology has
a maximal pretopology which generates it.

The pretopology associated to a topological space is defined in `Spaces.lean`.

## Tags

coverage, pretopology, site

## References

* [nLab, *Grothendieck pretopology*](https://ncatlab.org/nlab/show/Grothendieck+pretopology)
* [S. MacLane, I. Moerdijk, *Sheaves in Geometry and Logic*][MM92]
* [Stacks, *00VG*](https://stacks.math.columbia.edu/tag/00VG)
-/

universe v u

noncomputable section

namespace CategoryTheory

open CategoryTheory Category Limits Presieve

variable {C : Type u} [Category.{v} C] [HasPullbacks C]

variable (C)

/-- A (Grothendieck) pretopology on `C` consists of a collection of families of morphisms with a
fixed target `X` for every object `X` in `C`, called "coverings" of `X`, which satisfies the
following three axioms:
1. Every family consisting of a single isomorphism is a covering family.
2. The collection of covering families is stable under pullback.
3. Given a covering family, and a covering family on each domain of the former, the composition
   is a covering family.

In some sense, a pretopology can be seen as Grothendieck topology with weaker saturation
conditions, in that each covering is not necessarily downward closed.

See: https://ncatlab.org/nlab/show/Grothendieck+pretopology, or
https://stacks.math.columbia.edu/tag/00VH, or [MM92] Chapter III, Section 2, Definition 2.
Note that Stacks calls a category together with a pretopology a site, and [MM92] calls this
a basis for a topology. -/
@[ext]
structure Pretopology where
  /-- The collection of covering presieves for each object `X`. -/
  coverings : ∀ X : C, Set (Presieve X)
  /-- Every singleton family given by an isomorphism is a covering. -/
  has_isos : ∀ ⦃X Y⦄ (f : Y ⟶ X) [IsIso f], Presieve.singleton f ∈ coverings X
  /-- Coverings are stable under pullback along any morphism. -/
  pullbacks : ∀ ⦃X Y⦄ (f : Y ⟶ X) (S), S ∈ coverings X → pullbackArrows f S ∈ coverings Y
  /-- Refining each morphism of a covering by a covering yields a covering. -/
  transitive :
    ∀ ⦃X : C⦄ (S : Presieve X) (Ti : ∀ ⦃Y⦄ (f : Y ⟶ X), S f → Presieve Y),
      S ∈ coverings X → (∀ ⦃Y⦄ (f) (H : S f), Ti f H ∈ coverings Y) → S.bind Ti ∈ coverings X

namespace Pretopology

instance : CoeFun (Pretopology C) fun _ => ∀ X : C, Set (Presieve X) :=
  ⟨coverings⟩

variable {C}

instance LE : LE (Pretopology C) where
  le K₁ K₂ := (K₁ : ∀ X : C, Set (Presieve X)) ≤ K₂

theorem le_def {K₁ K₂ : Pretopology C} : K₁ ≤ K₂ ↔ (K₁ : ∀ X : C, Set (Presieve X)) ≤ K₂ :=
  Iff.rfl

variable (C)

instance : PartialOrder (Pretopology C) :=
  { Pretopology.LE with
    le_refl := fun K => le_def.mpr le_rfl
    le_trans := fun K₁ K₂ K₃ h₁₂ h₂₃ => le_def.mpr (le_trans h₁₂ h₂₃)
    le_antisymm := fun K₁ K₂ h₁₂ h₂₁ => Pretopology.ext (le_antisymm h₁₂ h₂₁) }

-- The maximal pretopology: every presieve covers.
instance : OrderTop (Pretopology C) where
  top :=
    { coverings := fun _ => Set.univ
      has_isos := fun _ _ _ _ => Set.mem_univ _
      pullbacks := fun _ _ _ _ _ => Set.mem_univ _
      transitive := fun _ _ _ _ _ => Set.mem_univ _ }
  le_top _ _ _ _ := Set.mem_univ _

instance : Inhabited (Pretopology C) :=
  ⟨⊤⟩

/-- A pretopology `K` can be completed to a Grothendieck topology `J` by declaring a sieve to be
`J`-covering if it contains a family in `K`.

See <https://stacks.math.columbia.edu/tag/00ZC>, or [MM92] Chapter III, Section 2,
Equation (2). -/
def toGrothendieck (K : Pretopology C) : GrothendieckTopology C where
  sieves X S := ∃ R ∈ K X, R ≤ (S : Presieve _)
  top_mem' X := ⟨Presieve.singleton (𝟙 _), K.has_isos _, fun _ _ _ => ⟨⟩⟩
  pullback_stable' X Y S g := by
    rintro ⟨R, hR, RS⟩
    refine ⟨_, K.pullbacks g _ hR, ?_⟩
    rw [← Sieve.generate_le_iff, Sieve.pullbackArrows_comm]
    apply Sieve.pullback_monotone
    rwa [Sieve.giGenerate.gc]
  transitive' := by
    rintro X S ⟨R', hR', RS⟩ R t
    choose t₁ t₂ t₃ using t
    refine ⟨_, K.transitive _ _ hR' fun _ f hf => t₂ (RS _ hf), ?_⟩
    rintro Y _ ⟨Z, g, f, hg, hf, rfl⟩
    apply t₃ (RS _ hg) _ hf

theorem mem_toGrothendieck (K : Pretopology C) (X S) :
    S ∈ toGrothendieck C K X ↔ ∃ R ∈ K X, R ≤ (S : Presieve X) :=
  Iff.rfl

/-- The largest pretopology generating the given Grothendieck topology.

See [MM92] Chapter III, Section 2, Equations (3,4). -/
def ofGrothendieck (J : GrothendieckTopology C) : Pretopology C where
  coverings X R := Sieve.generate R ∈ J X
  has_isos X Y f i := J.covering_of_eq_top (by simp)
  pullbacks X Y f R hR := by
    simp only [Set.mem_def, Sieve.pullbackArrows_comm]
    apply J.pullback_stable f hR
  transitive X S Ti hS hTi := by
    apply J.transitive hS
    intro Y f
    rintro ⟨Z, g, f, hf, rfl⟩
    rw [Sieve.pullback_comp]
    apply J.pullback_stable g
    apply J.superset_covering _ (hTi _ hf)
    rintro Y g ⟨W, h, g, hg, rfl⟩
    exact ⟨_, h, _, ⟨_, _, _, hf, hg, rfl⟩, by simp⟩

/-- We have a galois insertion from pretopologies to Grothendieck topologies. -/
def gi : GaloisInsertion (toGrothendieck C) (ofGrothendieck C) where
  gc K J := by
    constructor
    · intro h X R hR
      exact h _ ⟨_, hR, Sieve.le_generate R⟩
    · rintro h X S ⟨R, hR, RS⟩
      apply J.superset_covering _ (h _ hR)
      rwa [Sieve.giGenerate.gc]
  le_l_u J X S hS := ⟨S, J.superset_covering (Sieve.le_generate S.arrows) hS, le_rfl⟩
  choice x _ := toGrothendieck C x
  choice_eq _ _ := rfl

/-- The trivial pretopology, in which the coverings are exactly singleton isomorphisms. This
topology is also known as the indiscrete, coarse, or chaotic topology.

See <https://stacks.math.columbia.edu/tag/07GE> -/
def trivial : Pretopology C where
  coverings X S := ∃ (Y : _) (f : Y ⟶ X) (_ : IsIso f), S = Presieve.singleton f
  has_isos X Y f i := ⟨_, _, i, rfl⟩
  pullbacks X Y f S := by
    rintro ⟨Z, g, i, rfl⟩
    refine ⟨pullback g f, pullback.snd _ _, ?_, ?_⟩
    · refine ⟨⟨pullback.lift (f ≫ inv g) (𝟙 _) (by simp), ⟨?_, by aesop_cat⟩⟩⟩
      ext
      · rw [assoc, pullback.lift_fst, ← pullback.condition_assoc]
        simp
      · simp
    · apply pullback_singleton
  transitive := by
    rintro X S Ti ⟨Z, g, i, rfl⟩ hS
    rcases hS g (singleton_self g) with ⟨Y, f, i, hTi⟩
    refine ⟨_, f ≫ g, ?_, ?_⟩
    · infer_instance
    -- Porting note: the next four lines were just "ext (W k)"
    apply funext
    rintro W
    apply Set.ext
    rintro k
    constructor
    · rintro ⟨V, h, k, ⟨_⟩, hh, rfl⟩
      rw [hTi] at hh
      cases hh
      apply singleton.mk
    · rintro ⟨_⟩
      refine bind_comp g singleton.mk ?_
      rw [hTi]
      apply singleton.mk

-- The trivial pretopology is the smallest pretopology.
instance : OrderBot (Pretopology C) where
  bot := trivial C
  bot_le K X R := by
    rintro ⟨Y, f, hf, rfl⟩
    exact K.has_isos f

/-- The trivial pretopology induces the trivial grothendieck topology. -/
theorem toGrothendieck_bot : toGrothendieck C ⊥ = ⊥ :=
  (gi C).gc.l_bot

end Pretopology

end CategoryTheory
CategoryTheory\Sites\Pullback.lean
/-
Copyright (c) 2021 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import Mathlib.CategoryTheory.Adjunction.Restrict
import Mathlib.CategoryTheory.Functor.Flat
import Mathlib.CategoryTheory.Sites.Continuous
import Mathlib.CategoryTheory.Sites.LeftExact

/-!
# Pullback of sheaves

## Main definitions

* `CategoryTheory.Functor.sheafPullback`: the functor `Sheaf J A ⥤ Sheaf K A` obtained as an
  extension of a functor `G : C ⥤ D` between the underlying categories.
* `CategoryTheory.Functor.sheafAdjunctionContinuous`: the adjunction
  `G.sheafPullback A J K ⊣ G.sheafPushforwardContinuous A J K` when the functor `G` is continuous.
  In case `G` is representably flat, the pullback functor on sheaves commutes with finite limits:
  this is a morphism of sites in the sense of SGA 4 IV 4.9.
-/

universe v₁ u₁

noncomputable section

open CategoryTheory.Limits

namespace CategoryTheory

variable {C : Type v₁} [SmallCategory C] {D : Type v₁} [SmallCategory D] (G : C ⥤ D)
variable (A : Type u₁) [Category.{v₁} A]
variable (J : GrothendieckTopology C) (K : GrothendieckTopology D)

-- Porting note: there was an explicit call to
-- CategoryTheory.Sheaf.CategoryTheory.SheafToPresheaf.CategoryTheory.createsLimits.{u₁, v₁, v₁}
-- but it is not necessary (it was not either in mathlib)
instance [HasLimits A] : CreatesLimits (sheafToPresheaf J A) :=
  inferInstance

-- The assumptions so that we have sheafification
variable [ConcreteCategory.{v₁} A] [PreservesLimits (forget A)] [HasColimits A] [HasLimits A]
variable [PreservesFilteredColimits (forget A)] [(forget A).ReflectsIsomorphisms]

attribute [local instance] reflectsLimitsOfReflectsIsomorphisms

instance {X : C} : IsCofiltered (J.Cover X) :=
  inferInstance

/-- The pullback functor `Sheaf J A ⥤ Sheaf K A` associated to a functor `G : C ⥤ D` in the
same direction as `G`. -/
@[simps!]
def Functor.sheafPullback : Sheaf J A ⥤ Sheaf K A :=
  sheafToPresheaf J A ⋙ G.op.lan ⋙ presheafToSheaf K A

-- When `G` is representably flat, left Kan extension along `G.op` followed by
-- sheafification is left exact, hence so is the pullback functor.
instance [RepresentablyFlat G] : PreservesFiniteLimits (G.sheafPullback A J K) := by
  have : PreservesFiniteLimits (G.op.lan ⋙ presheafToSheaf K A) :=
    compPreservesFiniteLimits _ _
  apply compPreservesFiniteLimits

/-- The pullback functor is left adjoint to the pushforward functor. -/
def Functor.sheafAdjunctionContinuous [Functor.IsContinuous.{v₁} G J K] :
    G.sheafPullback A J K ⊣ G.sheafPushforwardContinuous A J K :=
  ((G.op.lanAdjunction A).comp (sheafificationAdjunction K A)).restrictFullyFaithful
    (fullyFaithfulSheafToPresheaf J A) (Functor.FullyFaithful.id _)
    (Iso.refl _) (Iso.refl _)

end CategoryTheory
CategoryTheory\Sites\Sheaf.lean
/-
Copyright (c) 2020 Kevin Buzzard, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Bhavik Mehta
-/
import Mathlib.CategoryTheory.Limits.Preserves.Shapes.Equalizers
import Mathlib.CategoryTheory.Limits.Preserves.Shapes.Products
import Mathlib.CategoryTheory.Limits.Yoneda
import Mathlib.CategoryTheory.Preadditive.FunctorCategory
import Mathlib.CategoryTheory.Sites.SheafOfTypes
import Mathlib.CategoryTheory.Sites.EqualizerSheafCondition
import Mathlib.CategoryTheory.Limits.Constructions.EpiMono

/-!
# Sheaves taking values in a category

If C is a category with a Grothendieck topology, we define the notion of a sheaf taking values in
an arbitrary category `A`. We follow the definition in https://stacks.math.columbia.edu/tag/00VR,
noting that the presheaf of sets "defined above" can be seen in the comments between tags 00VQ and
00VR on the page <https://stacks.math.columbia.edu/tag/00VL>. The advantage of this definition is
that we need no assumptions whatsoever on `A` other than the assumption that the morphisms in `C`
and `A` live in the same universe.

* An `A`-valued presheaf `P : Cᵒᵖ ⥤ A` is defined to be a sheaf (for the topology `J`) iff for
  every `E : A`, the type-valued presheaves of sets given by sending `U : Cᵒᵖ` to
  `Hom_{A}(E, P U)` are all sheaves of sets, see `CategoryTheory.Presheaf.IsSheaf`.
* When `A = Type`, this recovers the basic definition of sheaves of sets, see
  `CategoryTheory.isSheaf_iff_isSheaf_of_type`.
* An alternate definition in terms of limits, unconditionally equivalent to the original one:
  see `CategoryTheory.Presheaf.isSheaf_iff_isLimit`.
* An alternate definition when `C` is small, has pullbacks and `A` has products is given by an
  equalizer condition `CategoryTheory.Presheaf.IsSheaf'`. This is equivalent to the earlier
  definition, shown in `CategoryTheory.Presheaf.isSheaf_iff_isSheaf'`.
* When `A = Type`, this is *definitionally* equal to the equalizer condition for presieves in
  `CategoryTheory.Sites.SheafOfTypes`.
* When `A` has limits and there is a functor `s : A ⥤ Type` which is faithful, reflects
  isomorphisms and preserves limits, then `P : Cᵒᵖ ⥤ A` is a sheaf iff the underlying presheaf
  of types `P ⋙ s : Cᵒᵖ ⥤ Type` is a sheaf (`CategoryTheory.Presheaf.isSheaf_iff_isSheaf_forget`).
  Cf https://stacks.math.columbia.edu/tag/0073, which is a weaker version of this statement (it's
  only over spaces, not sites) and https://stacks.math.columbia.edu/tag/00YR (a), which
  additionally assumes filtered colimits.

## Implementation notes

Occasionally we need to take a limit in `A` of a collection of morphisms of `C` indexed by a
collection of objects in `C`. This turns out to force the morphisms of `A` to be in a sufficiently
large universe. Rather than use `UnivLE` we prove some results for a category `A'` instead, whose
morphism universe of `A'` is defined to be `max u₁ v₁`, where `u₁, v₁` are the universes for `C`.
Perhaps after we get better at handling universe inequalities this can be changed.
-/

universe w v₁ v₂ v₃ u₁ u₂ u₃

noncomputable section

namespace CategoryTheory

open Opposite CategoryTheory Category Limits Sieve

namespace Presheaf

variable {C : Type u₁} [Category.{v₁} C]
variable {A : Type u₂} [Category.{v₂} A]
variable (J : GrothendieckTopology C)

-- We follow https://stacks.math.columbia.edu/tag/00VL definition 00VR
/-- A sheaf of A is a presheaf P : Cᵒᵖ => A such that for every E : A, the
presheaf of types given by sending U : C to Hom_{A}(E, P U) is a sheaf of types.

https://stacks.math.columbia.edu/tag/00VR
-/
def IsSheaf (P : Cᵒᵖ ⥤ A) : Prop :=
  ∀ E : A, Presieve.IsSheaf J (P ⋙ coyoneda.obj (op E))

attribute [local instance] ConcreteCategory.hasCoeToSort ConcreteCategory.instFunLike in
/-- Condition that a presheaf with values in a concrete category is separated for a
Grothendieck topology. -/
def IsSeparated (P : Cᵒᵖ ⥤ A) [ConcreteCategory A] : Prop :=
  ∀ (X : C) (S : Sieve X) (_ : S ∈ J X) (x y : P.obj (op X)),
    (∀ (Y : C) (f : Y ⟶ X) (_ : S f), P.map f.op x = P.map f.op y) → x = y

section LimitSheafCondition

open Presieve Presieve.FamilyOfElements Limits

variable (P : Cᵒᵖ ⥤ A) {X : C} (S : Sieve X) (R : Presieve X) (E : Aᵒᵖ)

/-- Given a sieve `S` on `X : C`, a presheaf `P : Cᵒᵖ ⥤ A`, and an object `E` of `A`,
the cones over the natural diagram `S.arrows.diagram.op ⋙ P` associated to `S` and `P`
with cone point `E` are in 1-1 correspondence with sieve_compatible family of elements
for the sieve `S` and the presheaf of types `Hom (E, P -)`. -/
@[simps]
def conesEquivSieveCompatibleFamily :
    (S.arrows.diagram.op ⋙ P).cones.obj E ≃
      { x : FamilyOfElements (P ⋙ coyoneda.obj E) (S : Presieve X) // x.SieveCompatible } where
  toFun π :=
    ⟨fun Y f h => π.app (op ⟨Over.mk f, h⟩), fun X Y f g hf => by
      apply (id_comp _).symm.trans
      dsimp
      exact π.naturality (Quiver.Hom.op (Over.homMk _ (by rfl)))⟩
  invFun x :=
    { app := fun f => x.1 f.unop.1.hom f.unop.2
      naturality := fun f f' g => by
        refine Eq.trans ?_ (x.2 f.unop.1.hom g.unop.left f.unop.2)
        dsimp
        rw [id_comp]
        convert rfl
        rw [Over.w] }
  left_inv π := rfl
  right_inv x := rfl

-- These lemmas have always been bad (#7657), but leanprover/lean4#2644 made `simp` start noticing
attribute [nolint simpNF] CategoryTheory.Presheaf.conesEquivSieveCompatibleFamily_apply_coe
  CategoryTheory.Presheaf.conesEquivSieveCompatibleFamily_symm_apply_app

variable {P S E}
variable {x : FamilyOfElements (P ⋙ coyoneda.obj E) S.arrows} (hx : SieveCompatible x)

/-- The cone corresponding to a sieve_compatible family of elements, dot notation enabled. -/
@[simp]
def _root_.CategoryTheory.Presieve.FamilyOfElements.SieveCompatible.cone :
    Cone (S.arrows.diagram.op ⋙ P) where
  pt := E.unop
  π := (conesEquivSieveCompatibleFamily P S E).invFun ⟨x, hx⟩

/-- Cone morphisms from the cone corresponding to a sieve_compatible family to the natural
cone associated to a sieve `S` and a presheaf `P` are in 1-1 correspondence with amalgamations
of the family. -/
def homEquivAmalgamation :
    (hx.cone ⟶ P.mapCone S.arrows.cocone.op) ≃ { t // x.IsAmalgamation t } where
  toFun l := ⟨l.hom, fun _ f hf => l.w (op ⟨Over.mk f, hf⟩)⟩
  invFun t := ⟨t.1, fun f => t.2 f.unop.1.hom f.unop.2⟩
  left_inv _ := rfl
  right_inv _ := rfl

variable (P S)

/-- Given sieve `S` and presheaf `P : Cᵒᵖ ⥤ A`, their natural associated cone is a limit cone
iff `Hom (E, P -)` is a sheaf of types for the sieve `S` and all `E : A`. -/
theorem isLimit_iff_isSheafFor :
    Nonempty (IsLimit (P.mapCone S.arrows.cocone.op)) ↔
      ∀ E : Aᵒᵖ, IsSheafFor (P ⋙ coyoneda.obj E) S.arrows := by
  dsimp [IsSheafFor]; simp_rw [compatible_iff_sieveCompatible]
  rw [((Cone.isLimitEquivIsTerminal _).trans (isTerminalEquivUnique _ _)).nonempty_congr]
  rw [Classical.nonempty_pi]; constructor
  · intro hu E x hx
    specialize hu hx.cone
    erw [(homEquivAmalgamation hx).uniqueCongr.nonempty_congr] at hu
    exact (unique_subtype_iff_exists_unique _).1 hu
  · rintro h ⟨E, π⟩
    let eqv := conesEquivSieveCompatibleFamily P S (op E)
    rw [← eqv.left_inv π]
    erw [(homEquivAmalgamation (eqv π).2).uniqueCongr.nonempty_congr]
    rw [unique_subtype_iff_exists_unique]
    exact h _ _ (eqv π).2

/-- Given sieve `S` and presheaf `P : Cᵒᵖ ⥤ A`, their natural associated cone admits at most one
morphism from every cone in the same category (i.e. over the same diagram), iff `Hom (E, P -)`
is separated for the sieve `S` and all `E : A`. -/
theorem subsingleton_iff_isSeparatedFor :
    (∀ c, Subsingleton (c ⟶ P.mapCone S.arrows.cocone.op)) ↔
      ∀ E : Aᵒᵖ, IsSeparatedFor (P ⋙ coyoneda.obj E) S.arrows := by
  constructor
  · intro hs E x t₁ t₂ h₁ h₂
    have hx := is_compatible_of_exists_amalgamation x ⟨t₁, h₁⟩
    rw [compatible_iff_sieveCompatible] at hx
    specialize hs hx.cone
    rcases hs with ⟨hs⟩
    simpa only [Subtype.mk.injEq] using
      (show Subtype.mk t₁ h₁ = ⟨t₂, h₂⟩ from
        (homEquivAmalgamation hx).symm.injective (hs _ _))
  · rintro h ⟨E, π⟩
    let eqv := conesEquivSieveCompatibleFamily P S (op E)
    constructor
    rw [← eqv.left_inv π]
    intro f₁ f₂
    let eqv' := homEquivAmalgamation (eqv π).2
    apply eqv'.injective
    ext
    apply h _ (eqv π).1 <;> exact (eqv' _).2

/-- A presheaf `P` is a sheaf for the Grothendieck topology `J` iff for every covering sieve
`S` of `J`, the natural cone associated to `P` and `S` is a limit cone. -/
theorem isSheaf_iff_isLimit :
    IsSheaf J P ↔
      ∀ ⦃X : C⦄ (S : Sieve X), S ∈ J X → Nonempty (IsLimit (P.mapCone S.arrows.cocone.op)) :=
  ⟨fun h _ S hS => (isLimit_iff_isSheafFor P S).2 fun E => h E.unop S hS, fun h E _ S hS =>
    (isLimit_iff_isSheafFor P S).1 (h S hS) (op E)⟩

/-- A presheaf `P` is separated for the Grothendieck topology `J` iff for every covering sieve
`S` of `J`, the natural cone associated to `P` and `S` admits at most one morphism from every
cone in the same category. -/
theorem isSeparated_iff_subsingleton :
    (∀ E : A, Presieve.IsSeparated J (P ⋙ coyoneda.obj (op E))) ↔
      ∀ ⦃X : C⦄ (S : Sieve X), S ∈ J X → ∀ c, Subsingleton (c ⟶ P.mapCone S.arrows.cocone.op) :=
  ⟨fun h _ S hS => (subsingleton_iff_isSeparatedFor P S).2 fun E => h E.unop S hS, fun h E _ S hS =>
    (subsingleton_iff_isSeparatedFor P S).1 (h S hS) (op E)⟩

/-- Given presieve `R` and presheaf `P : Cᵒᵖ ⥤ A`, the natural cone associated to `P` and
the sieve `Sieve.generate R` generated by `R` is a limit cone iff `Hom (E, P -)` is a
sheaf of types for the presieve `R` and all `E : A`. -/
theorem isLimit_iff_isSheafFor_presieve :
    Nonempty (IsLimit (P.mapCone (generate R).arrows.cocone.op)) ↔
      ∀ E : Aᵒᵖ, IsSheafFor (P ⋙ coyoneda.obj E) R :=
  (isLimit_iff_isSheafFor P _).trans (forall_congr' fun _ => (isSheafFor_iff_generate _).symm)

/-- A presheaf `P` is a sheaf for the Grothendieck topology generated by a pretopology `K`
iff for every covering presieve `R` of `K`, the natural cone associated to `P` and
`Sieve.generate R` is a limit cone. -/
theorem isSheaf_iff_isLimit_pretopology [HasPullbacks C] (K : Pretopology C) :
    IsSheaf (K.toGrothendieck C) P ↔
      ∀ ⦃X : C⦄ (R : Presieve X),
        R ∈ K X → Nonempty (IsLimit (P.mapCone (generate R).arrows.cocone.op)) := by
  dsimp [IsSheaf]
  simp_rw [isSheaf_pretopology]
  exact
    ⟨fun h X R hR => (isLimit_iff_isSheafFor_presieve P R).2 fun E => h E.unop R hR,
      fun h E X R hR => (isLimit_iff_isSheafFor_presieve P R).1 (h R hR) (op E)⟩

end LimitSheafCondition

variable {J}

/-- This is a wrapper around `Presieve.IsSheafFor.amalgamate` to be used below.
If `P` is a sheaf, `S` is a cover of `X`, and `x` is a collection of morphisms from `E`
to `P` evaluated at terms in the cover which are compatible, then we can amalgamate
the `x`s to obtain a single morphism `E ⟶ P.obj (op X)`. -/
def IsSheaf.amalgamate {A : Type u₂} [Category.{v₂} A] {E : A} {X : C} {P : Cᵒᵖ ⥤ A}
    (hP : Presheaf.IsSheaf J P) (S : J.Cover X) (x : ∀ I : S.Arrow, E ⟶ P.obj (op I.Y))
    (hx : ∀ ⦃I₁ I₂ : S.Arrow⦄ (r : I₁.Relation I₂),
      x I₁ ≫ P.map r.g₁.op = x I₂ ≫ P.map r.g₂.op) :
    E ⟶ P.obj (op X) :=
  (hP _ _ S.condition).amalgamate (fun Y f hf => x ⟨Y, f, hf⟩)
    fun _ _ _ _ _ _ _ h₁ h₂ w => @hx { hf := h₁ } { hf := h₂ } { w := w }

@[reassoc (attr := simp)]
theorem IsSheaf.amalgamate_map {A : Type u₂} [Category.{v₂} A] {E : A} {X : C} {P : Cᵒᵖ ⥤ A}
    (hP : Presheaf.IsSheaf J P) (S : J.Cover X) (x : ∀ I : S.Arrow, E ⟶ P.obj (op I.Y))
    (hx : ∀ ⦃I₁ I₂ : S.Arrow⦄ (r : I₁.Relation I₂),
      x I₁ ≫ P.map r.g₁.op = x I₂ ≫ P.map r.g₂.op)
    (I : S.Arrow) : hP.amalgamate S x hx ≫ P.map I.f.op = x _ := by
  apply (hP _ _ S.condition).valid_glue

theorem IsSheaf.hom_ext {A : Type u₂} [Category.{v₂} A] {E : A} {X : C} {P : Cᵒᵖ ⥤ A}
    (hP : Presheaf.IsSheaf J P) (S : J.Cover X) (e₁ e₂ : E ⟶ P.obj (op X))
    (h : ∀ I : S.Arrow, e₁ ≫ P.map I.f.op = e₂ ≫ P.map I.f.op) : e₁ = e₂ :=
  (hP _ _ S.condition).isSeparatedFor.ext fun Y f hf => h ⟨Y, f, hf⟩

/-- Morphisms into a sheaf can be compared on a covering family given by `Sieve.ofArrows`. -/
lemma IsSheaf.hom_ext_ofArrows {P : Cᵒᵖ ⥤ A} (hP : Presheaf.IsSheaf J P)
    {I : Type*} {S : C} {X : I → C} (f : ∀ i, X i ⟶ S)
    (hf : Sieve.ofArrows _ f ∈ J S) {E : A} {x y : E ⟶ P.obj (op S)}
    (h : ∀ i, x ≫ P.map (f i).op = y ≫ P.map (f i).op) : x = y := by
  apply hP.hom_ext ⟨_, hf⟩
  rintro ⟨Z, _, _, g, _, ⟨i⟩, rfl⟩
  dsimp
  rw [P.map_comp, reassoc_of% (h i)]

section

variable {P : Cᵒᵖ ⥤ A} (hP : Presheaf.IsSheaf J P) {I : Type*} {S : C} {X : I → C}
  (f : ∀ i, X i ⟶ S) (hf : Sieve.ofArrows _ f ∈ J S) {E : A}
  (x : ∀ i, E ⟶ P.obj (op (X i)))
  (hx : ∀ ⦃W : C⦄ ⦃i j : I⦄ (a : W ⟶ X i) (b : W ⟶ X j),
    a ≫ f i = b ≫ f j → x i ≫ P.map a.op = x j ≫ P.map b.op)

/-- A compatible family on a covering `Sieve.ofArrows` amalgamates uniquely. -/
lemma IsSheaf.exists_unique_amalgamation_ofArrows :
    ∃! (g : E ⟶ P.obj (op S)), ∀ (i : I), g ≫ P.map (f i).op = x i :=
  (Presieve.isSheafFor_arrows_iff _ _).1
    ((Presieve.isSheafFor_iff_generate _).2 (hP E _ hf)) x
    (fun _ _ _ _ _ w => hx _ _ w)

/-- If `P : Cᵒᵖ ⥤ A` is a sheaf and `f i : X i ⟶ S` is a covering family, then
a morphism `E ⟶ P.obj (op S)` can be constructed from a compatible family of
morphisms `x : E ⟶ P.obj (op (X i))`. -/
def IsSheaf.amalgamateOfArrows : E ⟶ P.obj (op S) :=
  (hP.exists_unique_amalgamation_ofArrows f hf x hx).choose

@[reassoc (attr := simp)]
lemma IsSheaf.amalgamateOfArrows_map (i : I) :
    hP.amalgamateOfArrows f hf x hx ≫ P.map (f i).op = x i :=
  (hP.exists_unique_amalgamation_ofArrows f hf x hx).choose_spec.1 i

end

theorem isSheaf_of_iso_iff {P P' : Cᵒᵖ ⥤ A} (e : P ≅ P') : IsSheaf J P ↔ IsSheaf J P' :=
  forall_congr' fun _ =>
    ⟨Presieve.isSheaf_iso J (isoWhiskerRight e _),
      Presieve.isSheaf_iso J (isoWhiskerRight e.symm _)⟩

variable (J)

theorem isSheaf_of_isTerminal {X : A} (hX : IsTerminal X) :
    Presheaf.IsSheaf J ((CategoryTheory.Functor.const _).obj X) :=
  fun _ _ _ _ _ _ => ⟨hX.from _, fun _ _ _ => hX.hom_ext _ _, fun _ _ => hX.hom_ext _ _⟩

end Presheaf

variable {C : Type u₁} [Category.{v₁} C]
variable (J : GrothendieckTopology C)
variable (A : Type u₂) [Category.{v₂} A]

/-- The category of sheaves taking values in `A` on a grothendieck topology. -/
structure Sheaf where
  /-- the underlying presheaf -/
  val : Cᵒᵖ ⥤ A
  /-- the condition that the presheaf is a sheaf -/
  cond : Presheaf.IsSheaf J val

namespace Sheaf

variable {J A}

/-- Morphisms between sheaves are just morphisms of presheaves. -/
@[ext]
structure Hom (X Y : Sheaf J A) where
  /-- a morphism between the underlying presheaves -/
  val : X.val ⟶ Y.val

@[simps id_val comp_val]
instance instCategorySheaf : Category (Sheaf J A) where
  Hom := Hom
  id _ := ⟨𝟙 _⟩
  comp f g := ⟨f.val ≫ g.val⟩
  id_comp _ := Hom.ext <| id_comp _
  comp_id _ := Hom.ext <| comp_id _
  assoc _ _ _ := Hom.ext <| assoc _ _ _

-- Let's make the inhabited linter happy...
instance (X : Sheaf J A) : Inhabited (Hom X X) :=
  ⟨𝟙 X⟩

-- Porting note: added because `Sheaf.Hom.ext` was not triggered automatically
@[ext]
lemma hom_ext {X Y : Sheaf J A} (x y : X ⟶ Y) (h : x.val = y.val) : x = y :=
  Sheaf.Hom.ext h

end Sheaf

/-- The inclusion functor from sheaves to presheaves. -/
@[simps]
def sheafToPresheaf : Sheaf J A ⥤ Cᵒᵖ ⥤ A where
  obj := Sheaf.val
  map f := f.val
  map_id _ := rfl
  map_comp _ _ := rfl

/-- The sections of a sheaf (i.e. evaluation as a presheaf on `C`). -/
abbrev sheafSections : Cᵒᵖ ⥤ Sheaf J A ⥤ A :=
  (sheafToPresheaf J A).flip

/-- The functor `Sheaf J A ⥤ Cᵒᵖ ⥤ A` is fully faithful. -/
@[simps]
def fullyFaithfulSheafToPresheaf : (sheafToPresheaf J A).FullyFaithful where
  preimage f := ⟨f⟩

variable {J A} in
/-- The bijection `(X ⟶ Y) ≃ (X.val ⟶ Y.val)` when `X` and `Y` are sheaves. -/
abbrev Sheaf.homEquiv {X Y : Sheaf J A} : (X ⟶ Y) ≃ (X.val ⟶ Y.val) :=
  (fullyFaithfulSheafToPresheaf J A).homEquiv

instance : (sheafToPresheaf J A).Full :=
  (fullyFaithfulSheafToPresheaf J A).full

instance : (sheafToPresheaf J A).Faithful :=
  (fullyFaithfulSheafToPresheaf J A).faithful

instance : (sheafToPresheaf J A).ReflectsIsomorphisms :=
  (fullyFaithfulSheafToPresheaf J A).reflectsIsomorphisms

/-- This is stated as a lemma to prevent class search from forming a loop since a sheaf morphism
is monic if and only if it is monic as a presheaf morphism (under suitable assumption). -/
theorem Sheaf.Hom.mono_of_presheaf_mono {F G : Sheaf J A} (f : F ⟶ G) [h : Mono f.1] : Mono f :=
  (sheafToPresheaf J A).mono_of_mono_map h

instance Sheaf.Hom.epi_of_presheaf_epi {F G : Sheaf J A} (f : F ⟶ G) [h : Epi f.1] : Epi f :=
  (sheafToPresheaf J A).epi_of_epi_map h

/-- The sheaf of sections guaranteed by the sheaf condition. -/
@[simps]
def sheafOver {A : Type u₂} [Category.{v₂} A] {J : GrothendieckTopology C} (ℱ : Sheaf J A)
    (E : A) : SheafOfTypes J :=
  ⟨ℱ.val ⋙ coyoneda.obj (op E), ℱ.cond E⟩

theorem isSheaf_iff_isSheaf_of_type (P : Cᵒᵖ ⥤ Type w) :
    Presheaf.IsSheaf J P ↔ Presieve.IsSheaf J P := by
  constructor
  · intro hP
    refine Presieve.isSheaf_iso J ?_ (hP PUnit)
    exact isoWhiskerLeft _ Coyoneda.punitIso ≪≫ P.rightUnitor
  · intro hP X Y S hS z hz
    refine ⟨fun x => (hP S hS).amalgamate (fun Z f hf => z f hf x) ?_, ?_, ?_⟩
    · intro Y₁ Y₂ Z g₁ g₂ f₁ f₂ hf₁ hf₂ h
      exact congr_fun (hz g₁ g₂ hf₁ hf₂ h) x
    · intro Z f hf
      funext x
      apply Presieve.IsSheafFor.valid_glue
    · intro y hy
      funext x
      apply (hP S hS).isSeparatedFor.ext
      intro Y' f hf
      rw [Presieve.IsSheafFor.valid_glue _ _ _ hf, ← hy _ hf]
      rfl

variable {J} in
lemma Presheaf.IsSheaf.isSheafFor {P : Cᵒᵖ ⥤ Type w} (hP : Presheaf.IsSheaf J P)
    {X : C} (S : Sieve X) (hS : S ∈ J X) : Presieve.IsSheafFor P S.arrows := by
  rw [isSheaf_iff_isSheaf_of_type] at hP
  exact hP S hS

/-- The category of sheaves taking values in Type is the same as the category of set-valued
sheaves.
-/ @[simps] def sheafEquivSheafOfTypes : Sheaf J (Type w) ≌ SheafOfTypes J where functor := { obj := fun S => ⟨S.val, (isSheaf_iff_isSheaf_of_type _ _).1 S.2⟩ map := fun f => ⟨f.val⟩ } inverse := { obj := fun S => ⟨S.val, (isSheaf_iff_isSheaf_of_type _ _).2 S.2⟩ map := fun f => ⟨f.val⟩ } unitIso := NatIso.ofComponents fun X => Iso.refl _ counitIso := NatIso.ofComponents fun X => Iso.refl _ instance : Inhabited (Sheaf (⊥ : GrothendieckTopology C) (Type w)) := ⟨(sheafEquivSheafOfTypes _).inverse.obj default⟩ variable {J} {A} /-- If the empty sieve is a cover of `X`, then `F(X)` is terminal. -/ def Sheaf.isTerminalOfBotCover (F : Sheaf J A) (X : C) (H : ⊥ ∈ J X) : IsTerminal (F.1.obj (op X)) := by refine @IsTerminal.ofUnique _ _ _ ?_ intro Y choose t h using F.2 Y _ H (by tauto) (by tauto) exact ⟨⟨t⟩, fun a => h.2 a (by tauto)⟩ section Preadditive open Preadditive variable [Preadditive A] {P Q : Sheaf J A} instance sheafHomHasZSMul : SMul ℤ (P ⟶ Q) where smul n f := Sheaf.Hom.mk { app := fun U => n • f.1.app U naturality := fun U V i => by induction' n using Int.induction_on with n ih n ih · simp only [zero_smul, comp_zero, zero_comp] · simpa only [add_zsmul, one_zsmul, comp_add, NatTrans.naturality, add_comp, add_left_inj] · simpa only [sub_smul, one_zsmul, comp_sub, NatTrans.naturality, sub_comp, sub_left_inj] using ih } instance : Sub (P ⟶ Q) where sub f g := Sheaf.Hom.mk <| f.1 - g.1 instance : Neg (P ⟶ Q) where neg f := Sheaf.Hom.mk <| -f.1 instance sheafHomHasNSMul : SMul ℕ (P ⟶ Q) where smul n f := Sheaf.Hom.mk { app := fun U => n • f.1.app U naturality := fun U V i => by induction' n with n ih · simp only [zero_smul, comp_zero, zero_comp, Nat.zero_eq] · simp only [Nat.succ_eq_add_one, add_smul, ih, one_nsmul, comp_add, NatTrans.naturality, add_comp] } instance : Zero (P ⟶ Q) where zero := Sheaf.Hom.mk 0 instance : Add (P ⟶ Q) where add f g := Sheaf.Hom.mk <| f.1 + g.1 @[simp] theorem Sheaf.Hom.add_app (f g : P ⟶ Q) (U) : (f + g).1.app U = f.1.app U + g.1.app U 
:= rfl instance Sheaf.Hom.addCommGroup : AddCommGroup (P ⟶ Q) := Function.Injective.addCommGroup (fun f : Sheaf.Hom P Q => f.1) (fun _ _ h => Sheaf.Hom.ext h) rfl (fun _ _ => rfl) (fun _ => rfl) (fun _ _ => rfl) (fun _ _ => by aesop_cat) (fun _ _ => by aesop_cat) instance : Preadditive (Sheaf J A) where homGroup P Q := Sheaf.Hom.addCommGroup end Preadditive end CategoryTheory namespace CategoryTheory open Opposite CategoryTheory Category Limits Sieve namespace Presheaf -- Under here is the equalizer story, which is equivalent if A has products (and doesn't -- make sense otherwise). It's described in https://stacks.math.columbia.edu/tag/00VL, -- between 00VQ and 00VR. variable {C : Type u₁} [Category.{v₁} C] -- `A` is a general category; `A'` is a variant where the morphisms live in a large enough -- universe to guarantee that we can take limits in A of things coming from C. -- I would have liked to use something like `UnivLE.{max v₁ u₁, v₂}` as a hypothesis on -- `A`'s morphism universe rather than introducing `A'` but I can't get it to work. -- So, for now, results which need max v₁ u₁ ≤ v₂ are just stated for `A'` and `P' : Cᵒᵖ ⥤ A'` -- instead. variable {A : Type u₂} [Category.{v₂} A] variable {A' : Type u₂} [Category.{max v₁ u₁} A'] variable {B : Type u₃} [Category.{v₃} B] variable (J : GrothendieckTopology C) variable {U : C} (R : Presieve U) variable (P : Cᵒᵖ ⥤ A) (P' : Cᵒᵖ ⥤ A') section MultiequalizerConditions /-- When `P` is a sheaf and `S` is a cover, the associated multifork is a limit. 
-/ def isLimitOfIsSheaf {X : C} (S : J.Cover X) (hP : IsSheaf J P) : IsLimit (S.multifork P) where lift := fun E : Multifork _ => hP.amalgamate S (fun I => E.ι _) (fun _ _ r => E.condition ⟨_, _, r⟩) fac := by rintro (E : Multifork _) (a | b) · apply hP.amalgamate_map · rw [← E.w (WalkingMulticospan.Hom.fst b), ← (S.multifork P).w (WalkingMulticospan.Hom.fst b), ← assoc] congr 1 apply hP.amalgamate_map uniq := by rintro (E : Multifork _) m hm apply hP.hom_ext S intro I erw [hm (WalkingMulticospan.left I)] symm apply hP.amalgamate_map theorem isSheaf_iff_multifork : IsSheaf J P ↔ ∀ (X : C) (S : J.Cover X), Nonempty (IsLimit (S.multifork P)) := by refine ⟨fun hP X S => ⟨isLimitOfIsSheaf _ _ _ hP⟩, ?_⟩ intro h E X S hS x hx let T : J.Cover X := ⟨S, hS⟩ obtain ⟨hh⟩ := h _ T let K : Multifork (T.index P) := Multifork.ofι _ E (fun I => x I.f I.hf) (fun I => hx _ _ _ _ I.r.w) use hh.lift K dsimp; constructor · intro Y f hf apply hh.fac K (WalkingMulticospan.left ⟨Y, f, hf⟩) · intro e he apply hh.uniq K rintro (a | b) · apply he · rw [← K.w (WalkingMulticospan.Hom.fst b), ← (T.multifork P).w (WalkingMulticospan.Hom.fst b), ← assoc] congr 1 apply he variable {J P} in /-- If `F : Cᵒᵖ ⥤ A` is a sheaf for a Grothendieck topology `J` on `C`, and `S` is a cover of `X : C`, then the multifork `S.multifork F` is limit. 
-/ def IsSheaf.isLimitMultifork (hP : Presheaf.IsSheaf J P) {X : C} (S : J.Cover X) : IsLimit (S.multifork P) := by rw [Presheaf.isSheaf_iff_multifork] at hP exact (hP X S).some theorem isSheaf_iff_multiequalizer [∀ (X : C) (S : J.Cover X), HasMultiequalizer (S.index P)] : IsSheaf J P ↔ ∀ (X : C) (S : J.Cover X), IsIso (S.toMultiequalizer P) := by rw [isSheaf_iff_multifork] refine forall₂_congr fun X S => ⟨?_, ?_⟩ · rintro ⟨h⟩ let e : P.obj (op X) ≅ multiequalizer (S.index P) := h.conePointUniqueUpToIso (limit.isLimit _) exact (inferInstance : IsIso e.hom) · intro h refine ⟨IsLimit.ofIsoLimit (limit.isLimit _) (Cones.ext ?_ ?_)⟩ · apply (@asIso _ _ _ _ _ h).symm · intro a symm erw [IsIso.inv_comp_eq] dsimp simp end MultiequalizerConditions section variable [HasProducts.{max u₁ v₁} A] variable [HasProducts.{max u₁ v₁} A'] /-- The middle object of the fork diagram given in Equation (3) of [MM92], as well as the fork diagram of <https://stacks.math.columbia.edu/tag/00VM>. -/ def firstObj : A := ∏ᶜ fun f : ΣV, { f : V ⟶ U // R f } => P.obj (op f.1) /-- The left morphism of the fork diagram given in Equation (3) of [MM92], as well as the fork diagram of <https://stacks.math.columbia.edu/tag/00VM>. -/ def forkMap : P.obj (op U) ⟶ firstObj R P := Pi.lift fun f => P.map f.2.1.op variable [HasPullbacks C] /-- The rightmost object of the fork diagram of https://stacks.math.columbia.edu/tag/00VM, which contains the data used to check a family of elements for a presieve is compatible. -/ def secondObj : A := ∏ᶜ fun fg : (ΣV, { f : V ⟶ U // R f }) × ΣW, { g : W ⟶ U // R g } => P.obj (op (pullback fg.1.2.1 fg.2.2.1)) /-- The map `pr₀*` of <https://stacks.math.columbia.edu/tag/00VM>. -/ def firstMap : firstObj R P ⟶ secondObj R P := Pi.lift fun _ => Pi.π _ _ ≫ P.map (pullback.fst _ _).op /-- The map `pr₁*` of <https://stacks.math.columbia.edu/tag/00VM>. 
-/ def secondMap : firstObj R P ⟶ secondObj R P := Pi.lift fun _ => Pi.π _ _ ≫ P.map (pullback.snd _ _).op theorem w : forkMap R P ≫ firstMap R P = forkMap R P ≫ secondMap R P := by apply limit.hom_ext rintro ⟨⟨Y, f, hf⟩, ⟨Z, g, hg⟩⟩ simp only [firstMap, secondMap, forkMap, limit.lift_π, limit.lift_π_assoc, assoc, Fan.mk_π_app, Subtype.coe_mk] rw [← P.map_comp, ← op_comp, pullback.condition] simp /-- An alternative definition of the sheaf condition in terms of equalizers. This is shown to be equivalent in `CategoryTheory.Presheaf.isSheaf_iff_isSheaf'`. -/ def IsSheaf' (P : Cᵒᵖ ⥤ A) : Prop := ∀ (U : C) (R : Presieve U) (_ : generate R ∈ J U), Nonempty (IsLimit (Fork.ofι _ (w R P))) -- Again I wonder whether `UnivLE` can somehow be used to allow `s` to take -- values in a more general universe. /-- (Implementation). An auxiliary lemma to convert between sheaf conditions. -/ def isSheafForIsSheafFor' (P : Cᵒᵖ ⥤ A) (s : A ⥤ Type max v₁ u₁) [∀ J, PreservesLimitsOfShape (Discrete.{max v₁ u₁} J) s] (U : C) (R : Presieve U) : IsLimit (s.mapCone (Fork.ofι _ (w R P))) ≃ IsLimit (Fork.ofι _ (Equalizer.Presieve.w (P ⋙ s) R)) := by apply Equiv.trans (isLimitMapConeForkEquiv _ _) _ apply (IsLimit.postcomposeHomEquiv _ _).symm.trans (IsLimit.equivIsoLimit _) · apply NatIso.ofComponents _ _ · rintro (_ | _) · apply PreservesProduct.iso s · apply PreservesProduct.iso s · rintro _ _ (_ | _) · refine limit.hom_ext (fun j => ?_) dsimp [Equalizer.Presieve.firstMap, firstMap] simp only [limit.lift_π, map_lift_piComparison, assoc, Fan.mk_π_app, Functor.map_comp] rw [piComparison_comp_π_assoc] · refine limit.hom_ext (fun j => ?_) dsimp [Equalizer.Presieve.secondMap, secondMap] simp only [limit.lift_π, map_lift_piComparison, assoc, Fan.mk_π_app, Functor.map_comp] rw [piComparison_comp_π_assoc] · dsimp simp · refine Fork.ext (Iso.refl _) ?_ dsimp [Equalizer.forkMap, forkMap] simp [Fork.ι] -- Remark : this lemma uses `A'` not `A`; `A'` is `A` but with a universe -- restriction. 
Can it be generalised? /-- The equalizer definition of a sheaf given by `isSheaf'` is equivalent to `isSheaf`. -/ theorem isSheaf_iff_isSheaf' : IsSheaf J P' ↔ IsSheaf' J P' := by constructor · intro h U R hR refine ⟨?_⟩ apply coyonedaJointlyReflectsLimits intro X have q : Presieve.IsSheafFor (P' ⋙ coyoneda.obj X) _ := h X.unop _ hR rw [← Presieve.isSheafFor_iff_generate] at q rw [Equalizer.Presieve.sheaf_condition] at q replace q := Classical.choice q apply (isSheafForIsSheafFor' _ _ _ _).symm q · intro h U X S hS rw [Equalizer.Presieve.sheaf_condition] refine ⟨?_⟩ refine isSheafForIsSheafFor' _ _ _ _ ?_ letI := preservesSmallestLimitsOfPreservesLimits (coyoneda.obj (op U)) apply isLimitOfPreserves apply Classical.choice (h _ S.arrows _) simpa end section Concrete theorem isSheaf_of_isSheaf_comp (s : A ⥤ B) [ReflectsLimitsOfSize.{v₁, max v₁ u₁} s] (h : IsSheaf J (P ⋙ s)) : IsSheaf J P := by rw [isSheaf_iff_isLimit] at h ⊢ exact fun X S hS ↦ (h S hS).map fun t ↦ isLimitOfReflects s t theorem isSheaf_comp_of_isSheaf (s : A ⥤ B) [PreservesLimitsOfSize.{v₁, max v₁ u₁} s] (h : IsSheaf J P) : IsSheaf J (P ⋙ s) := by rw [isSheaf_iff_isLimit] at h ⊢ apply fun X S hS ↦ (h S hS).map fun t ↦ isLimitOfPreserves s t theorem isSheaf_iff_isSheaf_comp (s : A ⥤ B) [HasLimitsOfSize.{v₁, max v₁ u₁} A] [PreservesLimitsOfSize.{v₁, max v₁ u₁} s] [s.ReflectsIsomorphisms] : IsSheaf J P ↔ IsSheaf J (P ⋙ s) := by letI : ReflectsLimitsOfSize s := reflectsLimitsOfReflectsIsomorphisms exact ⟨isSheaf_comp_of_isSheaf J P s, isSheaf_of_isSheaf_comp J P s⟩ /-- For a concrete category `(A, s)` where the forgetful functor `s : A ⥤ Type v` preserves limits and reflects isomorphisms, and `A` has limits, an `A`-valued presheaf `P : Cᵒᵖ ⥤ A` is a sheaf iff its underlying `Type`-valued presheaf `P ⋙ s : Cᵒᵖ ⥤ Type` is a sheaf. 
Note this lemma applies for "algebraic" categories, eg groups, abelian groups and rings, but not for the category of topological spaces, topological rings, etc since reflecting isomorphisms doesn't hold. -/ theorem isSheaf_iff_isSheaf_forget (s : A' ⥤ Type max v₁ u₁) [HasLimits A'] [PreservesLimits s] [s.ReflectsIsomorphisms] : IsSheaf J P' ↔ IsSheaf J (P' ⋙ s) := by have : HasLimitsOfSize.{v₁, max v₁ u₁} A' := hasLimitsOfSizeShrink.{_, _, u₁, 0} A' have : PreservesLimitsOfSize.{v₁, max v₁ u₁} s := preservesLimitsOfSizeShrink.{_, 0, _, u₁} s apply isSheaf_iff_isSheaf_comp end Concrete end Presheaf end CategoryTheory
CategoryTheory\Sites\SheafHom.lean
/- Copyright (c) 2023 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import Mathlib.CategoryTheory.Sites.Over /-! Internal hom of sheaves In this file, given two sheaves `F` and `G` on a site `(C, J)` with values in a category `A`, we define a sheaf of types `sheafHom F G` which sends `X : C` to the type of morphisms between the restrictions of `F` and `G` to the categories `Over X`. We first define `presheafHom F G` when `F` and `G` are presheaves `Cᵒᵖ ⥤ A` and show that it is a sheaf when `G` is a sheaf. TODO: - turn both `presheafHom` and `sheafHom` into bifunctors - for a sheaf of types `F`, the `sheafHom` functor from `F` is right-adjoint to the product functor with `F`, i.e. for all `X` and `Y`, there is a natural bijection `(X ⨯ F ⟶ Y) ≃ (X ⟶ sheafHom F Y)`. - use these results in order to show that the category of sheaves of types is Cartesian closed -/ universe v v' u u' namespace CategoryTheory open Category Opposite Limits variable {C : Type u} [Category.{v} C] {J : GrothendieckTopology C} {A : Type u'} [Category.{v'} A] variable (F G : Cᵒᵖ ⥤ A) /-- Given two presheaves `F` and `G` on a category `C` with values in a category `A`, this `presheafHom F G` is the presheaf of types which sends an object `X : C` to the type of morphisms between the "restrictions" of `F` and `G` to the category `Over X`. -/ @[simps! obj] def presheafHom : Cᵒᵖ ⥤ Type _ where obj X := (Over.forget X.unop).op ⋙ F ⟶ (Over.forget X.unop).op ⋙ G map f := whiskerLeft (Over.map f.unop).op map_id := by rintro ⟨X⟩ ext φ ⟨Y⟩ simpa [Over.mapId] using φ.naturality ((Over.mapId X).hom.app Y).op map_comp := by rintro ⟨X⟩ ⟨Y⟩ ⟨Z⟩ ⟨f : Y ⟶ X⟩ ⟨g : Z ⟶ Y⟩ ext φ ⟨W⟩ simpa [Over.mapComp] using φ.naturality ((Over.mapComp g f).hom.app W).op variable {F G} /-- Equational lemma for the presheaf structure on `presheafHom`. 
It is advisable to use this lemma rather than `dsimp [presheafHom]` which may result in the need to prove equalities of objects in an `Over` category. -/ lemma presheafHom_map_app {X Y Z : C} (f : Z ⟶ Y) (g : Y ⟶ X) (h : Z ⟶ X) (w : f ≫ g = h) (α : (presheafHom F G).obj (op X)) : ((presheafHom F G).map g.op α).app (op (Over.mk f)) = α.app (op (Over.mk h)) := by subst w rfl @[simp] lemma presheafHom_map_app_op_mk_id {X Y : C} (g : Y ⟶ X) (α : (presheafHom F G).obj (op X)) : ((presheafHom F G).map g.op α).app (op (Over.mk (𝟙 Y))) = α.app (op (Over.mk g)) := presheafHom_map_app (𝟙 Y) g g (by simp) α variable (F G) /-- The sections of the presheaf `presheafHom F G` identify to morphisms `F ⟶ G`. -/ def presheafHomSectionsEquiv : (presheafHom F G).sections ≃ (F ⟶ G) where toFun s := { app := fun X => (s.1 X).app ⟨Over.mk (𝟙 _)⟩ naturality := by rintro ⟨X₁⟩ ⟨X₂⟩ ⟨f : X₂ ⟶ X₁⟩ dsimp refine Eq.trans ?_ ((s.1 ⟨X₁⟩).naturality (Over.homMk f : Over.mk f ⟶ Over.mk (𝟙 X₁)).op) erw [← s.2 f.op, presheafHom_map_app_op_mk_id] rfl } invFun f := ⟨fun X => whiskerLeft _ f, fun _ => rfl⟩ left_inv s := by dsimp ext ⟨X⟩ ⟨Y : Over X⟩ have H := s.2 Y.hom.op dsimp at H ⊢ rw [← H] apply presheafHom_map_app_op_mk_id right_inv f := rfl variable {F G} lemma PresheafHom.isAmalgamation_iff {X : C} (S : Sieve X) (x : Presieve.FamilyOfElements (presheafHom F G) S.arrows) (hx : x.Compatible) (y : (presheafHom F G).obj (op X)) : x.IsAmalgamation y ↔ ∀ (Y : C) (g : Y ⟶ X) (hg : S g), y.app (op (Over.mk g)) = (x g hg).app (op (Over.mk (𝟙 Y))) := by constructor · intro h Y g hg rw [← h g hg, presheafHom_map_app_op_mk_id] · intro h Y g hg dsimp ext ⟨W : Over Y⟩ refine (h W.left (W.hom ≫ g) (S.downward_closed hg _)).trans ?_ have H := hx (𝟙 _) W.hom (S.downward_closed hg W.hom) hg (by simp) dsimp at H simp only [Functor.map_id, FunctorToTypes.map_id_apply] at H rw [H, presheafHom_map_app_op_mk_id] rfl section variable {X : C} {S : Sieve X} (hG : ∀ ⦃Y : C⦄ (f : Y ⟶ X), IsLimit (G.mapCone (S.pullback 
f).arrows.cocone.op)) namespace PresheafHom.IsSheafFor variable (x : Presieve.FamilyOfElements (presheafHom F G) S.arrows) (hx : x.Compatible) {Y : C} (g : Y ⟶ X) lemma exists_app : ∃ (φ : F.obj (op Y) ⟶ G.obj (op Y)), ∀ {Z : C} (p : Z ⟶ Y) (hp : S (p ≫ g)), φ ≫ G.map p.op = F.map p.op ≫ (x (p ≫ g) hp).app ⟨Over.mk (𝟙 Z)⟩ := by let c : Cone ((Presieve.diagram (Sieve.pullback g S).arrows).op ⋙ G) := { pt := F.obj (op Y) π := { app := fun ⟨Z, hZ⟩ => F.map Z.hom.op ≫ (x _ hZ).app (op (Over.mk (𝟙 _))) naturality := by rintro ⟨Z₁, hZ₁⟩ ⟨Z₂, hZ₂⟩ ⟨f : Z₂ ⟶ Z₁⟩ dsimp rw [id_comp, assoc] have H := hx f.left (𝟙 _) hZ₁ hZ₂ (by simp) simp only [presheafHom_obj, unop_op, Functor.id_obj, op_id, FunctorToTypes.map_id_apply] at H let φ : Over.mk f.left ⟶ Over.mk (𝟙 Z₁.left) := Over.homMk f.left have H' := (x (Z₁.hom ≫ g) hZ₁).naturality φ.op dsimp at H H' ⊢ erw [← H, ← H', presheafHom_map_app_op_mk_id, ← F.map_comp_assoc, ← op_comp, Over.w f] } } use (hG g).lift c intro Z p hp exact ((hG g).fac c ⟨Over.mk p, hp⟩) /-- Auxiliary definition for `presheafHom_isSheafFor`. 
-/ noncomputable def app : F.obj (op Y) ⟶ G.obj (op Y) := (exists_app hG x hx g).choose lemma app_cond {Z : C} (p : Z ⟶ Y) (hp : S (p ≫ g)) : app hG x hx g ≫ G.map p.op = F.map p.op ≫ (x (p ≫ g) hp).app ⟨Over.mk (𝟙 Z)⟩ := (exists_app hG x hx g).choose_spec p hp end PresheafHom.IsSheafFor variable (F G S) open PresheafHom.IsSheafFor in lemma presheafHom_isSheafFor : Presieve.IsSheafFor (presheafHom F G) S.arrows := by intro x hx apply exists_unique_of_exists_of_unique · refine ⟨ { app := fun Y => app hG x hx Y.unop.hom naturality := by rintro ⟨Y₁ : Over X⟩ ⟨Y₂ : Over X⟩ ⟨φ : Y₂ ⟶ Y₁⟩ apply (hG Y₂.hom).hom_ext rintro ⟨Z : Over Y₂.left, hZ⟩ dsimp rw [assoc, assoc, app_cond hG x hx Y₂.hom Z.hom hZ, ← G.map_comp, ← op_comp] erw [app_cond hG x hx Y₁.hom (Z.hom ≫ φ.left) (by simpa using hZ), ← F.map_comp_assoc, op_comp] congr 3 simp }, ?_⟩ rw [PresheafHom.isAmalgamation_iff _ _ hx] intro Y g hg dsimp have H := app_cond hG x hx g (𝟙 _) (by simpa using hg) rw [op_id, G.map_id, comp_id, F.map_id, id_comp] at H exact H.trans (by congr; simp) · intro y₁ y₂ hy₁ hy₂ rw [PresheafHom.isAmalgamation_iff _ _ hx] at hy₁ hy₂ apply NatTrans.ext ext ⟨Y : Over X⟩ apply (hG Y.hom).hom_ext rintro ⟨Z : Over Y.left, hZ⟩ dsimp let φ : Over.mk (Z.hom ≫ Y.hom) ⟶ Y := Over.homMk Z.hom refine (y₁.naturality φ.op).symm.trans (Eq.trans ?_ (y₂.naturality φ.op)) rw [(hy₁ _ _ hZ), ← ((hy₂ _ _ hZ))] end variable (F G) lemma Presheaf.IsSheaf.hom (hG : Presheaf.IsSheaf J G) : Presheaf.IsSheaf J (presheafHom F G) := by rw [isSheaf_iff_isSheaf_of_type] intro X S hS exact presheafHom_isSheafFor F G S (fun _ _ => ((Presheaf.isSheaf_iff_isLimit J G).1 hG _ (J.pullback_stable _ hS)).some) /-- The underlying presheaf of `sheafHom F G`. It is isomorphic to `presheafHom F.1 G.1` (see `sheafHom'Iso`), but has better definitional properties. 
-/ def sheafHom' (F G : Sheaf J A) : Cᵒᵖ ⥤ Type _ where obj X := (J.overPullback A X.unop).obj F ⟶ (J.overPullback A X.unop).obj G map f := fun φ => (J.overMapPullback A f.unop).map φ map_id X := by ext φ : 2 exact congr_fun ((presheafHom F.1 G.1).map_id X) φ.1 map_comp f g := by ext φ : 2 exact congr_fun ((presheafHom F.1 G.1).map_comp f g) φ.1 /-- The canonical isomorphism `sheafHom' F G ≅ presheafHom F.1 G.1`. -/ def sheafHom'Iso (F G : Sheaf J A) : sheafHom' F G ≅ presheafHom F.1 G.1 := NatIso.ofComponents (fun _ => Sheaf.homEquiv.toIso) (fun _ => rfl) /-- Given two sheaves `F` and `G` on a site `(C, J)` with values in a category `A`, this `sheafHom F G` is the sheaf of types which sends an object `X : C` to the type of morphisms between the "restrictions" of `F` and `G` to the category `Over X`. -/ def sheafHom (F G : Sheaf J A) : Sheaf J (Type _) where val := sheafHom' F G cond := (Presheaf.isSheaf_of_iso_iff (sheafHom'Iso F G)).2 (G.2.hom F.1) /-- The sections of the sheaf `sheafHom F G` identify to morphisms `F ⟶ G`. -/ def sheafHomSectionsEquiv (F G : Sheaf J A) : (sheafHom F G).1.sections ≃ (F ⟶ G) := ((Functor.sectionsFunctor Cᵒᵖ).mapIso (sheafHom'Iso F G)).toEquiv.trans ((presheafHomSectionsEquiv F.1 G.1).trans Sheaf.homEquiv.symm) @[simp] lemma sheafHomSectionsEquiv_symm_apply_coe_apply {F G : Sheaf J A} (φ : F ⟶ G) (X : Cᵒᵖ) : ((sheafHomSectionsEquiv F G).symm φ).1 X = (J.overPullback A X.unop).map φ := rfl end CategoryTheory
CategoryTheory\Sites\Sheafification.lean
/-
Copyright (c) 2023 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.Adjunction.Unique
import Mathlib.CategoryTheory.Adjunction.FullyFaithful
import Mathlib.CategoryTheory.Sites.Sheaf
import Mathlib.CategoryTheory.Limits.Preserves.Finite

/-!
# Sheafification

Given a site `(C, J)` we define a typeclass `HasSheafify J A` saying that the inclusion functor
from `A`-valued sheaves on `C` to presheaves admits a left exact left adjoint (sheafification).

Note: to access the `HasSheafify` instance for suitable concrete categories, import the file
`Mathlib.CategoryTheory.Sites.LeftExact`.
-/

universe v₁ v₂ u₁ u₂

namespace CategoryTheory

open Limits

variable {C : Type u₁} [Category.{v₁} C] (J : GrothendieckTopology C)
variable (A : Type u₂) [Category.{v₂} A]

/-- A proposition saying that the inclusion functor from sheaves to presheaves admits a left
adjoint. -/
abbrev HasWeakSheafify : Prop := (sheafToPresheaf J A).IsRightAdjoint

/-- `HasSheafify` means that the inclusion functor from sheaves to presheaves admits a
left exact left adjoint (sheafification).

Given a finite limit preserving functor `F : (Cᵒᵖ ⥤ A) ⥤ Sheaf J A` and an adjunction
`adj : F ⊣ sheafToPresheaf J A`, use `HasSheafify.mk'` to construct a `HasSheafify` instance.
-/ class HasSheafify : Prop where isRightAdjoint : HasWeakSheafify J A isLeftExact : Nonempty (PreservesFiniteLimits ((sheafToPresheaf J A).leftAdjoint)) instance [HasSheafify J A] : HasWeakSheafify J A := HasSheafify.isRightAdjoint noncomputable section instance [HasSheafify J A] : PreservesFiniteLimits ((sheafToPresheaf J A).leftAdjoint) := HasSheafify.isLeftExact.some theorem HasSheafify.mk' {F : (Cᵒᵖ ⥤ A) ⥤ Sheaf J A} (adj : F ⊣ sheafToPresheaf J A) [PreservesFiniteLimits F] : HasSheafify J A where isRightAdjoint := ⟨F, ⟨adj⟩⟩ isLeftExact := ⟨by have : (sheafToPresheaf J A).IsRightAdjoint := ⟨_, ⟨adj⟩⟩ exact ⟨fun _ _ _ ↦ preservesLimitsOfShapeOfNatIso (adj.leftAdjointUniq (Adjunction.ofIsRightAdjoint (sheafToPresheaf J A)))⟩⟩ /-- The sheafification functor, left adjoint to the inclusion. -/ def presheafToSheaf [HasWeakSheafify J A] : (Cᵒᵖ ⥤ A) ⥤ Sheaf J A := (sheafToPresheaf J A).leftAdjoint instance [HasSheafify J A] : PreservesFiniteLimits (presheafToSheaf J A) := HasSheafify.isLeftExact.some /-- The sheafification-inclusion adjunction. -/ def sheafificationAdjunction [HasWeakSheafify J A] : presheafToSheaf J A ⊣ sheafToPresheaf J A := Adjunction.ofIsRightAdjoint _ instance [HasWeakSheafify J A] : (presheafToSheaf J A).IsLeftAdjoint := ⟨_, ⟨sheafificationAdjunction J A⟩⟩ end variable {D : Type*} [Category D] [HasWeakSheafify J D] /-- The sheafification of a presheaf `P`. -/ noncomputable abbrev sheafify (P : Cᵒᵖ ⥤ D) : Cᵒᵖ ⥤ D := presheafToSheaf J D |>.obj P |>.val /-- The canonical map from `P` to its sheafification. -/ noncomputable abbrev toSheafify (P : Cᵒᵖ ⥤ D) : P ⟶ sheafify J P := sheafificationAdjunction J D |>.unit.app P @[simp] theorem sheafificationAdjunction_unit_app (P : Cᵒᵖ ⥤ D) : (sheafificationAdjunction J D).unit.app P = toSheafify J P := rfl /-- The canonical map on sheafifications induced by a morphism. 
-/ noncomputable abbrev sheafifyMap {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : sheafify J P ⟶ sheafify J Q := presheafToSheaf J D |>.map η |>.val @[simp] theorem sheafifyMap_id (P : Cᵒᵖ ⥤ D) : sheafifyMap J (𝟙 P) = 𝟙 (sheafify J P) := by simp [sheafifyMap, sheafify] @[simp] theorem sheafifyMap_comp {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) : sheafifyMap J (η ≫ γ) = sheafifyMap J η ≫ sheafifyMap J γ := by simp [sheafifyMap, sheafify] @[reassoc (attr := simp)] theorem toSheafify_naturality {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : η ≫ toSheafify J _ = toSheafify J _ ≫ sheafifyMap J η := sheafificationAdjunction J D |>.unit.naturality η variable (D) /-- The sheafification of a presheaf `P`, as a functor. -/ noncomputable abbrev sheafification : (Cᵒᵖ ⥤ D) ⥤ Cᵒᵖ ⥤ D := presheafToSheaf J D ⋙ sheafToPresheaf J D theorem sheafification_obj (P : Cᵒᵖ ⥤ D) : (sheafification J D).obj P = sheafify J P := rfl theorem sheafification_map {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : (sheafification J D).map η = sheafifyMap J η := rfl /-- The canonical map from `P` to its sheafification, as a natural transformation. -/ noncomputable abbrev toSheafification : 𝟭 _ ⟶ sheafification J D := sheafificationAdjunction J D |>.unit theorem toSheafification_app (P : Cᵒᵖ ⥤ D) : (toSheafification J D).app P = toSheafify J P := rfl variable {D} theorem isIso_toSheafify {P : Cᵒᵖ ⥤ D} (hP : Presheaf.IsSheaf J P) : IsIso (toSheafify J P) := by refine ⟨(sheafificationAdjunction J D |>.counit.app ⟨P, hP⟩).val, ?_, ?_⟩ · change _ = (𝟙 (sheafToPresheaf J D ⋙ 𝟭 (Cᵒᵖ ⥤ D)) : _).app ⟨P, hP⟩ rw [← sheafificationAdjunction J D |>.right_triangle] rfl · change (sheafToPresheaf _ _).map _ ≫ _ = _ change _ ≫ (sheafificationAdjunction J D).unit.app ((sheafToPresheaf J D).obj ⟨P, hP⟩) = _ erw [← (sheafificationAdjunction J D).inv_counit_map (X := ⟨P, hP⟩), comp_inv_eq_id] /-- If `P` is a sheaf, then `P` is isomorphic to `sheafify J P`. 
-/ noncomputable def isoSheafify {P : Cᵒᵖ ⥤ D} (hP : Presheaf.IsSheaf J P) : P ≅ sheafify J P := letI := isIso_toSheafify J hP asIso (toSheafify J P) @[simp] theorem isoSheafify_hom {P : Cᵒᵖ ⥤ D} (hP : Presheaf.IsSheaf J P) : (isoSheafify J hP).hom = toSheafify J P := rfl /-- Given a sheaf `Q` and a morphism `P ⟶ Q`, construct a morphism from `sheafify J P` to `Q`. -/ noncomputable def sheafifyLift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) : sheafify J P ⟶ Q := (sheafificationAdjunction J D).homEquiv P ⟨Q, hQ⟩ |>.symm η |>.val @[simp] theorem sheafificationAdjunction_counit_app_val (P : Sheaf J D) : ((sheafificationAdjunction J D).counit.app P).val = sheafifyLift J (𝟙 P.val) P.cond := by unfold sheafifyLift rw [Adjunction.homEquiv_counit] simp @[reassoc (attr := simp)] theorem toSheafify_sheafifyLift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) : toSheafify J P ≫ sheafifyLift J η hQ = η := by rw [toSheafify, sheafifyLift, Adjunction.homEquiv_counit] change _ ≫ (sheafToPresheaf J D).map _ ≫ _ = _ simp only [Adjunction.unit_naturality_assoc] change _ ≫ (sheafificationAdjunction J D).unit.app ((sheafToPresheaf J D).obj ⟨Q, hQ⟩) ≫ _ = _ change _ ≫ _ ≫ (sheafToPresheaf J D).map _ = _ rw [sheafificationAdjunction J D |>.right_triangle_components (Y := ⟨Q, hQ⟩)] simp theorem sheafifyLift_unique {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) (γ : sheafify J P ⟶ Q) : toSheafify J P ≫ γ = η → γ = sheafifyLift J η hQ := by intro h rw [toSheafify] at h rw [sheafifyLift] let γ' : (presheafToSheaf J D).obj P ⟶ ⟨Q, hQ⟩ := ⟨γ⟩ change γ'.val = _ rw [← Sheaf.Hom.ext_iff, ← Adjunction.homEquiv_apply_eq, Adjunction.homEquiv_unit] exact h @[simp] theorem isoSheafify_inv {P : Cᵒᵖ ⥤ D} (hP : Presheaf.IsSheaf J P) : (isoSheafify J hP).inv = sheafifyLift J (𝟙 _) hP := by apply sheafifyLift_unique simp [Iso.comp_inv_eq] theorem sheafify_hom_ext {P Q : Cᵒᵖ ⥤ D} (η γ : sheafify J P ⟶ Q) (hQ : Presheaf.IsSheaf J Q) (h : toSheafify J P ≫ η = toSheafify J P ≫ γ) 
: η = γ := by rw [sheafifyLift_unique J _ hQ _ h, ← h] exact (sheafifyLift_unique J _ hQ _ h.symm).symm @[reassoc (attr := simp)] theorem sheafifyMap_sheafifyLift {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) (hR : Presheaf.IsSheaf J R) : sheafifyMap J η ≫ sheafifyLift J γ hR = sheafifyLift J (η ≫ γ) hR := by apply sheafifyLift_unique rw [← Category.assoc, ← toSheafify_naturality, Category.assoc, toSheafify_sheafifyLift] variable {J} /-- A sheaf `P` is isomorphic to its own sheafification. -/ @[simps] noncomputable def sheafificationIso (P : Sheaf J D) : P ≅ (presheafToSheaf J D).obj P.val where hom := ⟨(isoSheafify J P.2).hom⟩ inv := ⟨(isoSheafify J P.2).inv⟩ hom_inv_id := by ext1 apply (isoSheafify J P.2).hom_inv_id inv_hom_id := by ext1 apply (isoSheafify J P.2).inv_hom_id instance isIso_sheafificationAdjunction_counit (P : Sheaf J D) : IsIso ((sheafificationAdjunction J D).counit.app P) := isIso_of_fully_faithful (sheafToPresheaf J D) _ instance sheafification_reflective : IsIso (sheafificationAdjunction J D).counit := NatIso.isIso_of_isIso_app _ variable (J D) /-- The natural isomorphism `𝟭 (Sheaf J D) ≅ sheafToPresheaf J D ⋙ presheafToSheaf J D`. -/ @[simps!] noncomputable def sheafificationNatIso : 𝟭 (Sheaf J D) ≅ sheafToPresheaf J D ⋙ presheafToSheaf J D := NatIso.ofComponents (fun P => sheafificationIso P) (by aesop_cat) end CategoryTheory
CategoryTheory\Sites\SheafOfTypes.lean
/- Copyright (c) 2020 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta -/ import Mathlib.CategoryTheory.Sites.Pretopology import Mathlib.CategoryTheory.Sites.IsSheafFor /-! # Sheaves of types on a Grothendieck topology Defines the notion of a sheaf of types (usually called a sheaf of sets by mathematicians) on a category equipped with a Grothendieck topology, as well as a range of equivalent conditions useful in different situations. In `Mathlib/CategoryTheory/Sites/IsSheafFor.lean` it is defined what it means for a presheaf to be a sheaf *for* a particular sieve. Given a Grothendieck topology `J`, `P` is a sheaf if it is a sheaf for every sieve in the topology. See `IsSheaf`. In the case where the topology is generated by a basis, it suffices to check `P` is a sheaf for every presieve in the pretopology. See `isSheaf_pretopology`. We also provide equivalent conditions to satisfy alternate definitions given in the literature. * Stacks: In `Equalizer.Presieve.sheaf_condition`, the sheaf condition at a presieve is shown to be equivalent to that of https://stacks.math.columbia.edu/tag/00VM (and combined with `isSheaf_pretopology`, this shows the notions of `IsSheaf` are exactly equivalent.) The condition of https://stacks.math.columbia.edu/tag/00Z8 is virtually identical to the statement of `isSheafFor_iff_yonedaSheafCondition` (since the bijection described there carries the same information as the unique existence.) * Maclane-Moerdijk [MM92]: Using `compatible_iff_sieveCompatible`, the definitions of `IsSheaf` are equivalent. There are also alternate definitions given: - Sheaf for a pretopology (Prop 1): `isSheaf_pretopology` combined with `pullbackCompatible_iff`. - Sheaf for a pretopology as equalizer (Prop 1, bis): `Equalizer.Presieve.sheaf_condition` combined with the previous. 
## References * [MM92]: *Sheaves in geometry and logic*, Saunders MacLane, and Ieke Moerdijk: Chapter III, Section 4. * [Elephant]: *Sketches of an Elephant*, P. T. Johnstone: C2.1. * https://stacks.math.columbia.edu/tag/00VL (sheaves on a pretopology or site) * https://stacks.math.columbia.edu/tag/00ZB (sheaves on a topology) -/ universe w v u namespace CategoryTheory open Opposite CategoryTheory Category Limits Sieve namespace Presieve variable {C : Type u} [Category.{v} C] variable {P : Cᵒᵖ ⥤ Type w} variable {X : C} variable (J J₂ : GrothendieckTopology C) /-- A presheaf is separated for a topology if it is separated for every sieve in the topology. -/ def IsSeparated (P : Cᵒᵖ ⥤ Type w) : Prop := ∀ {X} (S : Sieve X), S ∈ J X → IsSeparatedFor P (S : Presieve X) /-- A presheaf is a sheaf for a topology if it is a sheaf for every sieve in the topology. If the given topology is given by a pretopology, `isSheaf_pretopology` shows it suffices to check the sheaf condition at presieves in the pretopology. -/ def IsSheaf (P : Cᵒᵖ ⥤ Type w) : Prop := ∀ ⦃X⦄ (S : Sieve X), S ∈ J X → IsSheafFor P (S : Presieve X) theorem IsSheaf.isSheafFor {P : Cᵒᵖ ⥤ Type w} (hp : IsSheaf J P) (R : Presieve X) (hr : generate R ∈ J X) : IsSheafFor P R := (isSheafFor_iff_generate R).2 <| hp _ hr theorem isSheaf_of_le (P : Cᵒᵖ ⥤ Type w) {J₁ J₂ : GrothendieckTopology C} : J₁ ≤ J₂ → IsSheaf J₂ P → IsSheaf J₁ P := fun h t _ S hS => t S (h _ hS) theorem isSeparated_of_isSheaf (P : Cᵒᵖ ⥤ Type w) (h : IsSheaf J P) : IsSeparated J P := fun S hS => (h S hS).isSeparatedFor /-- The property of being a sheaf is preserved by isomorphism. 
-/ theorem isSheaf_iso {P' : Cᵒᵖ ⥤ Type w} (i : P ≅ P') (h : IsSheaf J P) : IsSheaf J P' := fun _ S hS => isSheafFor_iso i (h S hS) theorem isSheaf_of_yoneda {P : Cᵒᵖ ⥤ Type v} (h : ∀ {X} (S : Sieve X), S ∈ J X → YonedaSheafCondition P S) : IsSheaf J P := fun _ _ hS => isSheafFor_iff_yonedaSheafCondition.2 (h _ hS) /-- For a topology generated by a basis, it suffices to check the sheaf condition on the basis presieves only. -/ theorem isSheaf_pretopology [HasPullbacks C] (K : Pretopology C) : IsSheaf (K.toGrothendieck C) P ↔ ∀ {X : C} (R : Presieve X), R ∈ K X → IsSheafFor P R := by constructor · intro PJ X R hR rw [isSheafFor_iff_generate] apply PJ (Sieve.generate R) ⟨_, hR, le_generate R⟩ · rintro PK X S ⟨R, hR, RS⟩ have gRS : ⇑(generate R) ≤ S := by apply giGenerate.gc.monotone_u rwa [generate_le_iff] apply isSheafFor_subsieve P gRS _ intro Y f rw [← pullbackArrows_comm, ← isSheafFor_iff_generate] exact PK (pullbackArrows f R) (K.pullbacks f R hR) /-- Any presheaf is a sheaf for the bottom (trivial) grothendieck topology. -/ theorem isSheaf_bot : IsSheaf (⊥ : GrothendieckTopology C) P := fun X => by simp [isSheafFor_top_sieve] /-- For a presheaf of the form `yoneda.obj W`, a compatible family of elements on a sieve is the same as a co-cone over the sieve. Constructing a co-cone from a compatible family works for any presieve, as does constructing a family of elements from a co-cone. Showing compatibility of the family needs the sieve condition. 
Note: This is related to `CategoryTheory.Presheaf.conesEquivSieveCompatibleFamily` -/ def compatibleYonedaFamily_toCocone (R : Presieve X) (W : C) (x : FamilyOfElements (yoneda.obj W) R) (hx : FamilyOfElements.Compatible x) : Cocone (R.diagram) where pt := W ι := { app := fun f => x f.obj.hom f.property naturality := by intro g₁ g₂ F simp only [Functor.id_obj, Functor.comp_obj, fullSubcategoryInclusion.obj, Over.forget_obj, Functor.const_obj_obj, Functor.comp_map, fullSubcategoryInclusion.map, Over.forget_map, Functor.const_obj_map, Category.comp_id] rw [← Category.id_comp (x g₁.obj.hom g₁.property)] apply hx simp only [Functor.id_obj, Over.w, Opposite.unop_op, Category.id_comp] } def yonedaFamilyOfElements_fromCocone (R : Presieve X) (s : Cocone (diagram R)) : FamilyOfElements (yoneda.obj s.pt) R := fun _ f hf => s.ι.app ⟨Over.mk f, hf⟩ end Presieve namespace Sieve open Presieve variable {C : Type u} [Category.{v} C] variable {X : C} theorem yonedaFamily_fromCocone_compatible (S : Sieve X) (s : Cocone (diagram S.arrows)) : FamilyOfElements.Compatible <| yonedaFamilyOfElements_fromCocone S.arrows s := by intro Y₁ Y₂ Z g₁ g₂ f₁ f₂ hf₁ hf₂ hgf have Hs := s.ι.naturality simp only [Functor.id_obj, yoneda_obj_obj, Opposite.unop_op, yoneda_obj_map, Quiver.Hom.unop_op] dsimp [yonedaFamilyOfElements_fromCocone] have hgf₁ : S.arrows (g₁ ≫ f₁) := by exact Sieve.downward_closed S hf₁ g₁ have hgf₂ : S.arrows (g₂ ≫ f₂) := by exact Sieve.downward_closed S hf₂ g₂ let F : (Over.mk (g₁ ≫ f₁) : Over X) ⟶ (Over.mk (g₂ ≫ f₂) : Over X) := Over.homMk (𝟙 Z) let F₁ : (Over.mk (g₁ ≫ f₁) : Over X) ⟶ (Over.mk f₁ : Over X) := Over.homMk g₁ let F₂ : (Over.mk (g₂ ≫ f₂) : Over X) ⟶ (Over.mk f₂ : Over X) := Over.homMk g₂ have hF := @Hs ⟨Over.mk (g₁ ≫ f₁), hgf₁⟩ ⟨Over.mk (g₂ ≫ f₂), hgf₂⟩ F have hF₁ := @Hs ⟨Over.mk (g₁ ≫ f₁), hgf₁⟩ ⟨Over.mk f₁, hf₁⟩ F₁ have hF₂ := @Hs ⟨Over.mk (g₂ ≫ f₂), hgf₂⟩ ⟨Over.mk f₂, hf₂⟩ F₂ aesop_cat /-- The base of a sieve `S` is a colimit of `S` iff all Yoneda-presheaves 
satisfy the sheaf condition for `S`. -/ theorem forallYonedaIsSheaf_iff_colimit (S : Sieve X) : (∀ W : C, Presieve.IsSheafFor (yoneda.obj W) (S : Presieve X)) ↔ Nonempty (IsColimit S.arrows.cocone) := by constructor · intro H refine Nonempty.intro ?_ exact { desc := fun s => H s.pt (yonedaFamilyOfElements_fromCocone S.arrows s) (yonedaFamily_fromCocone_compatible S s) |>.choose fac := by intro s f replace H := H s.pt (yonedaFamilyOfElements_fromCocone S.arrows s) (yonedaFamily_fromCocone_compatible S s) have ht := H.choose_spec.1 f.obj.hom f.property aesop_cat uniq := by intro s Fs HFs replace H := H s.pt (yonedaFamilyOfElements_fromCocone S.arrows s) (yonedaFamily_fromCocone_compatible S s) apply H.choose_spec.2 Fs exact fun _ f hf => HFs ⟨Over.mk f, hf⟩ } · intro H W x hx replace H := Classical.choice H let s := compatibleYonedaFamily_toCocone S W x hx use H.desc s constructor · exact fun _ f hf => (H.fac s) ⟨Over.mk f, hf⟩ · exact fun g hg => H.uniq s g (fun ⟨⟨f, _, hom⟩, hf⟩ => hg hom hf) end Sieve variable {C : Type u} [Category.{v} C] variable (J : GrothendieckTopology C) /-- The category of sheaves on a grothendieck topology. -/ structure SheafOfTypes (J : GrothendieckTopology C) : Type max u v (w + 1) where /-- the underlying presheaf -/ val : Cᵒᵖ ⥤ Type w /-- the condition that the presheaf is a sheaf -/ cond : Presieve.IsSheaf J val namespace SheafOfTypes variable {J} /-- Morphisms between sheaves of types are just morphisms between the underlying presheaves. -/ @[ext] structure Hom (X Y : SheafOfTypes J) where /-- a morphism between the underlying presheaves -/ val : X.val ⟶ Y.val @[simps] instance : Category (SheafOfTypes J) where Hom := Hom id _ := ⟨𝟙 _⟩ comp f g := ⟨f.val ≫ g.val⟩ id_comp _ := Hom.ext <| id_comp _ comp_id _ := Hom.ext <| comp_id _ assoc _ _ _ := Hom.ext <| assoc _ _ _ -- Porting note (#11041): we need to restate the `ext` lemma in terms of the categorical morphism. -- not just the underlying structure. 
-- It would be nice if this boilerplate weren't necessary. @[ext] theorem Hom.ext' {X Y : SheafOfTypes J} (f g : X ⟶ Y) (w : f.val = g.val) : f = g := Hom.ext w -- Let's make the inhabited linter happy... instance (X : SheafOfTypes J) : Inhabited (Hom X X) := ⟨𝟙 X⟩ end SheafOfTypes /-- The inclusion functor from sheaves to presheaves. -/ @[simps] def sheafOfTypesToPresheaf : SheafOfTypes J ⥤ Cᵒᵖ ⥤ Type w where obj := SheafOfTypes.val map f := f.val map_id _ := rfl map_comp _ _ := rfl instance : (sheafOfTypesToPresheaf J).Full where map_surjective f := ⟨⟨f⟩, rfl⟩ instance : (sheafOfTypesToPresheaf J).Faithful where /-- The category of sheaves on the bottom (trivial) grothendieck topology is equivalent to the category of presheaves. -/ @[simps] def sheafOfTypesBotEquiv : SheafOfTypes (⊥ : GrothendieckTopology C) ≌ Cᵒᵖ ⥤ Type w where functor := sheafOfTypesToPresheaf _ inverse := { obj := fun P => ⟨P, Presieve.isSheaf_bot⟩ map := fun f => ⟨f⟩ } unitIso := Iso.refl _ counitIso := Iso.refl _ instance : Inhabited (SheafOfTypes (⊥ : GrothendieckTopology C)) := ⟨sheafOfTypesBotEquiv.inverse.obj ((Functor.const _).obj PUnit)⟩ end CategoryTheory
/- File: CategoryTheory/Sites/Sieves.lean -/
/- Copyright (c) 2020 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, E. W. Ayers -/ import Mathlib.CategoryTheory.Comma.Over import Mathlib.CategoryTheory.Limits.Shapes.Pullback.HasPullback import Mathlib.CategoryTheory.Yoneda import Mathlib.Data.Set.Lattice import Mathlib.Order.CompleteLattice /-! # Theory of sieves - For an object `X` of a category `C`, a `Sieve X` is a set of morphisms to `X` which is closed under left-composition. - The complete lattice structure on sieves is given, as well as the Galois insertion given by downward-closing. - A `Sieve X` (functorially) induces a presheaf on `C` together with a monomorphism to the yoneda embedding of `X`. ## Tags sieve, pullback -/ universe v₁ v₂ v₃ u₁ u₂ u₃ namespace CategoryTheory open Category Limits variable {C : Type u₁} [Category.{v₁} C] {D : Type u₂} [Category.{v₂} D] (F : C ⥤ D) variable {X Y Z : C} (f : Y ⟶ X) /-- A set of arrows all with codomain `X`. -/ def Presieve (X : C) := ∀ ⦃Y⦄, Set (Y ⟶ X)-- deriving CompleteLattice instance : CompleteLattice (Presieve X) := by dsimp [Presieve] infer_instance namespace Presieve noncomputable instance : Inhabited (Presieve X) := ⟨⊤⟩ /-- The full subcategory of the over category `C/X` consisting of arrows which belong to a presieve on `X`. -/ abbrev category {X : C} (P : Presieve X) := FullSubcategory fun f : Over X => P f.hom /-- Construct an object of `P.category`. -/ abbrev categoryMk {X : C} (P : Presieve X) {Y : C} (f : Y ⟶ X) (hf : P f) : P.category := ⟨Over.mk f, hf⟩ /-- Given a sieve `S` on `X : C`, its associated diagram `S.diagram` is defined to be the natural functor from the full subcategory of the over category `C/X` consisting of arrows in `S` to `C`. 
-/ abbrev diagram (S : Presieve X) : S.category ⥤ C := fullSubcategoryInclusion _ ⋙ Over.forget X /-- Given a sieve `S` on `X : C`, its associated cocone `S.cocone` is defined to be the natural cocone over the diagram defined above with cocone point `X`. -/ abbrev cocone (S : Presieve X) : Cocone S.diagram := (Over.forgetCocone X).whisker (fullSubcategoryInclusion _) /-- Given a set of arrows `S` all with codomain `X`, and a set of arrows with codomain `Y` for each `f : Y ⟶ X` in `S`, produce a set of arrows with codomain `X`: `{ g ≫ f | (f : Y ⟶ X) ∈ S, (g : Z ⟶ Y) ∈ R f }`. -/ def bind (S : Presieve X) (R : ∀ ⦃Y⦄ ⦃f : Y ⟶ X⦄, S f → Presieve Y) : Presieve X := fun Z h => ∃ (Y : C) (g : Z ⟶ Y) (f : Y ⟶ X) (H : S f), R H g ∧ g ≫ f = h @[simp] theorem bind_comp {S : Presieve X} {R : ∀ ⦃Y : C⦄ ⦃f : Y ⟶ X⦄, S f → Presieve Y} {g : Z ⟶ Y} (h₁ : S f) (h₂ : R h₁ g) : bind S R (g ≫ f) := ⟨_, _, _, h₁, h₂, rfl⟩ -- Porting note: it seems the definition of `Presieve` must be unfolded in order to define -- this inductive type, it was thus renamed `singleton'` -- Note we can't make this into `HasSingleton` because of the out-param. /-- The singleton presieve. -/ inductive singleton' : ⦃Y : C⦄ → (Y ⟶ X) → Prop | mk : singleton' f /-- The singleton presieve. -/ def singleton : Presieve X := singleton' f lemma singleton.mk {f : Y ⟶ X} : singleton f f := singleton'.mk @[simp] theorem singleton_eq_iff_domain (f g : Y ⟶ X) : singleton f g ↔ f = g := by constructor · rintro ⟨a, rfl⟩ rfl · rintro rfl apply singleton.mk theorem singleton_self : singleton f f := singleton.mk /-- Pullback a set of arrows with given codomain along a fixed map, by taking the pullback in the category. This is not the same as the arrow set of `Sieve.pullback`, but there is a relation between them in `pullbackArrows_comm`. 
-/ inductive pullbackArrows [HasPullbacks C] (R : Presieve X) : Presieve Y | mk (Z : C) (h : Z ⟶ X) : R h → pullbackArrows _ (pullback.snd h f) theorem pullback_singleton [HasPullbacks C] (g : Z ⟶ X) : pullbackArrows f (singleton g) = singleton (pullback.snd g f) := by funext W ext h constructor · rintro ⟨W, _, _, _⟩ exact singleton.mk · rintro ⟨_⟩ exact pullbackArrows.mk Z g singleton.mk /-- Construct the presieve given by the family of arrows indexed by `ι`. -/ inductive ofArrows {ι : Type*} (Y : ι → C) (f : ∀ i, Y i ⟶ X) : Presieve X | mk (i : ι) : ofArrows _ _ (f i) theorem ofArrows_pUnit : (ofArrows _ fun _ : PUnit => f) = singleton f := by funext Y ext g constructor · rintro ⟨_⟩ apply singleton.mk · rintro ⟨_⟩ exact ofArrows.mk PUnit.unit theorem ofArrows_pullback [HasPullbacks C] {ι : Type*} (Z : ι → C) (g : ∀ i : ι, Z i ⟶ X) : (ofArrows (fun i => pullback (g i) f) fun i => pullback.snd _ _) = pullbackArrows f (ofArrows Z g) := by funext T ext h constructor · rintro ⟨hk⟩ exact pullbackArrows.mk _ _ (ofArrows.mk hk) · rintro ⟨W, k, hk₁⟩ cases' hk₁ with i hi apply ofArrows.mk theorem ofArrows_bind {ι : Type*} (Z : ι → C) (g : ∀ i : ι, Z i ⟶ X) (j : ∀ ⦃Y⦄ (f : Y ⟶ X), ofArrows Z g f → Type*) (W : ∀ ⦃Y⦄ (f : Y ⟶ X) (H), j f H → C) (k : ∀ ⦃Y⦄ (f : Y ⟶ X) (H i), W f H i ⟶ Y) : ((ofArrows Z g).bind fun Y f H => ofArrows (W f H) (k f H)) = ofArrows (fun i : Σi, j _ (ofArrows.mk i) => W (g i.1) _ i.2) fun ij => k (g ij.1) _ ij.2 ≫ g ij.1 := by funext Y ext f constructor · rintro ⟨_, _, _, ⟨i⟩, ⟨i'⟩, rfl⟩ exact ofArrows.mk (Sigma.mk _ _) · rintro ⟨i⟩ exact bind_comp _ (ofArrows.mk _) (ofArrows.mk _) theorem ofArrows_surj {ι : Type*} {Y : ι → C} (f : ∀ i, Y i ⟶ X) {Z : C} (g : Z ⟶ X) (hg : ofArrows Y f g) : ∃ (i : ι) (h : Y i = Z), g = eqToHom h.symm ≫ f i := by cases' hg with i exact ⟨i, rfl, by simp only [eqToHom_refl, id_comp]⟩ /-- Given a presieve on `F(X)`, we can define a presieve on `X` by taking the preimage via `F`. 
-/ def functorPullback (R : Presieve (F.obj X)) : Presieve X := fun _ f => R (F.map f) @[simp] theorem functorPullback_mem (R : Presieve (F.obj X)) {Y} (f : Y ⟶ X) : R.functorPullback F f ↔ R (F.map f) := Iff.rfl @[simp] theorem functorPullback_id (R : Presieve X) : R.functorPullback (𝟭 _) = R := rfl /-- Given a presieve `R` on `X`, the predicate `R.hasPullbacks` means that for all arrows `f` and `g` in `R`, the pullback of `f` and `g` exists. -/ class hasPullbacks (R : Presieve X) : Prop where /-- For all arrows `f` and `g` in `R`, the pullback of `f` and `g` exists. -/ has_pullbacks : ∀ {Y Z} {f : Y ⟶ X} (_ : R f) {g : Z ⟶ X} (_ : R g), HasPullback f g instance (R : Presieve X) [HasPullbacks C] : R.hasPullbacks := ⟨fun _ _ ↦ inferInstance⟩ instance {α : Type v₂} {X : α → C} {B : C} (π : (a : α) → X a ⟶ B) [(Presieve.ofArrows X π).hasPullbacks] (a b : α) : HasPullback (π a) (π b) := Presieve.hasPullbacks.has_pullbacks (Presieve.ofArrows.mk _) (Presieve.ofArrows.mk _) section FunctorPushforward variable {E : Type u₃} [Category.{v₃} E] (G : D ⥤ E) /-- Given a presieve on `X`, we can define a presieve on `F(X)` (which is actually a sieve) by taking the sieve generated by the image via `F`. -/ def functorPushforward (S : Presieve X) : Presieve (F.obj X) := fun Y f => ∃ (Z : C) (g : Z ⟶ X) (h : Y ⟶ F.obj Z), S g ∧ f = h ≫ F.map g -- Porting note: removed @[nolint hasNonemptyInstance] /-- An auxiliary definition in order to fix the choice of the preimages between various definitions. -/ structure FunctorPushforwardStructure (S : Presieve X) {Y} (f : Y ⟶ F.obj X) where /-- an object in the source category -/ preobj : C /-- a map in the source category which has to be in the presieve -/ premap : preobj ⟶ X /-- the morphism which appear in the factorisation -/ lift : Y ⟶ F.obj preobj /-- the condition that `premap` is in the presieve -/ cover : S premap /-- the factorisation of the morphism -/ fac : f = lift ≫ F.map premap /-- The fixed choice of a preimage. 
-/ noncomputable def getFunctorPushforwardStructure {F : C ⥤ D} {S : Presieve X} {Y : D} {f : Y ⟶ F.obj X} (h : S.functorPushforward F f) : FunctorPushforwardStructure F S f := by choose Z f' g h₁ h using h exact ⟨Z, f', g, h₁, h⟩ theorem functorPushforward_comp (R : Presieve X) : R.functorPushforward (F ⋙ G) = (R.functorPushforward F).functorPushforward G := by funext x ext f constructor · rintro ⟨X, f₁, g₁, h₁, rfl⟩ exact ⟨F.obj X, F.map f₁, g₁, ⟨X, f₁, 𝟙 _, h₁, by simp⟩, rfl⟩ · rintro ⟨X, f₁, g₁, ⟨X', f₂, g₂, h₁, rfl⟩, rfl⟩ exact ⟨X', f₂, g₁ ≫ G.map g₂, h₁, by simp⟩ theorem image_mem_functorPushforward (R : Presieve X) {f : Y ⟶ X} (h : R f) : R.functorPushforward F (F.map f) := ⟨Y, f, 𝟙 _, h, by simp⟩ end FunctorPushforward end Presieve /-- For an object `X` of a category `C`, a `Sieve X` is a set of morphisms to `X` which is closed under left-composition. -/ structure Sieve {C : Type u₁} [Category.{v₁} C] (X : C) where /-- the underlying presieve -/ arrows : Presieve X /-- stability by precomposition -/ downward_closed : ∀ {Y Z f} (_ : arrows f) (g : Z ⟶ Y), arrows (g ≫ f) namespace Sieve instance : CoeFun (Sieve X) fun _ => Presieve X := ⟨Sieve.arrows⟩ initialize_simps_projections Sieve (arrows → apply) variable {S R : Sieve X} attribute [simp] downward_closed theorem arrows_ext : ∀ {R S : Sieve X}, R.arrows = S.arrows → R = S := by rintro ⟨_, _⟩ ⟨_, _⟩ rfl rfl @[ext] protected theorem ext {R S : Sieve X} (h : ∀ ⦃Y⦄ (f : Y ⟶ X), R f ↔ S f) : R = S := arrows_ext <| funext fun _ => funext fun f => propext <| h f open Lattice /-- The supremum of a collection of sieves: the union of them all. -/ protected def sup (𝒮 : Set (Sieve X)) : Sieve X where arrows Y := { f | ∃ S ∈ 𝒮, Sieve.arrows S f } downward_closed {_ _ f} hf _ := by obtain ⟨S, hS, hf⟩ := hf exact ⟨S, hS, S.downward_closed hf _⟩ /-- The infimum of a collection of sieves: the intersection of them all. 
-/
protected def inf (𝒮 : Set (Sieve X)) : Sieve X where
  arrows _ := { f | ∀ S ∈ 𝒮, Sieve.arrows S f }
  -- an arrow lying in every sieve of `𝒮` still lies in every one after precomposition
  downward_closed {_ _ _} hf g S H := S.downward_closed (hf S H) g

/-- The union of two sieves is a sieve. -/
protected def union (S R : Sieve X) : Sieve X where
  arrows Y f := S f ∨ R f
  downward_closed := by rintro _ _ _ (h | h) g <;> simp [h]

/-- The intersection of two sieves is a sieve. -/
protected def inter (S R : Sieve X) : Sieve X where
  arrows Y f := S f ∧ R f
  downward_closed := by
    rintro _ _ _ ⟨h₁, h₂⟩ g
    simp [h₁, h₂]

/-- Sieves on an object `X` form a complete lattice.
We generate this directly rather than using the galois insertion for nicer definitional
properties.
-/
instance : CompleteLattice (Sieve X) where
  -- `S ≤ R` iff every arrow of `S` is an arrow of `R`
  le S R := ∀ ⦃Y⦄ (f : Y ⟶ X), S f → R f
  le_refl S f q := id
  le_trans S₁ S₂ S₃ S₁₂ S₂₃ Y f h := S₂₃ _ (S₁₂ _ h)
  le_antisymm S R p q := Sieve.ext fun Y f => ⟨p _, q _⟩
  -- the top sieve contains every arrow; the bottom sieve contains none
  top :=
    { arrows := fun _ => Set.univ
      downward_closed := fun _ _ => ⟨⟩ }
  bot :=
    { arrows := fun _ => ∅
      downward_closed := False.elim }
  sup := Sieve.union
  inf := Sieve.inter
  sSup := Sieve.sup
  sInf := Sieve.inf
  le_sSup 𝒮 S hS Y f hf := ⟨S, hS, hf⟩
  sSup_le := fun s a ha Y f ⟨b, hb, hf⟩ => (ha b hb) _ hf
  sInf_le _ _ hS _ _ h := h _ hS
  le_sInf _ _ hS _ _ hf _ hR := hS _ hR _ hf
  le_sup_left _ _ _ _ := Or.inl
  le_sup_right _ _ _ _ := Or.inr
  sup_le _ _ _ h₁ h₂ _ f := by
    -- case on which side of the union the arrow came from
    rintro (hf | hf)
    · exact h₁ _ hf
    · exact h₂ _ hf
  inf_le_left _ _ _ _ := And.left
  inf_le_right _ _ _ _ := And.right
  le_inf _ _ _ p q _ _ z := ⟨p _ z, q _ z⟩
  le_top _ _ _ _ := trivial
  bot_le _ _ _ := False.elim

/-- The maximal sieve always exists.
-/ instance sieveInhabited : Inhabited (Sieve X) := ⟨⊤⟩ @[simp] theorem sInf_apply {Ss : Set (Sieve X)} {Y} (f : Y ⟶ X) : sInf Ss f ↔ ∀ (S : Sieve X) (_ : S ∈ Ss), S f := Iff.rfl @[simp] theorem sSup_apply {Ss : Set (Sieve X)} {Y} (f : Y ⟶ X) : sSup Ss f ↔ ∃ (S : Sieve X) (_ : S ∈ Ss), S f := by simp [sSup, Sieve.sup, setOf] @[simp] theorem inter_apply {R S : Sieve X} {Y} (f : Y ⟶ X) : (R ⊓ S) f ↔ R f ∧ S f := Iff.rfl @[simp] theorem union_apply {R S : Sieve X} {Y} (f : Y ⟶ X) : (R ⊔ S) f ↔ R f ∨ S f := Iff.rfl @[simp] theorem top_apply (f : Y ⟶ X) : (⊤ : Sieve X) f := trivial /-- Generate the smallest sieve containing the given set of arrows. -/ @[simps] def generate (R : Presieve X) : Sieve X where arrows Z f := ∃ (Y : _) (h : Z ⟶ Y) (g : Y ⟶ X), R g ∧ h ≫ g = f downward_closed := by rintro Y Z _ ⟨W, g, f, hf, rfl⟩ h exact ⟨_, h ≫ g, _, hf, by simp⟩ /-- Given a presieve on `X`, and a sieve on each domain of an arrow in the presieve, we can bind to produce a sieve on `X`. -/ @[simps] def bind (S : Presieve X) (R : ∀ ⦃Y⦄ ⦃f : Y ⟶ X⦄, S f → Sieve Y) : Sieve X where arrows := S.bind fun Y f h => R h downward_closed := by rintro Y Z f ⟨W, f, h, hh, hf, rfl⟩ g exact ⟨_, g ≫ f, _, hh, by simp [hf]⟩ open Order Lattice theorem generate_le_iff (R : Presieve X) (S : Sieve X) : generate R ≤ S ↔ R ≤ S := ⟨fun H Y g hg => H _ ⟨_, 𝟙 _, _, hg, id_comp _⟩, fun ss Y f => by rintro ⟨Z, f, g, hg, rfl⟩ exact S.downward_closed (ss Z hg) f⟩ @[deprecated (since := "2024-07-13")] alias sets_iff_generate := generate_le_iff /-- Show that there is a galois insertion (generate, set_over). 
-/
def giGenerate : GaloisInsertion (generate : Presieve X → Sieve X) arrows where
  gc := generate_le_iff
  choice 𝒢 _ := generate 𝒢
  choice_eq _ _ := rfl
  -- every arrow of a sieve factors through itself via the identity
  le_l_u _ _ _ hf := ⟨_, 𝟙 _, _, hf, id_comp _⟩

/-- A presieve is contained in the sieve it generates. -/
theorem le_generate (R : Presieve X) : R ≤ generate R :=
  giGenerate.gc.le_u_l R

/-- Generating from the arrows of a sieve gives back that sieve. -/
@[simp]
theorem generate_sieve (S : Sieve X) : generate S = S :=
  giGenerate.l_u_eq S

/-- If the identity arrow is in a sieve, the sieve is maximal. -/
theorem id_mem_iff_eq_top : S (𝟙 X) ↔ S = ⊤ :=
  ⟨fun h => top_unique fun Y f _ => by simpa using downward_closed _ h f, fun h => h.symm ▸ trivial⟩

/-- If an arrow set contains a split epi, it generates the maximal sieve. -/
theorem generate_of_contains_isSplitEpi {R : Presieve X} (f : Y ⟶ X) [IsSplitEpi f] (hf : R f) :
    generate R = ⊤ := by
  -- it suffices that the identity lies in the generated sieve,
  -- and indeed `𝟙 X` factors as `section_ f ≫ f`
  rw [← id_mem_iff_eq_top]
  exact ⟨_, section_ f, f, hf, by simp⟩

/-- The singleton presieve on a split epi generates the maximal sieve. -/
@[simp]
theorem generate_of_singleton_isSplitEpi (f : Y ⟶ X) [IsSplitEpi f] :
    generate (Presieve.singleton f) = ⊤ :=
  generate_of_contains_isSplitEpi f (Presieve.singleton_self _)

/-- The maximal presieve generates the maximal sieve. -/
@[simp]
theorem generate_top : generate (⊤ : Presieve X) = ⊤ :=
  generate_of_contains_isSplitEpi (𝟙 _) ⟨⟩

/-- Membership in a sieve is unchanged by precomposing with an isomorphism. -/
@[simp]
lemma comp_mem_iff (i : X ⟶ Y) (f : Y ⟶ Z) [IsIso i] (S : Sieve Z) :
    S (i ≫ f) ↔ S f := by
  refine ⟨fun H ↦ ?_, fun H ↦ S.downward_closed H _⟩
  -- `f` is the precomposition of `i ≫ f` with `inv i`, hence in `S` by downward closure
  convert S.downward_closed H (inv i)
  simp

/-- The sieve of `X` generated by family of morphisms `Y i ⟶ X`.
-/ abbrev ofArrows {I : Type*} {X : C} (Y : I → C) (f : ∀ i, Y i ⟶ X) : Sieve X := generate (Presieve.ofArrows Y f) lemma ofArrows_mk {I : Type*} {X : C} (Y : I → C) (f : ∀ i, Y i ⟶ X) (i : I) : ofArrows Y f (f i) := ⟨_, 𝟙 _, _, ⟨i⟩, by simp⟩ lemma mem_ofArrows_iff {I : Type*} {X : C} (Y : I → C) (f : ∀ i, Y i ⟶ X) {W : C} (g : W ⟶ X) : ofArrows Y f g ↔ ∃ (i : I) (a : W ⟶ Y i), g = a ≫ f i := by constructor · rintro ⟨T, a, b, ⟨i⟩, rfl⟩ exact ⟨i, a, rfl⟩ · rintro ⟨i, a, rfl⟩ apply downward_closed _ (ofArrows_mk Y f i) /-- The sieve of `X : C` that is generated by a family of objects `Y : I → C`: it consists of morphisms to `X` which factor through at least one of the `Y i`. -/ def ofObjects {I : Type*} (Y : I → C) (X : C) : Sieve X where arrows Z _ := ∃ (i : I), Nonempty (Z ⟶ Y i) downward_closed := by rintro Z₁ Z₂ p ⟨i, ⟨f⟩⟩ g exact ⟨i, ⟨g ≫ f⟩⟩ lemma mem_ofObjects_iff {I : Type*} (Y : I → C) {Z X : C} (g : Z ⟶ X) : ofObjects Y X g ↔ ∃ (i : I), Nonempty (Z ⟶ Y i) := by rfl lemma ofArrows_le_ofObjects {I : Type*} (Y : I → C) {X : C} (f : ∀ i, Y i ⟶ X) : Sieve.ofArrows Y f ≤ Sieve.ofObjects Y X := by intro W g hg rw [mem_ofArrows_iff] at hg obtain ⟨i, a, rfl⟩ := hg exact ⟨i, ⟨a⟩⟩ lemma ofArrows_eq_ofObjects {X : C} (hX : IsTerminal X) {I : Type*} (Y : I → C) (f : ∀ i, Y i ⟶ X) : ofArrows Y f = ofObjects Y X := by refine le_antisymm (ofArrows_le_ofObjects Y f) (fun W g => ?_) rw [mem_ofArrows_iff, mem_ofObjects_iff] rintro ⟨i, ⟨h⟩⟩ exact ⟨i, h, hX.hom_ext _ _⟩ /-- Given a morphism `h : Y ⟶ X`, send a sieve S on X to a sieve on Y as the inverse image of S with `_ ≫ h`. That is, `Sieve.pullback S h := (≫ h) '⁻¹ S`. 
-/
@[simps]
def pullback (h : Y ⟶ X) (S : Sieve X) : Sieve Y where
  -- an arrow is in the pullback exactly when its postcomposition with `h` is in `S`
  arrows Y sl := S (sl ≫ h)
  downward_closed g := by simp [g]

/-- Pulling back along the identity does not change a sieve. -/
@[simp]
theorem pullback_id : S.pullback (𝟙 _) = S := by simp [Sieve.ext_iff]

/-- The pullback of the maximal sieve is maximal. -/
@[simp]
theorem pullback_top {f : Y ⟶ X} : (⊤ : Sieve X).pullback f = ⊤ :=
  top_unique fun _ _ => id

/-- Pulling back along a composite is pulling back twice, in order. -/
theorem pullback_comp {f : Y ⟶ X} {g : Z ⟶ Y} (S : Sieve X) :
    S.pullback (g ≫ f) = (S.pullback f).pullback g := by simp [Sieve.ext_iff]

/-- Pullback commutes with binary intersection of sieves. -/
@[simp]
theorem pullback_inter {f : Y ⟶ X} (S R : Sieve X) :
    (S ⊓ R).pullback f = S.pullback f ⊓ R.pullback f := by simp [Sieve.ext_iff]

/-- An arrow `f` belongs to `S` iff the pullback of `S` along `f` is maximal. -/
theorem pullback_eq_top_iff_mem (f : Y ⟶ X) : S f ↔ S.pullback f = ⊤ := by
  rw [← id_mem_iff_eq_top, pullback_apply, id_comp]

/-- One direction of `pullback_eq_top_iff_mem`, stated for rewriting. -/
theorem pullback_eq_top_of_mem (S : Sieve X) {f : Y ⟶ X} : S f → S.pullback f = ⊤ :=
  (pullback_eq_top_iff_mem f).1

/-- If some `Y i` receives a morphism `g` from `X`, then `ofObjects Y X` is maximal:
any `h : Z ⟶ X` maps to `Y i` via `h ≫ g`. -/
lemma pullback_ofObjects_eq_top
    {I : Type*} (Y : I → C) {X : C} {i : I} (g : X ⟶ Y i) :
    ofObjects Y X = ⊤ := by
  ext Z h
  simp only [top_apply, iff_true]
  rw [mem_ofObjects_iff ]
  exact ⟨i, ⟨h ≫ g⟩⟩

/-- Push a sieve `R` on `Y` forward along an arrow `f : Y ⟶ X`: `gf : Z ⟶ X`
is in the sieve if `gf` factors through some `g : Z ⟶ Y` which is in `R`.
-/
@[simps]
def pushforward (f : Y ⟶ X) (R : Sieve Y) : Sieve X where
  -- an arrow is in the pushforward when it factors through `f` via an arrow of `R`
  arrows Z gf := ∃ g, g ≫ f = gf ∧ R g
  downward_closed := fun ⟨j, k, z⟩ h => ⟨h ≫ j, by simp [k], by simp [z]⟩

/-- The postcomposition with `f` of an arrow of `R` lies in the pushforward of `R` along `f`. -/
theorem pushforward_apply_comp {R : Sieve Y} {Z : C} {g : Z ⟶ Y} (hg : R g) (f : Y ⟶ X) :
    R.pushforward f (g ≫ f) :=
  ⟨g, rfl, hg⟩

/-- Pushing forward along a composite is pushing forward twice, in order. -/
theorem pushforward_comp {f : Y ⟶ X} {g : Z ⟶ Y} (R : Sieve Z) :
    R.pushforward (g ≫ f) = (R.pushforward g).pushforward f :=
  Sieve.ext fun W h =>
    ⟨fun ⟨f₁, hq, hf₁⟩ => ⟨f₁ ≫ g, by simpa, f₁, rfl, hf₁⟩, fun ⟨y, hy, z, hR, hz⟩ =>
      ⟨z, by rw [← Category.assoc, hR]; tauto⟩⟩

/-- Pushforward along `f` is left adjoint to pullback along `f`. -/
theorem galoisConnection (f : Y ⟶ X) :
    GaloisConnection (Sieve.pushforward f) (Sieve.pullback f) := fun _ _ =>
  ⟨fun hR _ g hg => hR _ ⟨g, rfl, hg⟩, fun hS _ _ ⟨h, hg, hh⟩ => hg ▸ hS h hh⟩

/-- Pullback along a morphism is monotone in the sieve. -/
theorem pullback_monotone (f : Y ⟶ X) : Monotone (Sieve.pullback f) :=
  (galoisConnection f).monotone_u

/-- Pushforward along a morphism is monotone in the sieve. -/
theorem pushforward_monotone (f : Y ⟶ X) : Monotone (Sieve.pushforward f) :=
  (galoisConnection f).monotone_l

/-- The unit of the pushforward-pullback adjunction. -/
theorem le_pushforward_pullback (f : Y ⟶ X) (R : Sieve Y) : R ≤ (R.pushforward f).pullback f :=
  (galoisConnection f).le_u_l _

/-- The counit of the pushforward-pullback adjunction. -/
theorem pullback_pushforward_le (f : Y ⟶ X) (R : Sieve X) : (R.pullback f).pushforward f ≤ R :=
  (galoisConnection f).l_u_le _

/-- Pushforward preserves binary unions, as a left adjoint. -/
theorem pushforward_union {f : Y ⟶ X} (S R : Sieve Y) :
    (S ⊔ R).pushforward f = S.pushforward f ⊔ R.pushforward f :=
  (galoisConnection f).l_sup

/-- For `f` in `S`, the pushforward along `f` of the chosen sieve `R h` is contained
in the bind of `S` and `R`. -/
theorem pushforward_le_bind_of_mem (S : Presieve X)
    (R : ∀ ⦃Y : C⦄ ⦃f : Y ⟶ X⦄, S f → Sieve Y) (f : Y ⟶ X) (h : S f) :
    (R h).pushforward f ≤ bind S R := by
  rintro Z _ ⟨g, rfl, hg⟩
  exact ⟨_, g, f, h, hg, rfl⟩

/-- For `f` in `S`, the chosen sieve `R h` is contained in the pullback along `f`
of the bind of `S` and `R`; the adjoint transpose of `pushforward_le_bind_of_mem`. -/
theorem le_pullback_bind (S : Presieve X) (R : ∀ ⦃Y : C⦄ ⦃f : Y ⟶ X⦄, S f → Sieve Y) (f : Y ⟶ X)
    (h : S f) : R h ≤ (bind S R).pullback f := by
  rw [← galoisConnection f]
  apply pushforward_le_bind_of_mem

/-- If `f` is a monomorphism, the pushforward-pullback adjunction on sieves is coreflective.
-/ def galoisCoinsertionOfMono (f : Y ⟶ X) [Mono f] : GaloisCoinsertion (Sieve.pushforward f) (Sieve.pullback f) := by apply (galoisConnection f).toGaloisCoinsertion rintro S Z g ⟨g₁, hf, hg₁⟩ rw [cancel_mono f] at hf rwa [← hf] /-- If `f` is a split epi, the pushforward-pullback adjunction on sieves is reflective. -/ def galoisInsertionOfIsSplitEpi (f : Y ⟶ X) [IsSplitEpi f] : GaloisInsertion (Sieve.pushforward f) (Sieve.pullback f) := by apply (galoisConnection f).toGaloisInsertion intro S Z g hg exact ⟨g ≫ section_ f, by simpa⟩ theorem pullbackArrows_comm [HasPullbacks C] {X Y : C} (f : Y ⟶ X) (R : Presieve X) : Sieve.generate (R.pullbackArrows f) = (Sieve.generate R).pullback f := by ext W g constructor · rintro ⟨_, h, k, hk, rfl⟩ cases' hk with W g hg change (Sieve.generate R).pullback f (h ≫ pullback.snd g f) rw [Sieve.pullback_apply, assoc, ← pullback.condition, ← assoc] exact Sieve.downward_closed _ (by exact Sieve.le_generate R W hg) (h ≫ pullback.fst g f) · rintro ⟨W, h, k, hk, comm⟩ exact ⟨_, _, _, Presieve.pullbackArrows.mk _ _ hk, pullback.lift_snd _ _ comm⟩ section Functor variable {E : Type u₃} [Category.{v₃} E] (G : D ⥤ E) /-- If `R` is a sieve, then the `CategoryTheory.Presieve.functorPullback` of `R` is actually a sieve. 
-/ @[simps] def functorPullback (R : Sieve (F.obj X)) : Sieve X where arrows := Presieve.functorPullback F R downward_closed := by intro _ _ f hf g unfold Presieve.functorPullback rw [F.map_comp] exact R.downward_closed hf (F.map g) @[simp] theorem functorPullback_arrows (R : Sieve (F.obj X)) : (R.functorPullback F).arrows = R.arrows.functorPullback F := rfl @[simp] theorem functorPullback_id (R : Sieve X) : R.functorPullback (𝟭 _) = R := by ext rfl theorem functorPullback_comp (R : Sieve ((F ⋙ G).obj X)) : R.functorPullback (F ⋙ G) = (R.functorPullback G).functorPullback F := by ext rfl theorem functorPushforward_extend_eq {R : Presieve X} : (generate R).arrows.functorPushforward F = R.functorPushforward F := by funext Y ext f constructor · rintro ⟨X', g, f', ⟨X'', g', f'', h₁, rfl⟩, rfl⟩ exact ⟨X'', f'', f' ≫ F.map g', h₁, by simp⟩ · rintro ⟨X', g, f', h₁, h₂⟩ exact ⟨X', g, f', le_generate R _ h₁, h₂⟩ /-- The sieve generated by the image of `R` under `F`. -/ @[simps] def functorPushforward (R : Sieve X) : Sieve (F.obj X) where arrows := R.arrows.functorPushforward F downward_closed := by intro _ _ f h g obtain ⟨X, α, β, hα, rfl⟩ := h exact ⟨X, α, g ≫ β, hα, by simp⟩ @[simp] theorem functorPushforward_id (R : Sieve X) : R.functorPushforward (𝟭 _) = R := by ext X f constructor · intro hf obtain ⟨X, g, h, hg, rfl⟩ := hf exact R.downward_closed hg h · intro hf exact ⟨X, f, 𝟙 _, hf, by simp⟩ theorem functorPushforward_comp (R : Sieve X) : R.functorPushforward (F ⋙ G) = (R.functorPushforward F).functorPushforward G := by ext simp [R.arrows.functorPushforward_comp F G] theorem functor_galoisConnection (X : C) : GaloisConnection (Sieve.functorPushforward F : Sieve X → Sieve (F.obj X)) (Sieve.functorPullback F) := by intro R S constructor · intro hle X f hf apply hle refine ⟨X, f, 𝟙 _, hf, ?_⟩ rw [id_comp] · rintro hle Y f ⟨X, g, h, hg, rfl⟩ apply Sieve.downward_closed S exact hle g hg theorem functorPullback_monotone (X : C) : Monotone (Sieve.functorPullback F : Sieve 
(F.obj X) → Sieve X) := (functor_galoisConnection F X).monotone_u theorem functorPushforward_monotone (X : C) : Monotone (Sieve.functorPushforward F : Sieve X → Sieve (F.obj X)) := (functor_galoisConnection F X).monotone_l theorem le_functorPushforward_pullback (R : Sieve X) : R ≤ (R.functorPushforward F).functorPullback F := (functor_galoisConnection F X).le_u_l _ theorem functorPullback_pushforward_le (R : Sieve (F.obj X)) : (R.functorPullback F).functorPushforward F ≤ R := (functor_galoisConnection F X).l_u_le _ theorem functorPushforward_union (S R : Sieve X) : (S ⊔ R).functorPushforward F = S.functorPushforward F ⊔ R.functorPushforward F := (functor_galoisConnection F X).l_sup theorem functorPullback_union (S R : Sieve (F.obj X)) : (S ⊔ R).functorPullback F = S.functorPullback F ⊔ R.functorPullback F := rfl theorem functorPullback_inter (S R : Sieve (F.obj X)) : (S ⊓ R).functorPullback F = S.functorPullback F ⊓ R.functorPullback F := rfl @[simp] theorem functorPushforward_bot (F : C ⥤ D) (X : C) : (⊥ : Sieve X).functorPushforward F = ⊥ := (functor_galoisConnection F X).l_bot @[simp] theorem functorPushforward_top (F : C ⥤ D) (X : C) : (⊤ : Sieve X).functorPushforward F = ⊤ := by refine (generate_sieve _).symm.trans ?_ apply generate_of_contains_isSplitEpi (𝟙 (F.obj X)) exact ⟨X, 𝟙 _, 𝟙 _, trivial, by simp⟩ @[simp] theorem functorPullback_bot (F : C ⥤ D) (X : C) : (⊥ : Sieve (F.obj X)).functorPullback F = ⊥ := rfl @[simp] theorem functorPullback_top (F : C ⥤ D) (X : C) : (⊤ : Sieve (F.obj X)).functorPullback F = ⊤ := rfl theorem image_mem_functorPushforward (R : Sieve X) {V} {f : V ⟶ X} (h : R f) : R.functorPushforward F (F.map f) := ⟨V, f, 𝟙 _, h, by simp⟩ /-- When `F` is essentially surjective and full, the galois connection is a galois insertion. 
-/
def essSurjFullFunctorGaloisInsertion [F.EssSurj] [F.Full] (X : C) :
    GaloisInsertion (Sieve.functorPushforward F : Sieve X → Sieve (F.obj X))
      (Sieve.functorPullback F) := by
  apply (functor_galoisConnection F X).toGaloisInsertion
  intro S Y f hf
  -- Since `F` is full and essentially surjective, `f` factors through the image of a
  -- preimage of `Y`, which exhibits it as a member of the pushforward sieve.
  refine ⟨_, F.preimage ((F.objObjPreimageIso Y).hom ≫ f), (F.objObjPreimageIso Y).inv, ?_⟩
  simpa using hf

/-- When `F` is fully faithful, the galois connection is a galois coinsertion. -/
def fullyFaithfulFunctorGaloisCoinsertion [F.Full] [F.Faithful] (X : C) :
    GaloisCoinsertion (Sieve.functorPushforward F : Sieve X → Sieve (F.obj X))
      (Sieve.functorPullback F) := by
  apply (functor_galoisConnection F X).toGaloisCoinsertion
  rintro S Y f ⟨Z, g, h, h₁, h₂⟩
  -- Faithfulness lets us cancel `F` from the factorization `F.map f = h ≫ F.map g`.
  rw [← F.map_preimage h, ← F.map_comp] at h₂
  rw [F.map_injective h₂]
  exact S.downward_closed h₁ _

/-- Pushing a sieve forward along the functor of an equivalence is the same as pulling it
back along the inverse functor, after transporting along the (inverse of the) unit. -/
lemma functorPushforward_functor (S : Sieve X) (e : C ≌ D) :
    S.functorPushforward e.functor =
      (S.pullback (e.unitInv.app X)).functorPullback e.inverse := by
  ext Y iYX
  constructor
  · rintro ⟨Z, iZX, iYZ, hiZX, rfl⟩
    simpa using S.downward_closed hiZX (e.inverse.map iYZ ≫ e.unitInv.app Z)
  · intro H
    exact ⟨_, e.inverse.map iYX ≫ e.unitInv.app X, e.counitInv.app Y, by simpa using H, by simp⟩

/-- Membership criterion for the pushforward of a sieve along the functor of an equivalence. -/
@[simp]
lemma mem_functorPushforward_functor {Y : D} {S : Sieve X} {e : C ≌ D}
    {f : Y ⟶ e.functor.obj X} :
    S.functorPushforward e.functor f ↔ S (e.inverse.map f ≫ e.unitInv.app X) :=
  congr($(S.functorPushforward_functor e).arrows f)

/-- Pushing a sieve forward along the inverse functor of an equivalence is the same as
pulling it back along the functor, after transporting along the counit. -/
lemma functorPushforward_inverse {X : D} (S : Sieve X) (e : C ≌ D) :
    S.functorPushforward e.inverse =
      (S.pullback (e.counit.app X)).functorPullback e.functor :=
  Sieve.functorPushforward_functor S e.symm

/-- Membership criterion for the pushforward of a sieve along the inverse functor of an
equivalence. -/
@[simp]
lemma mem_functorPushforward_inverse {X : D} {S : Sieve X} {e : C ≌ D}
    {f : Y ⟶ e.inverse.obj X} :
    S.functorPushforward e.inverse f ↔ S (e.functor.map f ≫ e.counit.app X) :=
  congr($(S.functorPushforward_inverse e).arrows f)

variable (e : C ≌ D)

/-- Pushing a sieve forward along both functors of an equivalence agrees with pulling back
along the inverse of the unit. -/
lemma functorPushforward_equivalence_eq_pullback {U : C} (S : Sieve U) :
    Sieve.functorPushforward e.inverse (Sieve.functorPushforward e.functor S) =
      Sieve.pullback (e.unitInv.app U) S := by
  ext; simp

/-- Pushing a sieve forward along both functors of an equivalence and then pulling back along
the unit recovers the original sieve. -/
lemma pullback_functorPushforward_equivalence_eq {X : C} (S : Sieve X) :
    Sieve.pullback (e.unit.app X)
      (Sieve.functorPushforward e.inverse (Sieve.functorPushforward e.functor S)) = S := by
  ext; simp

end Functor

/-- A sieve induces a presheaf. -/
@[simps]
def functor (S : Sieve X) : Cᵒᵖ ⥤ Type v₁ where
  obj Y := { g : Y.unop ⟶ X // S g }
  map f g := ⟨f.unop ≫ g.1, downward_closed _ g.2 _⟩

/-- If a sieve S is contained in a sieve T, then we have a morphism of presheaves on their
induced presheaves. -/
@[simps]
def natTransOfLe {S T : Sieve X} (h : S ≤ T) : S.functor ⟶ T.functor where
  app Y f := ⟨f.1, h _ f.2⟩

/-- The natural inclusion from the functor induced by a sieve to the yoneda embedding. -/
@[simps]
def functorInclusion (S : Sieve X) : S.functor ⟶ yoneda.obj X where
  app Y f := f.1

/-- `natTransOfLe` commutes with the inclusions into the yoneda embedding. -/
theorem natTransOfLe_comm {S T : Sieve X} (h : S ≤ T) :
    natTransOfLe h ≫ functorInclusion _ = functorInclusion _ :=
  rfl

/-- The presheaf induced by a sieve is a subobject of the yoneda embedding. -/
instance functorInclusion_is_mono : Mono S.functorInclusion :=
  ⟨fun f g h => by
    ext Y y
    simpa [Subtype.ext_iff_val] using congr_fun (NatTrans.congr_app h Y) y⟩

-- TODO: Show that when `f` is mono, this is right inverse to `functorInclusion` up to isomorphism.
/-- A natural transformation to a representable functor induces a sieve. This is the left inverse
of `functorInclusion`, shown in `sieveOfSubfunctor_functorInclusion`.
-/
@[simps]
def sieveOfSubfunctor {R} (f : R ⟶ yoneda.obj X) : Sieve X where
  arrows Y g := ∃ t, f.app (Opposite.op Y) t = g
  downward_closed := by
    rintro Y Z _ ⟨t, rfl⟩ g
    refine ⟨R.map g.op t, ?_⟩
    rw [FunctorToTypes.naturality _ _ f]
    simp

/-- `sieveOfSubfunctor` is a left inverse to `functorInclusion`. -/
theorem sieveOfSubfunctor_functorInclusion : sieveOfSubfunctor S.functorInclusion = S := by
  ext
  simp only [functorInclusion_app, sieveOfSubfunctor_apply]
  constructor
  · rintro ⟨⟨f, hf⟩, rfl⟩
    exact hf
  · intro hf
    exact ⟨⟨_, hf⟩, rfl⟩

/-- The inclusion of the presheaf of the maximal sieve into the yoneda embedding is an
isomorphism: every arrow into `X` belongs to `⊤`. -/
instance functorInclusion_top_isIso : IsIso (⊤ : Sieve X).functorInclusion :=
  ⟨⟨{ app := fun Y a => ⟨a, ⟨⟩⟩ }, rfl, rfl⟩⟩

end Sieve

end CategoryTheory
-- ---- CategoryTheory/Sites/Spaces.lean ----
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import Mathlib.CategoryTheory.Sites.Grothendieck
import Mathlib.CategoryTheory.Sites.Pretopology
import Mathlib.CategoryTheory.Limits.Lattice
import Mathlib.Topology.Sets.Opens

/-!
# Grothendieck topology on a topological space

Define the Grothendieck topology and the pretopology associated to a topological space, and show
that the pretopology induces the topology.

The covering (pre)sieves on `X` are those for which the union of domains contains `X`.

## Tags

site, Grothendieck topology, space

## References

* [nLab, *Grothendieck topology*](https://ncatlab.org/nlab/show/Grothendieck+topology)
* [S. MacLane, I. Moerdijk, *Sheaves in Geometry and Logic*][MM92]

## Implementation notes

We define the two separately, rather than defining the Grothendieck topology as that generated
by the pretopology for the purpose of having nice definitional properties for the sieves.
-/

universe u

namespace Opens

variable (T : Type u) [TopologicalSpace T]

open CategoryTheory TopologicalSpace CategoryTheory.Limits

/-- The Grothendieck topology associated to a topological space.
A sieve on `X` is covering precisely when every point of `X` lies in the domain of some
member of the sieve. -/
def grothendieckTopology : GrothendieckTopology (Opens T) where
  sieves X S := ∀ x ∈ X, ∃ (U : _) (f : U ⟶ X), S f ∧ x ∈ U
  top_mem' X x hx := ⟨_, 𝟙 _, trivial, hx⟩
  pullback_stable' X Y S f hf y hy := by
    rcases hf y (f.le hy) with ⟨U, g, hg, hU⟩
    -- Intersect the covering member `U` with `Y`; downward closure puts it in the pullback.
    refine ⟨U ⊓ Y, homOfLE inf_le_right, ?_, hU, hy⟩
    apply S.downward_closed hg (homOfLE inf_le_left)
  transitive' X S hS R hR x hx := by
    rcases hS x hx with ⟨U, f, hf, hU⟩
    rcases hR hf _ hU with ⟨V, g, hg, hV⟩
    exact ⟨_, g ≫ f, hg, hV⟩

/-- The Grothendieck pretopology associated to a topological space.
A presieve on `X` is covering precisely when every point of `X` lies in the domain of some
member of the presieve. -/
def pretopology : Pretopology (Opens T) where
  coverings X R := ∀ x ∈ X, ∃ (U : _) (f : U ⟶ X), R f ∧ x ∈ U
  has_isos X Y f i x hx := ⟨_, _, Presieve.singleton_self _, (inv f).le hx⟩
  pullbacks X Y f S hS x hx := by
    rcases hS _ (f.le hx) with ⟨U, g, hg, hU⟩
    refine ⟨_, _, Presieve.pullbackArrows.mk _ _ hg, ?_⟩
    -- `U ⊓ Y` is contained in the categorical pullback, so the point lands in it.
    have : U ⊓ Y ≤ pullback g f :=
      leOfHom (pullback.lift (homOfLE inf_le_left) (homOfLE inf_le_right) rfl)
    apply this ⟨hU, hx⟩
  transitive X S Ti hS hTi x hx := by
    rcases hS x hx with ⟨U, f, hf, hU⟩
    rcases hTi f hf x hU with ⟨V, g, hg, hV⟩
    exact ⟨_, _, ⟨_, g, f, hf, hg, rfl⟩, hV⟩

/-- The pretopology associated to a space is the largest pretopology that
generates the Grothendieck topology associated to the space. -/
@[simp]
theorem pretopology_ofGrothendieck :
    Pretopology.ofGrothendieck _ (Opens.grothendieckTopology T) = Opens.pretopology T := by
  apply le_antisymm
  · intro X R hR x hx
    rcases hR x hx with ⟨U, f, ⟨V, g₁, g₂, hg₂, _⟩, hU⟩
    exact ⟨V, g₂, hg₂, g₁.le hU⟩
  · intro X R hR x hx
    rcases hR x hx with ⟨U, f, hf, hU⟩
    exact ⟨U, f, Sieve.le_generate R U hf, hU⟩

/-- The pretopology associated to a space induces the Grothendieck topology associated
to the space. -/
@[simp]
theorem pretopology_toGrothendieck :
    Pretopology.toGrothendieck _ (Opens.pretopology T) = Opens.grothendieckTopology T := by
  rw [← pretopology_ofGrothendieck]
  apply (Pretopology.gi (Opens T)).l_u_eq

end Opens
-- ---- CategoryTheory/Sites/Subsheaf.lean ----
/-
Copyright (c) 2022 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import Mathlib.CategoryTheory.Elementwise
import Mathlib.CategoryTheory.Adjunction.Evaluation
import Mathlib.Tactic.CategoryTheory.Elementwise
import Mathlib.CategoryTheory.Adhesive
import Mathlib.CategoryTheory.Sites.ConcreteSheafification

/-!

# Subsheaf of types

We define the sub(pre)sheaf of a type valued presheaf.

## Main results

- `CategoryTheory.GrothendieckTopology.Subpresheaf` :
  A subpresheaf of a presheaf of types.
- `CategoryTheory.GrothendieckTopology.Subpresheaf.sheafify` :
  The sheafification of a subpresheaf as a subpresheaf. Note that this is a sheaf only when the
  whole sheaf is.
- `CategoryTheory.GrothendieckTopology.Subpresheaf.sheafify_isSheaf` :
  The sheafification is a sheaf
- `CategoryTheory.GrothendieckTopology.Subpresheaf.sheafifyLift` :
  The descent of a map into a sheaf to the sheafification.
- `CategoryTheory.GrothendieckTopology.imageSheaf` : The image sheaf of a morphism.
- `CategoryTheory.GrothendieckTopology.imageFactorization` : The image sheaf as a
  `Limits.imageFactorization`.
-/

universe w v u

open Opposite CategoryTheory

namespace CategoryTheory.GrothendieckTopology

variable {C : Type u} [Category.{v} C] (J : GrothendieckTopology C)

/-- A subpresheaf of a presheaf consists of a subset of `F.obj U` for every `U`,
compatible with the restriction maps `F.map i`. -/
@[ext]
structure Subpresheaf (F : Cᵒᵖ ⥤ Type w) where
  /-- If `G` is a sub-presheaf of `F`, then the sections of `G` on `U` forms a subset of sections of
    `F` on `U`. -/
  obj : ∀ U, Set (F.obj U)
  /-- If `G` is a sub-presheaf of `F` and `i : U ⟶ V`, then for each `G`-sections on `U` `x`,
    `F i x` is in `F(V)`. -/
  map : ∀ {U V : Cᵒᵖ} (i : U ⟶ V), obj U ⊆ F.map i ⁻¹' obj V

variable {F F' F'' : Cᵒᵖ ⥤ Type w} (G G' : Subpresheaf F)

-- Subpresheaves are ordered objectwise by inclusion of their sets of sections.
instance : PartialOrder (Subpresheaf F) :=
  PartialOrder.lift Subpresheaf.obj (fun _ _ => Subpresheaf.ext)

instance : Top (Subpresheaf F) :=
  ⟨⟨fun U => ⊤, @fun U V _ x _ => by aesop_cat⟩⟩

instance : Nonempty (Subpresheaf F) :=
  inferInstance

/-- The subpresheaf as a presheaf. -/
@[simps!]
def Subpresheaf.toPresheaf : Cᵒᵖ ⥤ Type w where
  obj U := G.obj U
  map := @fun U V i x => ⟨F.map i x, G.map i x.prop⟩
  map_id X := by
    ext ⟨x, _⟩
    dsimp
    simp only [FunctorToTypes.map_id_apply]
  map_comp := @fun X Y Z i j => by
    ext ⟨x, _⟩
    dsimp
    simp only [FunctorToTypes.map_comp_apply]

instance {U} : CoeHead (G.toPresheaf.obj U) (F.obj U) where
  coe := Subtype.val

/-- The inclusion of a subpresheaf to the original presheaf. -/
@[simps]
def Subpresheaf.ι : G.toPresheaf ⟶ F where app U x := x

instance : Mono G.ι :=
  ⟨@fun _ _ _ e =>
    NatTrans.ext <|
      funext fun U => funext fun x => Subtype.ext <| congr_fun (congr_app e U) x⟩

/-- The inclusion of a subpresheaf to a larger subpresheaf -/
@[simps]
def Subpresheaf.homOfLe {G G' : Subpresheaf F} (h : G ≤ G') : G.toPresheaf ⟶ G'.toPresheaf where
  app U x := ⟨x, h U x.prop⟩

instance {G G' : Subpresheaf F} (h : G ≤ G') : Mono (Subpresheaf.homOfLe h) :=
  ⟨fun _ _ e =>
    NatTrans.ext <|
      funext fun U =>
        funext fun x =>
          Subtype.ext <| (congr_arg Subtype.val <| (congr_fun (congr_app e U) x : _) : _)⟩

@[reassoc (attr := simp)]
theorem Subpresheaf.homOfLe_ι {G G' : Subpresheaf F} (h : G ≤ G') :
    Subpresheaf.homOfLe h ≫ G'.ι = G.ι := by
  ext
  rfl

instance : IsIso (Subpresheaf.ι (⊤ : Subpresheaf F)) := by
  refine @NatIso.isIso_of_isIso_app _ _ _ _ _ _ _ ?_
  intro X
  rw [isIso_iff_bijective]
  exact ⟨Subtype.coe_injective, fun x => ⟨⟨x, _root_.trivial⟩, rfl⟩⟩

/-- A subpresheaf is the whole presheaf iff its inclusion is an isomorphism. -/
theorem Subpresheaf.eq_top_iff_isIso : G = ⊤ ↔ IsIso G.ι := by
  constructor
  · rintro rfl
    infer_instance
  · intro H
    ext U x
    apply iff_true_iff.mpr
    rw [← IsIso.inv_hom_id_apply (G.ι.app U) x]
    exact ((inv (G.ι.app U)) x).2

/-- If the image of a morphism falls in a subpresheaf, then the morphism factors through it. -/
@[simps!]
def Subpresheaf.lift (f : F' ⟶ F) (hf : ∀ U x, f.app U x ∈ G.obj U) : F' ⟶ G.toPresheaf where
  app U x := ⟨f.app U x, hf U x⟩
  naturality := by
    have := elementwise_of% f.naturality
    intros
    refine funext fun x => Subtype.ext ?_
    simp only [toPresheaf_obj, types_comp_apply]
    exact this _ _

@[reassoc (attr := simp)]
theorem Subpresheaf.lift_ι (f : F' ⟶ F) (hf : ∀ U x, f.app U x ∈ G.obj U) :
    G.lift f hf ≫ G.ι = f := by
  ext
  rfl

/-- Given a subpresheaf `G` of `F`, an `F`-section `s` on `U`, we may define a sieve of `U`
consisting of all `f : V ⟶ U` such that the restriction of `s` along `f` is in `G`. -/
@[simps]
def Subpresheaf.sieveOfSection {U : Cᵒᵖ} (s : F.obj U) : Sieve (unop U) where
  arrows V f := F.map f.op s ∈ G.obj (op V)
  downward_closed := @fun V W i hi j => by
    simp only [op_unop, op_comp, FunctorToTypes.map_comp_apply]
    exact G.map _ hi

/-- Given an `F`-section `s` on `U` and a subpresheaf `G`, we may define a family of elements in
`G` consisting of the restrictions of `s` -/
def Subpresheaf.familyOfElementsOfSection {U : Cᵒᵖ} (s : F.obj U) :
    (G.sieveOfSection s).1.FamilyOfElements G.toPresheaf := fun _ i hi => ⟨F.map i.op s, hi⟩

/-- The family of restrictions of a section is compatible. -/
theorem Subpresheaf.family_of_elements_compatible {U : Cᵒᵖ} (s : F.obj U) :
    (G.familyOfElementsOfSection s).Compatible := by
  intro Y₁ Y₂ Z g₁ g₂ f₁ f₂ h₁ h₂ e
  refine Subtype.ext ?_ -- Porting note: `ext1` does not work here
  change F.map g₁.op (F.map f₁.op s) = F.map g₂.op (F.map f₂.op s)
  rw [← FunctorToTypes.map_comp_apply, ← FunctorToTypes.map_comp_apply, ← op_comp, ← op_comp, e]

/-- Naturality of a morphism into a subpresheaf, stated on underlying `F`-sections. -/
theorem Subpresheaf.nat_trans_naturality (f : F' ⟶ G.toPresheaf) {U V : Cᵒᵖ} (i : U ⟶ V)
    (x : F'.obj U) : (f.app V (F'.map i x)).1 = F.map i (f.app U x).1 :=
  congr_arg Subtype.val (FunctorToTypes.naturality _ _ f i x)

/-- The sheafification of a subpresheaf as a subpresheaf. Note that this is a sheaf only when the
whole presheaf is a sheaf. -/
def Subpresheaf.sheafify : Subpresheaf F where
  obj U := { s | G.sieveOfSection s ∈ J (unop U) }
  map := by
    rintro U V i s hs
    refine J.superset_covering ?_ (J.pullback_stable i.unop hs)
    intro _ _ h
    dsimp at h ⊢
    rwa [← FunctorToTypes.map_comp_apply]

/-- A subpresheaf is contained in its sheafification. -/
theorem Subpresheaf.le_sheafify : G ≤ G.sheafify J := by
  intro U s hs
  change _ ∈ J _
  convert J.top_mem U.unop -- Porting note: `U.unop` can not be inferred now
  rw [eq_top_iff]
  rintro V i -
  exact G.map i.op hs

variable {J}

/-- If `F` and `G` are both sheaves, `G` coincides with its sheafification. -/
theorem Subpresheaf.eq_sheafify (h : Presieve.IsSheaf J F)
    (hG : Presieve.IsSheaf J G.toPresheaf) : G = G.sheafify J := by
  apply (G.le_sheafify J).antisymm
  intro U s hs
  suffices ((hG _ hs).amalgamate _ (G.family_of_elements_compatible s)).1 = s by
    rw [← this]
    exact ((hG _ hs).amalgamate _ (G.family_of_elements_compatible s)).2
  apply (h _ hs).isSeparatedFor.ext
  intro V i hi
  exact (congr_arg Subtype.val ((hG _ hs).valid_glue (G.family_of_elements_compatible s) _ hi) : _)

/-- The sheafification of a subpresheaf of a sheaf is a sheaf. -/
theorem Subpresheaf.sheafify_isSheaf (hF : Presieve.IsSheaf J F) :
    Presieve.IsSheaf J (G.sheafify J).toPresheaf := by
  intro U S hS x hx
  -- `S'` refines `S` by, for each `f ∈ S`, the sieve witnessing `(x f hf).1 ∈ sheafify`.
  let S' := Sieve.bind S fun Y f hf => G.sieveOfSection (x f hf).1
  have := fun (V) (i : V ⟶ U) (hi : S' i) => hi
  -- Porting note: change to explicit variable so that `choose` can find the correct
  -- dependent functions. Thus everything follows need two additional explicit variables.
  choose W i₁ i₂ hi₂ h₁ h₂ using this
  dsimp [-Sieve.bind_apply] at *
  let x'' : Presieve.FamilyOfElements F S' := fun V i hi => F.map (i₁ V i hi).op (x _ (hi₂ V i hi))
  have H : ∀ s, x.IsAmalgamation s ↔ x''.IsAmalgamation s.1 := by
    intro s
    constructor
    · intro H V i hi
      dsimp only [x'']
      conv_lhs => rw [← h₂ _ _ hi]
      rw [← H _ (hi₂ _ _ hi)]
      exact FunctorToTypes.map_comp_apply F (i₂ _ _ hi).op (i₁ _ _ hi).op _
    · intro H V i hi
      refine Subtype.ext ?_
      apply (hF _ (x i hi).2).isSeparatedFor.ext
      intro V' i' hi'
      have hi'' : S' (i' ≫ i) := ⟨_, _, _, hi, hi', rfl⟩
      have := H _ hi''
      rw [op_comp, F.map_comp] at this
      exact this.trans (congr_arg Subtype.val (hx _ _ (hi₂ _ _ hi'') hi (h₂ _ _ hi'')))
  have : x''.Compatible := by
    intro V₁ V₂ V₃ g₁ g₂ g₃ g₄ S₁ S₂ e
    rw [← FunctorToTypes.map_comp_apply, ← FunctorToTypes.map_comp_apply]
    exact congr_arg Subtype.val
        (hx (g₁ ≫ i₁ _ _ S₁) (g₂ ≫ i₁ _ _ S₂) (hi₂ _ _ S₁) (hi₂ _ _ S₂)
          (by simp only [Category.assoc, h₂, e]))
  -- Amalgamate `x''` in the ambient sheaf `F` and check the result lies in the sheafification.
  obtain ⟨t, ht, ht'⟩ := hF _ (J.bind_covering hS fun V i hi => (x i hi).2) _ this
  refine ⟨⟨t, _⟩, (H ⟨t, ?_⟩).mpr ht, fun y hy => Subtype.ext (ht' _ ((H _).mp hy))⟩
  refine J.superset_covering ?_ (J.bind_covering hS fun V i hi => (x i hi).2)
  intro V i hi
  dsimp
  rw [ht _ hi]
  exact h₁ _ _ hi

/-- When `F` is a sheaf, `G` equals its sheafification iff it is itself a sheaf. -/
theorem Subpresheaf.eq_sheafify_iff (h : Presieve.IsSheaf J F) :
    G = G.sheafify J ↔ Presieve.IsSheaf J G.toPresheaf :=
  ⟨fun e => e.symm ▸ G.sheafify_isSheaf h, G.eq_sheafify h⟩

/-- When `F` is a sheaf, `G` is a sheaf iff it is "locally closed" in `F`. -/
theorem Subpresheaf.isSheaf_iff (h : Presieve.IsSheaf J F) :
    Presieve.IsSheaf J G.toPresheaf ↔
      ∀ (U) (s : F.obj U), G.sieveOfSection s ∈ J (unop U) → s ∈ G.obj U := by
  rw [← G.eq_sheafify_iff h]
  change _ ↔ G.sheafify J ≤ G
  exact ⟨Eq.ge, (G.le_sheafify J).antisymm⟩

/-- Sheafification of subpresheaves of a sheaf is idempotent. -/
theorem Subpresheaf.sheafify_sheafify (h : Presieve.IsSheaf J F) :
    (G.sheafify J).sheafify J = G.sheafify J :=
  ((Subpresheaf.eq_sheafify_iff _ h).mpr <| G.sheafify_isSheaf h).symm

/-- The lift of a presheaf morphism onto the sheafification subpresheaf. -/
noncomputable def Subpresheaf.sheafifyLift (f : G.toPresheaf ⟶ F') (h : Presieve.IsSheaf J F') :
    (G.sheafify J).toPresheaf ⟶ F' where
  app U s := (h (G.sieveOfSection s.1) s.prop).amalgamate
    (_) ((G.family_of_elements_compatible s.1).compPresheafMap f)
  naturality := by
    intro U V i
    ext s
    apply (h _ ((Subpresheaf.sheafify J G).toPresheaf.map i s).prop).isSeparatedFor.ext
    intro W j hj
    refine (Presieve.IsSheafFor.valid_glue (h _ ((G.sheafify J).toPresheaf.map i s).2)
      ((G.family_of_elements_compatible _).compPresheafMap _) _ hj).trans ?_
    dsimp
    conv_rhs => rw [← FunctorToTypes.map_comp_apply]
    change _ = F'.map (j ≫ i.unop).op _
    refine Eq.trans ?_ (Presieve.IsSheafFor.valid_glue (h _ s.2)
      ((G.family_of_elements_compatible s.1).compPresheafMap f) (j ≫ i.unop) ?_).symm
    swap
    -- Porting note: need to swap two goals otherwise the first goal needs to be proven
    -- inside the second goal any way
    · dsimp [Presieve.FamilyOfElements.compPresheafMap] at hj ⊢
      rwa [FunctorToTypes.map_comp_apply]
    · dsimp [Presieve.FamilyOfElements.compPresheafMap]
      exact congr_arg _ (Subtype.ext (FunctorToTypes.map_comp_apply _ _ _ _).symm)

/-- `sheafifyLift f h` restricted along `G ≤ G.sheafify J` recovers `f`. -/
theorem Subpresheaf.to_sheafifyLift (f : G.toPresheaf ⟶ F') (h : Presieve.IsSheaf J F') :
    Subpresheaf.homOfLe (G.le_sheafify J) ≫ G.sheafifyLift f h = f := by
  ext U s
  apply (h _ ((Subpresheaf.homOfLe (G.le_sheafify J)).app U s).prop).isSeparatedFor.ext
  intro V i hi
  have := elementwise_of% f.naturality
  -- Porting note: filled in some underscores where Lean3 could automatically fill.
  exact (Presieve.IsSheafFor.valid_glue (h _ ((homOfLe (_ : G ≤ sheafify J G)).app U s).2)
    ((G.family_of_elements_compatible _).compPresheafMap _) _ hi).trans (this _ _)

/-- A morphism out of the sheafification into a sheaf is determined by its restriction to `G`. -/
theorem Subpresheaf.to_sheafify_lift_unique (h : Presieve.IsSheaf J F')
    (l₁ l₂ : (G.sheafify J).toPresheaf ⟶ F')
    (e : Subpresheaf.homOfLe (G.le_sheafify J) ≫ l₁ =
      Subpresheaf.homOfLe (G.le_sheafify J) ≫ l₂) : l₁ = l₂ := by
  ext U ⟨s, hs⟩
  apply (h _ hs).isSeparatedFor.ext
  rintro V i hi
  dsimp at hi
  erw [← FunctorToTypes.naturality, ← FunctorToTypes.naturality]
  exact (congr_fun (congr_app e <| op V) ⟨_, hi⟩ : _)

/-- The sheafification of `G` is the smallest subsheaf containing `G`. -/
theorem Subpresheaf.sheafify_le (h : G ≤ G') (hF : Presieve.IsSheaf J F)
    (hG' : Presieve.IsSheaf J G'.toPresheaf) : G.sheafify J ≤ G' := by
  intro U x hx
  convert ((G.sheafifyLift (Subpresheaf.homOfLe h) hG').app U ⟨x, hx⟩).2
  apply (hF _ hx).isSeparatedFor.ext
  intro V i hi
  have :=
    congr_arg (fun f : G.toPresheaf ⟶ G'.toPresheaf => (NatTrans.app f (op V) ⟨_, hi⟩).1)
      (G.to_sheafifyLift (Subpresheaf.homOfLe h) hG')
  convert this.symm
  erw [← Subpresheaf.nat_trans_naturality]
  rfl

section Image

/-- The image presheaf of a morphism, whose components are the set-theoretic images. -/
@[simps]
def imagePresheaf (f : F' ⟶ F) : Subpresheaf F where
  obj U := Set.range (f.app U)
  map := by
    rintro U V i _ ⟨x, rfl⟩
    have := elementwise_of% f.naturality
    exact ⟨_, this i x⟩

@[simp]
theorem top_subpresheaf_obj (U) : (⊤ : Subpresheaf F).obj U = ⊤ :=
  rfl

@[simp]
theorem imagePresheaf_id : imagePresheaf (𝟙 F) = ⊤ := by
  ext
  simp

/-- A morphism factors through the image presheaf. -/
@[simps!]
def toImagePresheaf (f : F' ⟶ F) : F' ⟶ (imagePresheaf f).toPresheaf :=
  (imagePresheaf f).lift f fun _ _ => Set.mem_range_self _

variable (J)

/-- A morphism factors through the sheafification of the image presheaf. -/
@[simps!]
def toImagePresheafSheafify (f : F' ⟶ F) : F' ⟶ ((imagePresheaf f).sheafify J).toPresheaf :=
  toImagePresheaf f ≫ Subpresheaf.homOfLe ((imagePresheaf f).le_sheafify J)

variable {J}

@[reassoc (attr := simp)]
theorem toImagePresheaf_ι (f : F' ⟶ F) : toImagePresheaf f ≫ (imagePresheaf f).ι = f :=
  (imagePresheaf f).lift_ι _ _

/-- The image of a composite is contained in the image of the second morphism. -/
theorem imagePresheaf_comp_le (f₁ : F ⟶ F') (f₂ : F' ⟶ F'') :
    imagePresheaf (f₁ ≫ f₂) ≤ imagePresheaf f₂ := fun U _ hx =>
  ⟨f₁.app U hx.choose, hx.choose_spec⟩

instance isIso_toImagePresheaf {F F' : Cᵒᵖ ⥤ TypeMax.{v, w}} (f : F ⟶ F') [hf : Mono f] :
    IsIso (toImagePresheaf f) := by
  have : ∀ (X : Cᵒᵖ), IsIso ((toImagePresheaf f).app X) := by
    intro X
    rw [isIso_iff_bijective]
    constructor
    · intro x y e
      -- Monomorphisms of type-valued presheaves are componentwise injective.
      have := (NatTrans.mono_iff_mono_app _ _).mp hf X
      rw [mono_iff_injective] at this
      exact this (congr_arg Subtype.val e : _)
    · rintro ⟨_, ⟨x, rfl⟩⟩
      exact ⟨x, rfl⟩
  apply NatIso.isIso_of_isIso_app

/-- The image sheaf of a morphism between sheaves, defined to be the sheafification of
`image_presheaf`. -/
@[simps]
def imageSheaf {F F' : Sheaf J (Type w)} (f : F ⟶ F') : Sheaf J (Type w) :=
  ⟨((imagePresheaf f.1).sheafify J).toPresheaf, by
    rw [isSheaf_iff_isSheaf_of_type]
    apply Subpresheaf.sheafify_isSheaf
    rw [← isSheaf_iff_isSheaf_of_type]
    exact F'.2⟩

/-- A morphism factors through the image sheaf. -/
@[simps]
def toImageSheaf {F F' : Sheaf J (Type w)} (f : F ⟶ F') : F ⟶ imageSheaf f :=
  ⟨toImagePresheafSheafify J f.1⟩

/-- The inclusion of the image sheaf to the target. -/
@[simps]
def imageSheafι {F F' : Sheaf J (Type w)} (f : F ⟶ F') : imageSheaf f ⟶ F' :=
  ⟨Subpresheaf.ι _⟩

@[reassoc (attr := simp)]
theorem toImageSheaf_ι {F F' : Sheaf J (Type w)} (f : F ⟶ F') :
    toImageSheaf f ≫ imageSheafι f = f := by
  ext1
  simp [toImagePresheafSheafify]

instance {F F' : Sheaf J (Type w)} (f : F ⟶ F') : Mono (imageSheafι f) :=
  (sheafToPresheaf J _).mono_of_mono_map
    (by
      dsimp
      infer_instance)

instance {F F' : Sheaf J (Type w)} (f : F ⟶ F') : Epi (toImageSheaf f) := by
  refine ⟨@fun G' g₁ g₂ e => ?_⟩
  ext U ⟨s, hx⟩
  -- Two maps into a sheaf agreeing on a covering family agree everywhere.
  apply ((isSheaf_iff_isSheaf_of_type J _).mp G'.2 _ hx).isSeparatedFor.ext
  rintro V i ⟨y, e'⟩
  change (g₁.val.app _ ≫ G'.val.map _) _ = (g₂.val.app _ ≫ G'.val.map _) _
  rw [← NatTrans.naturality, ← NatTrans.naturality]
  have E : (toImageSheaf f).val.app (op V) y = (imageSheaf f).val.map i.op ⟨s, hx⟩ :=
    Subtype.ext e'
  have := congr_arg (fun f : F ⟶ G' => (Sheaf.Hom.val f).app _ y) e
  dsimp at this ⊢
  convert this <;> exact E.symm

/-- The mono factorization given by `image_sheaf` for a morphism. -/
def imageMonoFactorization {F F' : Sheaf J (Type w)} (f : F ⟶ F') :
    Limits.MonoFactorisation f where
  I := imageSheaf f
  m := imageSheafι f
  e := toImageSheaf f

/-- The mono factorization given by `image_sheaf` for a morphism is an image. -/
noncomputable def imageFactorization {F F' : Sheaf J TypeMax.{v, u}} (f : F ⟶ F') :
    Limits.ImageFactorisation f where
  F := imageMonoFactorization f
  isImage :=
    { lift := fun I => by
        -- Porting note: need to specify the target category (TypeMax.{v, u}) for this to work.
        haveI M := (Sheaf.Hom.mono_iff_presheaf_mono J TypeMax.{v, u} _).mp I.m_mono
        haveI := isIso_toImagePresheaf I.m.1
        refine ⟨Subpresheaf.homOfLe ?_ ≫ inv (toImagePresheaf I.m.1)⟩
        apply Subpresheaf.sheafify_le
        · conv_lhs => rw [← I.fac]
          apply imagePresheaf_comp_le
        · rw [← isSheaf_iff_isSheaf_of_type]
          exact F'.2
        · apply Presieve.isSheaf_iso J (asIso <| toImagePresheaf I.m.1)
          rw [← isSheaf_iff_isSheaf_of_type]
          exact I.I.2
      lift_fac := fun I => by
        ext1
        dsimp [imageMonoFactorization]
        generalize_proofs h
        rw [← Subpresheaf.homOfLe_ι h, Category.assoc]
        congr 1
        rw [IsIso.inv_comp_eq, toImagePresheaf_ι] }

instance : Limits.HasImages (Sheaf J (Type max v u)) :=
  ⟨@fun _ _ f => ⟨⟨imageFactorization f⟩⟩⟩

end Image

end CategoryTheory.GrothendieckTopology
-- ---- CategoryTheory/Sites/Types.lean ----
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import Mathlib.CategoryTheory.Sites.Canonical

/-!
# Grothendieck Topology and Sheaves on the Category of Types

In this file we define a Grothendieck topology on the category of types, and construct the
canonical functor that sends a type to a sheaf over the category of types, and make this an
equivalence of categories.

Then we prove that the topology defined is the canonical topology.
-/

universe u

namespace CategoryTheory

--open scoped CategoryTheory.Type -- Porting note: unknown namespace

/-- A Grothendieck topology associated to the category of all types.
A sieve is a covering iff it is jointly surjective. -/
def typesGrothendieckTopology : GrothendieckTopology (Type u) where
  sieves α S := ∀ x : α, S fun _ : PUnit => x
  top_mem' _ _ := trivial
  pullback_stable' _ _ _ f hs x := hs (f x)
  transitive' _ _ hs _ hr x := hr (hs x) PUnit.unit

/-- The discrete sieve on a type, which only includes arrows whose image is a subsingleton. -/
@[simps]
def discreteSieve (α : Type u) : Sieve α where
  arrows _ f := ∃ x, ∀ y, f y = x
  downward_closed := fun ⟨x, hx⟩ g => ⟨x, fun y => hx <| g y⟩

/-- The discrete sieve is covering in `typesGrothendieckTopology`. -/
theorem discreteSieve_mem (α : Type u) : discreteSieve α ∈ typesGrothendieckTopology α :=
  fun x => ⟨x, fun _ => rfl⟩

/-- The discrete presieve on a type, which only includes arrows whose domain is a singleton. -/
def discretePresieve (α : Type u) : Presieve α := fun β _ => ∃ x : β, ∀ y : β, y = x

/-- The sieve generated by the discrete presieve is covering. -/
theorem generate_discretePresieve_mem (α : Type u) :
    Sieve.generate (discretePresieve α) ∈ typesGrothendieckTopology α :=
  fun x => ⟨PUnit, id, fun _ => x, ⟨PUnit.unit, fun _ => Subsingleton.elim _ _⟩, rfl⟩

open Presieve

/-- Representable presheaves of types are sheaves for `typesGrothendieckTopology`. -/
theorem isSheaf_yoneda' {α : Type u} : IsSheaf typesGrothendieckTopology (yoneda.obj α) :=
  fun β S hs x hx =>
  ⟨fun y => x _ (hs y) PUnit.unit, fun γ f h =>
    funext fun z => by
      convert congr_fun (hx (𝟙 _) (fun _ => z) (hs <| f z) h rfl) PUnit.unit using 1,
    fun f hf => funext fun y => by convert congr_fun (hf _ (hs y)) PUnit.unit⟩

/-- The yoneda functor that sends a type to a sheaf over the category of types. -/
@[simps]
def yoneda' : Type u ⥤ SheafOfTypes typesGrothendieckTopology where
  obj α := ⟨yoneda.obj α, isSheaf_yoneda'⟩
  map f := ⟨yoneda.map f⟩

@[simp]
theorem yoneda'_comp : yoneda'.{u} ⋙ sheafOfTypesToPresheaf _ = yoneda :=
  rfl

open Opposite

/-- Given a presheaf `P` on the category of types, construct
a map `P(α) → (α → P(*))` for all type `α`. -/
def eval (P : Type uᵒᵖ ⥤ Type u) (α : Type u) (s : P.obj (op α)) (x : α) : P.obj (op PUnit) :=
  P.map (↾fun _ => x).op s

/-- Given a sheaf `S` on the category of types, construct a map
`(α → S(*)) → S(α)` that is inverse to `eval`. -/
noncomputable def typesGlue (S : Type uᵒᵖ ⥤ Type u) (hs : IsSheaf typesGrothendieckTopology S)
    (α : Type u) (f : α → S.obj (op PUnit)) : S.obj (op α) :=
  (hs.isSheafFor _ _ (generate_discretePresieve_mem α)).amalgamate
    (fun β g hg => S.map (↾fun _ => PUnit.unit).op <| f <| g <| Classical.choose hg)
    fun β γ δ g₁ g₂ f₁ f₂ hf₁ hf₂ h =>
    (hs.isSheafFor _ _ (generate_discretePresieve_mem δ)).isSeparatedFor.ext fun ε g ⟨x, _⟩ => by
      have : f₁ (Classical.choose hf₁) = f₂ (Classical.choose hf₂) :=
        Classical.choose_spec hf₁ (g₁ <| g x) ▸
          Classical.choose_spec hf₂ (g₂ <| g x) ▸ congr_fun h _
      simp_rw [← FunctorToTypes.map_comp_apply, this, ← op_comp]
      rfl

/-- `typesGlue` is a right inverse of `eval`. -/
theorem eval_typesGlue {S hs α} (f) : eval.{u} S α (typesGlue S hs α f) = f := by
  funext x
  apply (IsSheafFor.valid_glue _ _ _ <| ⟨PUnit.unit, fun _ => Subsingleton.elim _ _⟩).trans
  convert FunctorToTypes.map_id_apply S _

/-- `typesGlue` is a left inverse of `eval`. -/
theorem typesGlue_eval {S hs α} (s) : typesGlue.{u} S hs α (eval S α s) = s := by
  apply (hs.isSheafFor _ _ (generate_discretePresieve_mem α)).isSeparatedFor.ext
  intro β f hf
  apply (IsSheafFor.valid_glue _ _ _ hf).trans
  apply (FunctorToTypes.map_comp_apply _ _ _ _).symm.trans
  rw [← op_comp]
  --congr 2 -- Porting note: This tactic didn't work. Find an alternative.
  suffices ((↾fun _ ↦ PUnit.unit) ≫ ↾fun _ ↦ f (Classical.choose hf)) = f by rw [this]
  funext x
  exact congr_arg f (Classical.choose_spec hf x).symm

/-- Given a sheaf `S`, construct an equivalence `S(α) ≃ (α → S(*))`. -/
@[simps]
noncomputable def evalEquiv (S : Type uᵒᵖ ⥤ Type u) (hs : IsSheaf typesGrothendieckTopology S)
    (α : Type u) : S.obj (op α) ≃ (α → S.obj (op PUnit)) where
  toFun := eval S α
  invFun := typesGlue S hs α
  left_inv := typesGlue_eval
  right_inv := eval_typesGlue

/-- `eval` is natural in the type argument. -/
theorem eval_map (S : Type uᵒᵖ ⥤ Type u) (α β) (f : β ⟶ α) (s x) :
    eval S β (S.map f.op s) x = eval S α s (f x) := by
  simp_rw [eval, ← FunctorToTypes.map_comp_apply, ← op_comp]; rfl

/-- Given a sheaf `S`, construct an isomorphism `S ≅ [-, S(*)]`. -/
@[simps!]
noncomputable def equivYoneda (S : Type uᵒᵖ ⥤ Type u) (hs : IsSheaf typesGrothendieckTopology S) :
    S ≅ yoneda.obj (S.obj (op PUnit)) :=
  NatIso.ofComponents (fun α => Equiv.toIso <| evalEquiv S hs <| unop α) fun {α β} f =>
    funext fun _ => funext fun _ => eval_map S (unop α) (unop β) f.unop _ _

/-- Given a sheaf `S`, construct an isomorphism `S ≅ [-, S(*)]`. -/
@[simps]
noncomputable def equivYoneda' (S : SheafOfTypes typesGrothendieckTopology) :
    S ≅ yoneda'.obj (S.1.obj (op PUnit)) where
  hom := ⟨(equivYoneda S.1 S.2).hom⟩
  inv := ⟨(equivYoneda S.1 S.2).inv⟩
  hom_inv_id := by ext1; apply (equivYoneda S.1 S.2).hom_inv_id
  inv_hom_id := by ext1; apply (equivYoneda S.1 S.2).inv_hom_id

/-- `eval` is natural in the sheaf argument. -/
theorem eval_app (S₁ S₂ : SheafOfTypes.{u} typesGrothendieckTopology) (f : S₁ ⟶ S₂) (α : Type u)
    (s : S₁.1.obj (op α)) (x : α) :
    eval S₂.1 α (f.val.app (op α) s) x = f.val.app (op PUnit) (eval S₁.1 α s x) :=
  (congr_fun (f.val.naturality (↾fun _ : PUnit => x).op) s).symm

/-- `yoneda'` induces an equivalence of category between `Type u` and
`SheafOfTypes typesGrothendieckTopology`. -/
@[simps!]
noncomputable def typeEquiv : Type u ≌ SheafOfTypes typesGrothendieckTopology :=
  Equivalence.mk yoneda' (sheafOfTypesToPresheaf _ ⋙ (evaluation _ _).obj (op PUnit))
    (NatIso.ofComponents
      (fun _α =>
        -- α ≅ PUnit ⟶ α
        { hom := fun x _ => x
          inv := fun f => f PUnit.unit
          hom_inv_id := funext fun _ => rfl
          inv_hom_id := funext fun _ => funext fun y => PUnit.casesOn y rfl })
      fun _ => rfl)
    (Iso.symm <|
      NatIso.ofComponents (fun S => equivYoneda' S) fun {S₁ S₂} f =>
        SheafOfTypes.Hom.ext <|
          NatTrans.ext <|
            funext fun α => funext fun s => funext fun x => eval_app S₁ S₂ f (unop α) s x)

/-- `typesGrothendieckTopology` is subcanonical: representables are sheaves for it. -/
theorem subcanonical_typesGrothendieckTopology :
    Sheaf.Subcanonical typesGrothendieckTopology.{u} :=
  Sheaf.Subcanonical.of_yoneda_isSheaf _ fun _ => isSheaf_yoneda'

/-- `typesGrothendieckTopology` agrees with the canonical topology on `Type u`. -/
theorem typesGrothendieckTopology_eq_canonical :
    typesGrothendieckTopology.{u} = Sheaf.canonicalTopology (Type u) := by
  refine le_antisymm subcanonical_typesGrothendieckTopology (sInf_le ?_)
  refine ⟨yoneda.obj (ULift Bool), ⟨_, rfl⟩, GrothendieckTopology.ext ?_⟩
  funext α
  ext S
  refine ⟨fun hs x => ?_, fun hs β f => isSheaf_yoneda' _ fun y => hs _⟩
  by_contra hsx
  -- A non-jointly-surjective sieve cannot separate for the two-element type.
  have : (fun _ => ULift.up true) = fun _ => ULift.up false :=
    (hs PUnit fun _ => x).isSeparatedFor.ext fun β f hf =>
      funext fun y => hsx.elim <| S.2 hf fun _ => y
  simp [Function.funext_iff] at this

end CategoryTheory
-- ---- CategoryTheory/Sites/Whiskering.lean ----
/-
Copyright (c) 2021 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz
-/
import Mathlib.CategoryTheory.Sites.Sheaf

/-!

In this file we construct the functor `Sheaf J A ⥤ Sheaf J B` between sheaf categories
obtained by composition with a functor `F : A ⥤ B`.

In order for the sheaf condition to be preserved, `F` must preserve the correct limits.
The lemma `Presheaf.IsSheaf.comp` says that composition with such an `F` indeed preserves the
sheaf condition.

The functor between sheaf categories is called `sheafCompose J F`.
Given a natural transformation `η : F ⟶ G`, we obtain a natural transformation
`sheafCompose J F ⟶ sheafCompose J G`, which we call `sheafCompose_map J η`.

-/

namespace CategoryTheory

open CategoryTheory.Limits

universe v₁ v₂ v₃ u₁ u₂ u₃

variable {C : Type u₁} [Category.{v₁} C]
variable {A : Type u₂} [Category.{v₂} A]
variable {B : Type u₃} [Category.{v₃} B]
variable (J : GrothendieckTopology C)
variable {U : C} (R : Presieve U)
variable (F G H : A ⥤ B) (η : F ⟶ G) (γ : G ⟶ H)

/-- Describes the property of a functor to "preserve sheaves". -/
class GrothendieckTopology.HasSheafCompose : Prop where
  /-- For every sheaf `P`, `P ⋙ F` is a sheaf. -/
  isSheaf (P : Cᵒᵖ ⥤ A) (hP : Presheaf.IsSheaf J P) : Presheaf.IsSheaf J (P ⋙ F)

variable [J.HasSheafCompose F] [J.HasSheafCompose G] [J.HasSheafCompose H]

/-- Composing a functor which `HasSheafCompose`, yields a functor between sheaf categories. -/
@[simps]
def sheafCompose : Sheaf J A ⥤ Sheaf J B where
  obj G := ⟨G.val ⋙ F, GrothendieckTopology.HasSheafCompose.isSheaf G.val G.2⟩
  map η := ⟨whiskerRight η.val _⟩
  map_id _ := Sheaf.Hom.ext <| whiskerRight_id _
  map_comp _ _ := Sheaf.Hom.ext <| whiskerRight_comp _ _ _

instance [F.Faithful] : (sheafCompose J F ⋙ sheafToPresheaf _ _).Faithful :=
  show (sheafToPresheaf _ _ ⋙ (whiskeringRight Cᵒᵖ A B).obj F).Faithful from inferInstance

instance [F.Faithful] [F.Full] : (sheafCompose J F ⋙ sheafToPresheaf _ _).Full :=
  show (sheafToPresheaf _ _ ⋙ (whiskeringRight Cᵒᵖ A B).obj F).Full from inferInstance

instance [F.Faithful] : (sheafCompose J F).Faithful :=
  Functor.Faithful.of_comp (sheafCompose J F) (sheafToPresheaf _ _)

instance [F.Full] [F.Faithful] : (sheafCompose J F).Full :=
  Functor.Full.of_comp_faithful (sheafCompose J F) (sheafToPresheaf _ _)

instance [F.ReflectsIsomorphisms] : (sheafCompose J F).ReflectsIsomorphisms where
  reflects {G₁ G₂} f _ := by
    -- Reduce to reflecting isomorphisms at the level of underlying presheaves.
    rw [← isIso_iff_of_reflects_iso _ (sheafToPresheaf _ _),
      ← isIso_iff_of_reflects_iso _ ((whiskeringRight Cᵒᵖ A B).obj F)]
    change IsIso ((sheafToPresheaf _ _).map ((sheafCompose J F).map f))
    infer_instance

variable {F G}

/-- If `η : F ⟶ G` is a natural transformation then we obtain a morphism of functors
`sheafCompose J F ⟶ sheafCompose J G` by whiskering with `η` on the level of presheaves. -/
def sheafCompose_map : sheafCompose J F ⟶ sheafCompose J G where
  app := fun X => .mk <| whiskerLeft _ η

@[simp]
lemma sheafCompose_id : sheafCompose_map (F := F) J (𝟙 _) = 𝟙 _ :=
  rfl

@[simp]
lemma sheafCompose_comp :
    sheafCompose_map J (η ≫ γ) = sheafCompose_map J η ≫ sheafCompose_map J γ :=
  rfl

namespace GrothendieckTopology.Cover

variable (F G) {J}
variable (P : Cᵒᵖ ⥤ A) {X : C} (S : J.Cover X)

/-- The multicospan associated to a cover `S : J.Cover X` and a presheaf of the form `P ⋙ F`
is isomorphic to the composition of the multicospan associated to `S` and `P`,
composed with `F`. -/
@[simps!]
def multicospanComp : (S.index (P ⋙ F)).multicospan ≅ (S.index P).multicospan ⋙ F :=
  NatIso.ofComponents
    (fun t =>
      match t with
      | WalkingMulticospan.left a => Iso.refl _
      | WalkingMulticospan.right b => Iso.refl _)
    (by
      rintro (a | b) (a | b) (f | f | f)
      all_goals aesop_cat)

/-- Mapping the multifork associated to a cover `S : J.Cover X` and a presheaf `P` with
respect to a functor `F` is isomorphic (upto a natural isomorphism of the underlying functors)
to the multifork associated to `S` and `P ⋙ F`. -/
def mapMultifork :
    F.mapCone (S.multifork P) ≅
      (Limits.Cones.postcompose (S.multicospanComp F P).hom).obj (S.multifork (P ⋙ F)) :=
  Cones.ext (Iso.refl _)

end GrothendieckTopology.Cover

/-- Composing a sheaf with a functor preserving the limit of `(S.index P).multicospan` yields a
functor between sheaf categories. -/
instance hasSheafCompose_of_preservesMulticospan (F : A ⥤ B)
    [∀ (X : C) (S : J.Cover X) (P : Cᵒᵖ ⥤ A), PreservesLimit (S.index P).multicospan F] :
    J.HasSheafCompose F where
  isSheaf P hP := by
    rw [Presheaf.isSheaf_iff_multifork] at hP ⊢
    intro X S
    obtain ⟨h⟩ := hP X S
    -- Transport the limit cone along `F`, then along the comparison isomorphisms.
    replace h := isLimitOfPreserves F h
    replace h := Limits.IsLimit.ofIsoLimit h (S.mapMultifork F P)
    exact ⟨Limits.IsLimit.postcomposeHomEquiv (S.multicospanComp F P) _ h⟩

/-- Composing a sheaf with a functor preserving limits of the same size as the hom sets in `C`
yields a functor between sheaf categories.

Note: the size of the limit that `F` is required to preserve in
`hasSheafCompose_of_preservesMulticospan` is in general larger than this. -/
instance hasSheafCompose_of_preservesLimitsOfSize [PreservesLimitsOfSize.{v₁, max u₁ v₁} F] :
    J.HasSheafCompose F where
  isSheaf _ hP := Presheaf.isSheaf_comp_of_isSheaf J _ F hP

variable {J}

/-- A sheaf valued in a concrete category is separated as a presheaf, provided that composing
with the forgetful functor preserves the sheaf condition. -/
lemma Sheaf.isSeparated [ConcreteCategory A] [J.HasSheafCompose (forget A)] (F : Sheaf J A) :
    Presheaf.IsSeparated J F.val := by
  rintro X S hS x y h
  exact (Presieve.isSeparated_of_isSheaf _ _ ((isSheaf_iff_isSheaf_of_type _ _).1
    ((sheafCompose J (forget A)).obj F).2) S hS).ext (fun _ _ hf => h _ _ hf)

/-- A concrete-category-valued presheaf satisfying the sheaf condition is separated. -/
lemma Presheaf.IsSheaf.isSeparated {F : Cᵒᵖ ⥤ A} [ConcreteCategory A]
    [J.HasSheafCompose (forget A)] (hF : Presheaf.IsSheaf J F) :
    Presheaf.IsSeparated J F :=
  Sheaf.isSeparated ⟨F, hF⟩

end CategoryTheory
CategoryTheory\Sites\Coherent\Basic.lean
/-
Copyright (c) 2023 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz, Dagur Asgeirsson, Filippo A. E. Nuccio, Riccardo Brasca
-/
import Mathlib.CategoryTheory.Extensive
import Mathlib.CategoryTheory.Sites.Coverage
import Mathlib.CategoryTheory.EffectiveEpi.Basic
/-!

# The Coherent, Regular and Extensive Grothendieck Topologies

This file defines three related Grothendieck topologies on a category `C`.

The first one is called the *coherent* topology. For that to exist, the category `C` must satisfy
a condition called `Precoherent C`, which is essentially the minimal requirement for the coherent
coverage to exist. It means that finite effective epimorphic families can be "pulled back". Given
such a category, the coherent coverage is `coherentCoverage C` and the corresponding Grothendieck
topology is `coherentTopology C`. The covering sieves of this coverage are generated by presieves
consisting of finite effective epimorphic families.

The second one is called the *regular* topology and for that to exist, the category `C` must
satisfy a condition called `Preregular C`. This means that effective epimorphisms can be
"pulled back". The regular coverage is `regularCoverage C` and the corresponding Grothendieck
topology is `regularTopology C`. The covering sieves of this coverage are generated by presieves
consisting of a single effective epimorphism.

The third one is called the *extensive* coverage and for that to exist, the category `C` must
satisfy a condition called `FinitaryPreExtensive C`. This means `C` has finite coproducts and that
those are preserved by pullbacks. This condition is weaker than `FinitaryExtensive`, where in
addition finite coproducts are disjoint. The extensive coverage is `extensiveCoverage C` and the
corresponding Grothendieck topology is `extensiveTopology C`. The covering sieves of this coverage
are generated by presieves consisting of finitely many arrows that together induce an isomorphism
from the coproduct to the target.

## References:
- [Elephant]: *Sketches of an Elephant*, P. T. Johnstone: C2.1, Example 2.1.12.
- [nLab, *Coherent Coverage*](https://ncatlab.org/nlab/show/coherent+coverage)

-/

namespace CategoryTheory

open Limits

variable (C : Type*) [Category C]

/--
The condition `Precoherent C` is essentially the minimal condition required to define the
coherent coverage on `C`.
-/
class Precoherent : Prop where
  /--
  Given an effective epi family `π₁` over `B₁` and a morphism `f : B₂ ⟶ B₁`, there exists
  an effective epi family `π₂` over `B₂`, such that `π₂` factors through `π₁`.
  -/
  pullback {B₁ B₂ : C} (f : B₂ ⟶ B₁) :
    ∀ (α : Type) [Finite α] (X₁ : α → C) (π₁ : (a : α) → (X₁ a ⟶ B₁)),
      EffectiveEpiFamily X₁ π₁ →
    ∃ (β : Type) (_ : Finite β) (X₂ : β → C) (π₂ : (b : β) → (X₂ b ⟶ B₂)),
      EffectiveEpiFamily X₂ π₂ ∧
      ∃ (i : β → α) (ι : (b : β) → (X₂ b ⟶ X₁ (i b))),
      ∀ (b : β), ι b ≫ π₁ _ = π₂ _ ≫ f

/--
The coherent coverage on a precoherent category `C`: a presieve covers iff it is of the form
`Presieve.ofArrows X π` for a finite effective epimorphic family `π`.
-/
def coherentCoverage [Precoherent C] : Coverage C where
  covering B := { S | ∃ (α : Type) (_ : Finite α) (X : α → C) (π : (a : α) → (X a ⟶ B)),
    S = Presieve.ofArrows X π ∧ EffectiveEpiFamily X π }
  pullback := by
    rintro B₁ B₂ f S ⟨α, _, X₁, π₁, rfl, hS⟩
    -- Pull the effective epi family back along `f` using `Precoherent.pullback`.
    obtain ⟨β,_,X₂,π₂,h,i,ι,hh⟩ := Precoherent.pullback f α X₁ π₁ hS
    refine ⟨Presieve.ofArrows X₂ π₂, ⟨β, inferInstance, X₂, π₂, rfl, h⟩, ?_⟩
    rintro _ _ ⟨b⟩
    -- Each `π₂ b` factors through `π₁ (i b)` via `ι b`, witnessing the coverage condition.
    exact ⟨(X₁ (i b)), ι _, π₁ _, ⟨_⟩, hh _⟩

/--
The coherent Grothendieck topology on a precoherent category `C`, generated by the coherent
coverage.
-/
def coherentTopology [Precoherent C] : GrothendieckTopology C :=
  Coverage.toGrothendieck _ <| coherentCoverage C

/--
The condition `Preregular C` is the property that effective epis can be "pulled back" along any
morphism. This is satisfied e.g. by categories that have pullbacks that preserve effective
epimorphisms (like `Profinite` and `CompHaus`), and categories where every object is projective
(like `Stonean`).
-/
class Preregular : Prop where
  /--
  For `X`, `Y`, `Z`, `f`, `g` like in the diagram, where `g` is an effective epi, there exists
  an object `W`, an effective epi `h : W ⟶ X` and a morphism `i : W ⟶ Z` making the diagram
  commute.
  ```
  W --i-→ Z
  |       |
  h       g
  ↓       ↓
  X --f-→ Y
  ```
  -/
  exists_fac : ∀ {X Y Z : C} (f : X ⟶ Y) (g : Z ⟶ Y) [EffectiveEpi g],
    (∃ (W : C) (h : W ⟶ X) (_ : EffectiveEpi h) (i : W ⟶ Z), i ≫ g = h ≫ f)

/--
The regular coverage on a regular category `C`: a presieve covers iff it consists of a single
effective epimorphism.
-/
def regularCoverage [Preregular C] : Coverage C where
  covering B := { S | ∃ (X : C) (f : X ⟶ B), S = Presieve.ofArrows (fun (_ : Unit) ↦ X)
    (fun (_ : Unit) ↦ f) ∧ EffectiveEpi f }
  pullback := by
    intro X Y f S ⟨Z, π, hπ, h_epi⟩
    -- `Preregular.exists_fac` supplies an effective epi `h` over `X` factoring through `π`.
    have := Preregular.exists_fac f π
    obtain ⟨W, h, _, i, this⟩ := this
    refine ⟨Presieve.singleton h, ⟨?_, ?_⟩⟩
    · exact ⟨W, h, by {rw [Presieve.ofArrows_pUnit h]}, inferInstance⟩
    · intro W g hg
      cases hg
      refine ⟨Z, i, π, ⟨?_, this⟩⟩
      cases hπ
      rw [Presieve.ofArrows_pUnit]
      exact Presieve.singleton.mk

/--
The regular Grothendieck topology on a preregular category `C`, generated by the regular
coverage.
-/
def regularTopology [Preregular C] : GrothendieckTopology C :=
  Coverage.toGrothendieck _ <| regularCoverage C

/-- The extensive coverage on an extensive category `C`

TODO: use general colimit API instead of `IsIso (Sigma.desc π)`
-/
def extensiveCoverage [FinitaryPreExtensive C] : Coverage C where
  covering B := { S | ∃ (α : Type) (_ : Finite α) (X : α → C) (π : (a : α) → (X a ⟶ B)),
    S = Presieve.ofArrows X π ∧ IsIso (Sigma.desc π) }
  pullback := by
    intro X Y f S ⟨α, hα, Z, π, hS, h_iso⟩
    -- Pull each arrow of the family back along `f`; pre-extensivity keeps the coproduct
    -- comparison map an isomorphism.
    let Z' : α → C := fun a ↦ pullback f (π a)
    let π' : (a : α) → Z' a ⟶ Y := fun a ↦ pullback.fst _ _
    refine ⟨@Presieve.ofArrows C _ _ α Z' π', ⟨?_, ?_⟩⟩
    · constructor
      exact ⟨hα, Z', π', ⟨by simp only, FinitaryPreExtensive.sigma_desc_iso (fun x => π x) f h_iso⟩⟩
    · intro W g hg
      rcases hg with ⟨a⟩
      refine ⟨Z a, pullback.snd _ _, π a, ?_, by rw [CategoryTheory.Limits.pullback.condition]⟩
      rw [hS]
      exact Presieve.ofArrows.mk a

/--
The extensive Grothendieck topology on a finitary pre-extensive category `C`, generated by the
extensive coverage.
-/
def extensiveTopology [FinitaryPreExtensive C] : GrothendieckTopology C :=
  Coverage.toGrothendieck _ <| extensiveCoverage C

end CategoryTheory
CategoryTheory\Sites\Coherent\CoherentSheaves.lean
/-
Copyright (c) 2023 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz
-/
import Mathlib.CategoryTheory.Sites.Canonical
import Mathlib.CategoryTheory.Sites.Coherent.Basic
import Mathlib.CategoryTheory.Sites.EffectiveEpimorphic
/-!

# Sheaves for the coherent topology

This file characterises sheaves for the coherent topology

## Main result

* `isSheaf_coherent`: a presheaf of types is a sheaf for the coherent topology if and only
  if it satisfies the sheaf condition with respect to every presieve consisting of a finite
  effective epimorphic family.

-/

namespace CategoryTheory

variable {C : Type*} [Category C] [Precoherent C]

universe w in
lemma isSheaf_coherent (P : Cᵒᵖ ⥤ Type w) :
    Presieve.IsSheaf (coherentTopology C) P ↔
    (∀ (B : C) (α : Type) [Finite α] (X : α → C) (π : (a : α) → (X a ⟶ B)),
      EffectiveEpiFamily X π → (Presieve.ofArrows X π).IsSheafFor P) := by
  constructor
  -- In both directions, unfold the coherent topology to the coverage-level sheaf condition.
  · intro hP B α _ X π h
    simp only [coherentTopology, Presieve.isSheaf_coverage] at hP
    apply hP
    exact ⟨α, inferInstance, X, π, rfl, h⟩
  · intro h
    simp only [coherentTopology, Presieve.isSheaf_coverage]
    rintro B S ⟨α, _, X, π, rfl, hS⟩
    exact h _ _ _ _ hS

namespace coherentTopology

/-- Every Yoneda-presheaf is a sheaf for the coherent topology. -/
theorem isSheaf_yoneda_obj (W : C) : Presieve.IsSheaf (coherentTopology C) (yoneda.obj W) := by
  rw [isSheaf_coherent]
  intro X α _ Y π H
  -- The effective epi family exhibits `X` as a colimit over the generated sieve.
  have h_colim := isColimitOfEffectiveEpiFamilyStruct Y π H.effectiveEpiFamily.some
  rw [← Sieve.generateFamily_eq] at h_colim
  intro x hx
  -- Extend the compatible family along the generated sieve, amalgamate there, restrict back.
  let x_ext := Presieve.FamilyOfElements.sieveExtend x
  have hx_ext := Presieve.FamilyOfElements.Compatible.sieveExtend hx
  let S := Sieve.generate (Presieve.ofArrows Y π)
  obtain ⟨t, t_amalg, t_uniq⟩ : ∃! t, x_ext.IsAmalgamation t :=
    (Sieve.forallYonedaIsSheaf_iff_colimit S).mpr ⟨h_colim⟩ W x_ext hx_ext
  refine ⟨t, ?_, ?_⟩
  · convert Presieve.isAmalgamation_restrict (Sieve.le_generate
      (Presieve.ofArrows Y π)) _ _ t_amalg
    exact (Presieve.restrict_extend hx).symm
  · exact fun y hy ↦ t_uniq y <| Presieve.isAmalgamation_sieveExtend x y hy

variable (C) in
/-- The coherent topology on a precoherent category is subcanonical. -/
theorem subcanonical : Sheaf.Subcanonical (coherentTopology C) :=
  Sheaf.Subcanonical.of_yoneda_isSheaf _ isSheaf_yoneda_obj

end coherentTopology

end CategoryTheory
CategoryTheory\Sites\Coherent\CoherentTopology.lean
/-
Copyright (c) 2023 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz, Nick Kuhn
-/
import Mathlib.CategoryTheory.Sites.Coherent.CoherentSheaves
/-!

# Description of the covering sieves of the coherent topology

This file characterises the covering sieves of the coherent topology.

## Main result

* `coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily`: a sieve is a covering sieve for the
  coherent topology if and only if it contains a finite effective epimorphic family.

-/

namespace CategoryTheory

variable {C : Type*} [Category C] [Precoherent C] {X : C}

/--
For a precoherent category, any sieve that contains an `EffectiveEpiFamily` is a sieve of the
coherent topology.
Note: This is one direction of `mem_sieves_iff_hasEffectiveEpiFamily`, but is needed for the
proof.
-/
theorem coherentTopology.mem_sieves_of_hasEffectiveEpiFamily (S : Sieve X) :
    (∃ (α : Type) (_ : Finite α) (Y : α → C) (π : (a : α) → (Y a ⟶ X)),
      EffectiveEpiFamily Y π ∧ (∀ a : α, (S.arrows) (π a)) ) →
    (S ∈ GrothendieckTopology.sieves (coherentTopology C) X) := by
  intro ⟨α, _, Y, π, hπ⟩
  -- `S` contains the covering presieve generated by the family, so it covers.
  apply (coherentCoverage C).mem_toGrothendieck_sieves_of_superset (R := Presieve.ofArrows Y π)
  · exact fun _ _ h ↦ by cases h; exact hπ.2 _
  · exact ⟨_, inferInstance, Y, π, rfl, hπ.1⟩

/--
Effective epi families in a precoherent category are transitive, in the sense that given an
`EffectiveEpiFamily` and an `EffectiveEpiFamily` over each member, the composition is an
`EffectiveEpiFamily`.
Note: The finiteness condition is an artifact of the proof and is probably unnecessary.
-/
theorem EffectiveEpiFamily.transitive_of_finite {α : Type} [Finite α] {Y : α → C}
    (π : (a : α) → (Y a ⟶ X)) (h : EffectiveEpiFamily Y π) {β : α → Type} [∀ (a : α), Finite (β a)]
    {Y_n : (a : α) → β a → C} (π_n : (a : α) → (b : β a) → (Y_n a b ⟶ Y a))
    (H : ∀ a, EffectiveEpiFamily (Y_n a) (π_n a)) :
    EffectiveEpiFamily
      (fun (c : Σ a, β a) => Y_n c.fst c.snd) (fun c => π_n c.fst c.snd ≫ π c.fst) := by
  rw [← Sieve.effectiveEpimorphic_family]
  -- Reduce to showing the generated sieve of the composed family is covering; Yoneda-sheaves
  -- for the coherent topology then give the required colimit property.
  suffices h₂ : (Sieve.generate (Presieve.ofArrows (fun (⟨a, b⟩ : Σ _, β _) => Y_n a b)
      (fun ⟨a,b⟩ => π_n a b ≫ π a))) ∈ GrothendieckTopology.sieves (coherentTopology C) X by
    change Nonempty _
    rw [← Sieve.forallYonedaIsSheaf_iff_colimit]
    exact fun W => coherentTopology.isSheaf_yoneda_obj W _ h₂
  -- Show that a covering sieve is a colimit, which implies the original set of arrows is regular
  -- epimorphic. We use the transitivity property of saturation
  apply Coverage.Saturate.transitive X (Sieve.generate (Presieve.ofArrows Y π))
  · apply Coverage.Saturate.of
    use α, inferInstance, Y, π
  · intro V f ⟨Y₁, h, g, ⟨hY, hf⟩⟩
    rw [← hf, Sieve.pullback_comp]
    apply (coherentTopology C).pullback_stable'
    apply coherentTopology.mem_sieves_of_hasEffectiveEpiFamily
    -- Need to show that the pullback of the family `π_n` to a given `Y i` is effective epimorphic
    obtain ⟨i⟩ := hY
    exact ⟨β i, inferInstance, Y_n i, π_n i, H i, fun b ↦ ⟨Y_n i b, (𝟙 _), π_n i b ≫ π i,
      ⟨(⟨i, b⟩ : Σ (i : α), β i)⟩, by simp⟩⟩

instance precoherentEffectiveEpiFamilyCompEffectiveEpis {α : Type} [Finite α] {Y Z : α → C}
    (π : (a : α) → (Y a ⟶ X)) [EffectiveEpiFamily Y π] (f : (a : α) → Z a ⟶ Y a)
    [h : ∀ a, EffectiveEpi (f a)] : EffectiveEpiFamily _ fun a ↦ f a ≫ π a := by
  -- View each effective epi as a one-element family, then apply transitivity and reindex
  -- along `Σ a, PUnit ≃ α`.
  simp_rw [effectiveEpi_iff_effectiveEpiFamily] at h
  exact EffectiveEpiFamily.reindex (e := Equiv.sigmaPUnit α) _ _
    (EffectiveEpiFamily.transitive_of_finite (β := fun _ ↦ Unit) _ inferInstance _ h)

/--
A sieve belongs to the coherent topology if and only if it contains a finite
`EffectiveEpiFamily`.
-/
theorem coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily (S : Sieve X) :
    (S ∈ GrothendieckTopology.sieves (coherentTopology C) X) ↔
    (∃ (α : Type) (_ : Finite α) (Y : α → C) (π : (a : α) → (Y a ⟶ X)),
      EffectiveEpiFamily Y π ∧ (∀ a : α, (S.arrows) (π a)) ) := by
  constructor
  · intro h
    -- Induct on the saturation: `of`, `top`, and `transitive` cases.
    induction' h with Y T hS Y Y R S _ _ a b
    · obtain ⟨a, h, Y', π, h', _⟩ := hS
      refine ⟨a, h, Y', π, inferInstance, fun a' ↦ ?_⟩
      obtain ⟨rfl, _⟩ := h'
      exact ⟨Y' a', 𝟙 Y' a', π a', Presieve.ofArrows.mk a', by simp⟩
    · exact ⟨Unit, inferInstance, fun _ => Y, fun _ => (𝟙 Y), inferInstance, by simp⟩
    · -- Compose the outer family with the families obtained from the inductive hypothesis.
      obtain ⟨α, w, Y₁, π, ⟨h₁,h₂⟩⟩ := a
      choose β _ Y_n π_n H using fun a => b (h₂ a)
      exact ⟨(Σ a, β a), inferInstance, fun ⟨a,b⟩ => Y_n a b, fun ⟨a, b⟩ => (π_n a b) ≫ (π a),
        EffectiveEpiFamily.transitive_of_finite _ h₁ _ (fun a => (H a).1),
        fun c => (H c.fst).2 c.snd⟩
  · exact coherentTopology.mem_sieves_of_hasEffectiveEpiFamily S

end CategoryTheory
CategoryTheory\Sites\Coherent\Comparison.lean
/-
Copyright (c) 2023 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.Sites.Coherent.Basic
import Mathlib.CategoryTheory.EffectiveEpi.Comp
import Mathlib.CategoryTheory.EffectiveEpi.Extensive
/-!

# Connections between the regular, extensive and coherent topologies

This file compares the regular, extensive and coherent topologies.

## Main results

* `instance : Precoherent C` given `Preregular C` and `FinitaryPreExtensive C`.

* `extensive_union_regular_generates_coherent`: the union of the regular and extensive coverages
  generates the coherent topology on `C` if `C` is precoherent, preextensive and preregular.

-/

namespace CategoryTheory

open Limits GrothendieckTopology Sieve

variable (C : Type*) [Category C]

instance [Precoherent C] [HasFiniteCoproducts C] : Preregular C where
  exists_fac {X Y Z} f g _ := by
    -- Apply precoherence to the one-element family `{g}` …
    have hp := Precoherent.pullback f PUnit (fun () ↦ Z) (fun () ↦ g)
    simp only [exists_const] at hp
    rw [← effectiveEpi_iff_effectiveEpiFamily g] at hp
    obtain ⟨β, _, X₂, π₂, h, ι, hι⟩ := hp inferInstance
    -- … and bundle the pulled-back family into a single map out of the coproduct.
    refine ⟨∐ X₂, Sigma.desc π₂, inferInstance, Sigma.desc ι, ?_⟩
    ext b
    simpa using hι b

instance [FinitaryPreExtensive C] [Preregular C] : Precoherent C where
  pullback {B₁ B₂} f α _ X₁ π₁ h := by
    refine ⟨α, inferInstance, ?_⟩
    -- Factor `f` through an effective epi `g` using preregularity, then pull back the
    -- coproduct inclusions along `g'`; pre-extensivity keeps the comparison map an iso.
    obtain ⟨Y, g, _, g', hg⟩ := Preregular.exists_fac f (Sigma.desc π₁)
    let X₂ := fun a ↦ pullback g' (Sigma.ι X₁ a)
    let π₂ := fun a ↦ pullback.fst g' (Sigma.ι X₁ a) ≫ g
    let π' := fun a ↦ pullback.fst g' (Sigma.ι X₁ a)
    have _ := FinitaryPreExtensive.sigma_desc_iso (fun a ↦ Sigma.ι X₁ a) g' inferInstance
    refine ⟨X₂, π₂, ?_, ?_⟩
    · have : (Sigma.desc π' ≫ g) = Sigma.desc π₂ := by ext; simp
      rw [← effectiveEpi_desc_iff_effectiveEpiFamily, ← this]
      infer_instance
    · refine ⟨id, fun b ↦ pullback.snd _ _, fun b ↦ ?_⟩
      simp only [π₂, id_eq, Category.assoc, ← hg]
      rw [← Category.assoc, pullback.condition]
      simp

/-- The union of the extensive and regular coverages generates the coherent topology on `C`. -/
theorem extensive_regular_generate_coherent [Preregular C] [FinitaryPreExtensive C] :
    ((extensiveCoverage C) ⊔ (regularCoverage C)).toGrothendieck =
    (coherentTopology C) := by
  ext B S
  refine ⟨fun h ↦ ?_, fun h ↦ ?_⟩
  -- Forward direction: both coverages consist of effective epi families, so their covers
  -- are coherent covers.
  · induction h with
    | of Y T hT =>
      apply Coverage.Saturate.of
      simp only [Coverage.sup_covering, Set.mem_union] at hT
      exact Or.elim hT
        (fun ⟨α, x, X, π, ⟨h, _⟩⟩ ↦ ⟨α, x, X, π, ⟨h, inferInstance⟩⟩)
        (fun ⟨Z, f, ⟨h, _⟩⟩ ↦ ⟨Unit, inferInstance, fun _ ↦ Z, fun _ ↦ f, ⟨h, inferInstance⟩⟩)
    | top => apply Coverage.Saturate.top
    | transitive Y T => apply Coverage.Saturate.transitive Y T<;> [assumption; assumption]
  -- Backward direction: refine a coherent cover by the regular cover `{Sigma.desc f}` and
  -- then by the extensive cover of coproduct inclusions.
  · induction h with
    | of Y T hT =>
      obtain ⟨I, _, X, f, rfl, hT⟩ := hT
      apply Coverage.Saturate.transitive Y (generate (Presieve.ofArrows
        (fun (_ : Unit) ↦ (∐ fun (i : I) => X i)) (fun (_ : Unit) ↦ Sigma.desc f)))
      · apply Coverage.Saturate.of
        simp only [Coverage.sup_covering, extensiveCoverage, regularCoverage, Set.mem_union,
          Set.mem_setOf_eq]
        exact Or.inr ⟨_, Sigma.desc f, ⟨rfl, inferInstance⟩⟩
      · rintro R g ⟨W, ψ, σ, ⟨⟩, rfl⟩
        change _ ∈ sieves ((extensiveCoverage C) ⊔ (regularCoverage C)).toGrothendieck _
        rw [Sieve.pullback_comp]
        apply pullback_stable'
        have : generate (Presieve.ofArrows X fun (i : I) ↦ Sigma.ι X i) ≤
            (generate (Presieve.ofArrows X f)).pullback (Sigma.desc f) := by
          rintro Q q ⟨E, e, r, ⟨hq, rfl⟩⟩
          exact ⟨E, e, r ≫ (Sigma.desc f), by cases hq; simpa using Presieve.ofArrows.mk _,
            by simp⟩
        apply Coverage.saturate_of_superset _ this
        apply Coverage.Saturate.of
        refine Or.inl ⟨I, inferInstance, _, _, ⟨rfl, ?_⟩⟩
        convert IsIso.id _
        aesop
    | top => apply Coverage.Saturate.top
    | transitive Y T => apply Coverage.Saturate.transitive Y T<;> [assumption; assumption]

end CategoryTheory
CategoryTheory\Sites\Coherent\Equivalence.lean
/-
Copyright (c) 2024 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.Sites.Coherent.SheafComparison
import Mathlib.CategoryTheory.Sites.Equivalence
/-!

# Coherence and equivalence of categories

This file proves that the coherent and regular topologies transfer nicely along equivalences of
categories.

-/

namespace CategoryTheory

variable {C : Type*} [Category C]

open GrothendieckTopology

namespace Equivalence

variable {D : Type*} [Category D]

section Coherent

variable [Precoherent C]

/-- `Precoherent` is preserved by equivalence of categories. -/
theorem precoherent (e : C ≌ D) : Precoherent D := e.inverse.reflects_precoherent

instance [EssentiallySmall C] : Precoherent (SmallModel C) := (equivSmallModel C).precoherent

instance (e : C ≌ D) : haveI := precoherent e
    e.inverse.IsDenseSubsite (coherentTopology D) (coherentTopology C) where
  functorPushforward_mem_iff := by
    -- The coherent topology on `C` is induced from that on `D` along `e.inverse`.
    rw [coherentTopology.eq_induced e.inverse]
    simp only [Functor.mem_inducedTopology_sieves_iff, implies_true]

variable (A : Type*) [Category A]

/-- Equivalent precoherent categories give equivalent coherent toposes. -/
@[simps!]
def sheafCongrPrecoherent (e : C ≌ D) : haveI := e.precoherent
    Sheaf (coherentTopology C) A ≌ Sheaf (coherentTopology D) A := e.sheafCongr _ _ _

open Presheaf

/-- The coherent sheaf condition can be checked after precomposing with the equivalence. -/
theorem precoherent_isSheaf_iff (e : C ≌ D) (F : Cᵒᵖ ⥤ A) : haveI := e.precoherent
    IsSheaf (coherentTopology C) F ↔ IsSheaf (coherentTopology D) (e.inverse.op ⋙ F) := by
  refine ⟨fun hF ↦ ((e.sheafCongrPrecoherent A).functor.obj ⟨F, hF⟩).cond, fun hF ↦ ?_⟩
  -- `F` is isomorphic to `e.functor.op ⋙ e.inverse.op ⋙ F` via the unit of the equivalence.
  rw [isSheaf_of_iso_iff (P' := e.functor.op ⋙ e.inverse.op ⋙ F)]
  · exact (e.sheafCongrPrecoherent A).inverse.obj ⟨e.inverse.op ⋙ F, hF⟩ |>.cond
  · exact isoWhiskerRight e.op.unitIso F

/-- The coherent sheaf condition on an essentially small site can be checked after precomposing
with the equivalence with a small category. -/
theorem precoherent_isSheaf_iff_of_essentiallySmall [EssentiallySmall C] (F : Cᵒᵖ ⥤ A) :
    IsSheaf (coherentTopology C) F ↔
    IsSheaf (coherentTopology (SmallModel C)) ((equivSmallModel C).inverse.op ⋙ F) :=
  precoherent_isSheaf_iff _ _ _

end Coherent

section Regular

variable [Preregular C]

/-- `Preregular` is preserved by equivalence of categories. -/
theorem preregular (e : C ≌ D) : Preregular D := e.inverse.reflects_preregular

instance [EssentiallySmall C] : Preregular (SmallModel C) := (equivSmallModel C).preregular

instance (e : C ≌ D) : haveI := preregular e
    e.inverse.IsDenseSubsite (regularTopology D) (regularTopology C) where
  functorPushforward_mem_iff := by
    -- Same argument as in the coherent case, for the regular topology.
    rw [regularTopology.eq_induced e.inverse]
    simp only [Functor.mem_inducedTopology_sieves_iff, implies_true]

variable (A : Type*) [Category A]

/-- Equivalent preregular categories give equivalent regular toposes. -/
@[simps!]
def sheafCongrPreregular (e : C ≌ D) : haveI := e.preregular
    Sheaf (regularTopology C) A ≌ Sheaf (regularTopology D) A := e.sheafCongr _ _ _

open Presheaf

/-- The regular sheaf condition can be checked after precomposing with the equivalence. -/
theorem preregular_isSheaf_iff (e : C ≌ D) (F : Cᵒᵖ ⥤ A) : haveI := e.preregular
    IsSheaf (regularTopology C) F ↔ IsSheaf (regularTopology D) (e.inverse.op ⋙ F) := by
  refine ⟨fun hF ↦ ((e.sheafCongrPreregular A).functor.obj ⟨F, hF⟩).cond, fun hF ↦ ?_⟩
  rw [isSheaf_of_iso_iff (P' := e.functor.op ⋙ e.inverse.op ⋙ F)]
  · exact (e.sheafCongrPreregular A).inverse.obj ⟨e.inverse.op ⋙ F, hF⟩ |>.cond
  · exact isoWhiskerRight e.op.unitIso F

/-- The regular sheaf condition on an essentially small site can be checked after precomposing
with the equivalence with a small category. -/
theorem preregular_isSheaf_iff_of_essentiallySmall [EssentiallySmall C] (F : Cᵒᵖ ⥤ A) :
    IsSheaf (regularTopology C) F ↔
    IsSheaf (regularTopology (SmallModel C)) ((equivSmallModel C).inverse.op ⋙ F) :=
  preregular_isSheaf_iff _ _ _

end Regular

end Equivalence

end CategoryTheory
CategoryTheory\Sites\Coherent\ExtensiveSheaves.lean
/-
Copyright (c) 2023 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson, Filippo A. E. Nuccio, Riccardo Brasca
-/
import Mathlib.CategoryTheory.Limits.Preserves.Finite
import Mathlib.CategoryTheory.Sites.Canonical
import Mathlib.CategoryTheory.Sites.Coherent.Basic
import Mathlib.CategoryTheory.Sites.Preserves
/-!

# Sheaves for the extensive topology

This file characterises sheaves for the extensive topology.

## Main result

* `isSheaf_iff_preservesFiniteProducts`: In a finitary extensive category, the sheaves for the
  extensive topology are precisely those preserving finite products.

-/

universe w

namespace CategoryTheory

open Limits Presieve Opposite

variable {C : Type*} [Category C] {D : Type*} [Category D]

variable [FinitaryPreExtensive C]

/-- A presieve is *extensive* if it is finite and its arrows induce an isomorphism from the
coproduct to the target. -/
class Presieve.Extensive {X : C} (R : Presieve X) : Prop where
  /-- `R` consists of a finite collection of arrows that together induce an isomorphism from the
  coproduct of their sources. -/
  arrows_nonempty_isColimit : ∃ (α : Type) (_ : Finite α) (Z : α → C) (π : (a : α) → (Z a ⟶ X)),
    R = Presieve.ofArrows Z π ∧ Nonempty (IsColimit (Cofan.mk X π))

instance {X : C} (S : Presieve X) [S.Extensive] : S.hasPullbacks where
  has_pullbacks := by
    obtain ⟨_, _, _, _, rfl, ⟨hc⟩⟩ := Presieve.Extensive.arrows_nonempty_isColimit (R := S)
    intro _ _ _ _ _ hg
    cases hg
    -- Pullbacks along coproduct inclusions exist in a finitary pre-extensive category.
    apply FinitaryPreExtensive.hasPullbacks_of_is_coproduct hc

/--
A finite product preserving presheaf is a sheaf for the extensive topology on a category which is
`FinitaryPreExtensive`.
-/
theorem isSheafFor_extensive_of_preservesFiniteProducts {X : C} (S : Presieve X) [S.Extensive]
    (F : Cᵒᵖ ⥤ Type w) [PreservesFiniteProducts F] : S.IsSheafFor F := by
  obtain ⟨α, _, Z, π, rfl, ⟨hc⟩⟩ := Extensive.arrows_nonempty_isColimit (R := S)
  have : (ofArrows Z (Cofan.mk X π).inj).hasPullbacks :=
    (inferInstance : (ofArrows Z π).hasPullbacks)
  cases nonempty_fintype α
  -- The sheaf condition for a cofan presieve is exactly preservation of the product.
  exact isSheafFor_of_preservesProduct _ _ hc

instance {α : Type} [Finite α] (Z : α → C) : (ofArrows Z (fun i ↦ Sigma.ι Z i)).Extensive :=
  ⟨⟨α, inferInstance, Z, (fun i ↦ Sigma.ι Z i), rfl, ⟨coproductIsCoproduct _⟩⟩⟩

/-- Every Yoneda-presheaf is a sheaf for the extensive topology. -/
theorem extensiveTopology.isSheaf_yoneda_obj (W : C) :
    Presieve.IsSheaf (extensiveTopology C) (yoneda.obj W) := by
  erw [isSheaf_coverage]
  intro X R ⟨Y, α, Z, π, hR, hi⟩
  have : IsIso (Sigma.desc (Cofan.inj (Cofan.mk X π))) := hi
  have : R.Extensive := ⟨Y, α, Z, π, hR, ⟨Cofan.isColimitOfIsIsoSigmaDesc (Cofan.mk X π)⟩⟩
  -- Representable presheaves preserve finite products, so the previous theorem applies.
  exact isSheafFor_extensive_of_preservesFiniteProducts _ _

/-- The extensive topology on a finitary pre-extensive category is subcanonical. -/
theorem extensiveTopology.subcanonical : Sheaf.Subcanonical (extensiveTopology C) :=
  Sheaf.Subcanonical.of_yoneda_isSheaf _ isSheaf_yoneda_obj

variable [FinitaryExtensive C]

/--
A presheaf of sets on a category which is `FinitaryExtensive` is a sheaf iff it preserves finite
products.
-/
theorem Presieve.isSheaf_iff_preservesFiniteProducts (F : Cᵒᵖ ⥤ Type w) :
    Presieve.IsSheaf (extensiveTopology C) F ↔ Nonempty (PreservesFiniteProducts F) := by
  refine ⟨fun hF ↦ ⟨⟨fun α _ ↦ ⟨fun {K} ↦ ?_⟩⟩⟩, fun hF ↦ ?_⟩
  · erw [Presieve.isSheaf_coverage] at hF
    let Z : α → C := fun i ↦ unop (K.obj ⟨i⟩)
    have : (Presieve.ofArrows Z (Cofan.mk (∐ Z) (Sigma.ι Z)).inj).hasPullbacks :=
      (inferInstance : (Presieve.ofArrows Z (Sigma.ι Z)).hasPullbacks)
    have : ∀ (i : α), Mono (Cofan.inj (Cofan.mk (∐ Z) (Sigma.ι Z)) i) :=
      (inferInstance : ∀ (i : α), Mono (Sigma.ι Z i))
    let i : K ≅ Discrete.functor (fun i ↦ op (Z i)) := Discrete.natIsoFunctor
    -- Preservation of the product is deduced from the sheaf condition on the inclusion
    -- presieve, using `FinitaryExtensive.isPullback_initial_to_sigma_ι` for disjointness.
    let _ : PreservesLimit (Discrete.functor (fun i ↦ op (Z i))) F :=
        Presieve.preservesProductOfIsSheafFor F ?_ initialIsInitial _ (coproductIsCoproduct Z)
        (FinitaryExtensive.isPullback_initial_to_sigma_ι Z)
        (hF (Presieve.ofArrows Z (fun i ↦ Sigma.ι Z i)) ?_)
    · exact preservesLimitOfIsoDiagram F i.symm
    · -- The empty presieve over the initial object is also a cover.
      apply hF
      refine ⟨Empty, inferInstance, Empty.elim, IsEmpty.elim inferInstance, rfl, ⟨default,?_, ?_⟩⟩
      · ext b
        cases b
      · simp only [eq_iff_true_of_subsingleton]
    · refine ⟨α, inferInstance, Z, (fun i ↦ Sigma.ι Z i), rfl, ?_⟩
      suffices Sigma.desc (fun i ↦ Sigma.ι Z i) = 𝟙 _ by rw [this]; infer_instance
      ext
      simp
  · let _ := hF.some
    erw [Presieve.isSheaf_coverage]
    intro X R ⟨Y, α, Z, π, hR, hi⟩
    have : IsIso (Sigma.desc (Cofan.inj (Cofan.mk X π))) := hi
    have : R.Extensive := ⟨Y, α, Z, π, hR, ⟨Cofan.isColimitOfIsIsoSigmaDesc (Cofan.mk X π)⟩⟩
    exact isSheafFor_extensive_of_preservesFiniteProducts R F

/--
A presheaf on a category which is `FinitaryExtensive` is a sheaf iff it preserves finite
products.
-/
theorem Presheaf.isSheaf_iff_preservesFiniteProducts (F : Cᵒᵖ ⥤ D) :
    IsSheaf (extensiveTopology C) F ↔ Nonempty (PreservesFiniteProducts F) := by
  constructor
  · intro h
    rw [IsSheaf] at h
    refine ⟨⟨fun J _ ↦ ⟨fun {K} ↦ ⟨fun {c} hc ↦ ?_⟩⟩⟩⟩
    -- Reduce to presheaves of types by testing against each coyoneda functor.
    apply coyonedaJointlyReflectsLimits
    intro ⟨E⟩
    specialize h E
    rw [Presieve.isSheaf_iff_preservesFiniteProducts] at h
    have : PreservesLimit K (F.comp (coyoneda.obj ⟨E⟩)) := (h.some.preserves J).preservesLimit
    change IsLimit ((F.comp (coyoneda.obj ⟨E⟩)).mapCone c)
    apply this.preserves
    exact hc
  · intro ⟨_⟩ E
    rw [Presieve.isSheaf_iff_preservesFiniteProducts]
    exact ⟨inferInstance⟩

noncomputable instance (F : Sheaf (extensiveTopology C) D) : PreservesFiniteProducts F.val :=
  ((Presheaf.isSheaf_iff_preservesFiniteProducts F.val).mp F.cond).some

end CategoryTheory
CategoryTheory\Sites\Coherent\ExtensiveTopology.lean
/-
Copyright (c) 2024 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.Sites.Coherent.Basic
/-!

# Description of the covering sieves of the extensive topology

This file characterises the covering sieves of the extensive topology.

## Main result

* `extensiveTopology.mem_sieves_iff_contains_colimit_cofan`: a sieve is a covering sieve for the
  extensive topology if and only if it contains a finite family of morphisms with fixed target
  exhibiting the target as a coproduct of the sources.

-/

open CategoryTheory Limits

variable {C : Type*} [Category C] [FinitaryPreExtensive C]

namespace CategoryTheory

lemma extensiveTopology.mem_sieves_iff_contains_colimit_cofan {X : C} (S : Sieve X) :
    S ∈ (extensiveTopology C).sieves X ↔
      (∃ (α : Type) (_ : Finite α) (Y : α → C) (π : (a : α) → (Y a ⟶ X)),
        Nonempty (IsColimit (Cofan.mk X π)) ∧ (∀ a : α, (S.arrows) (π a))) := by
  constructor
  · intro h
    -- Induct on the saturation of the extensive coverage.
    induction h with
    | of X S hS =>
      obtain ⟨α, _, Y, π, h, h'⟩ := hS
      refine ⟨α, inferInstance, Y, π, ?_, fun a ↦ ?_⟩
      · -- `IsIso (Sigma.desc π)` upgrades the cofan to a colimit cofan.
        have : IsIso (Sigma.desc (Cofan.mk X π).inj) := by simpa using h'
        exact ⟨Cofan.isColimitOfIsIsoSigmaDesc (Cofan.mk X π)⟩
      · obtain ⟨rfl, _⟩ := h
        exact ⟨Y a, 𝟙 Y a, π a, Presieve.ofArrows.mk a, by simp⟩
    | top X =>
      -- The maximal sieve contains the singleton cofan `{𝟙 X}`.
      refine ⟨Unit, inferInstance, fun _ => X, fun _ => (𝟙 X), ⟨?_⟩, by simp⟩
      have : IsIso (Sigma.desc (Cofan.mk X fun (_ : Unit) ↦ 𝟙 X).inj) := by
        have : IsIso (coproductUniqueIso (fun () => X)).hom := inferInstance
        exact this
      exact Cofan.isColimitOfIsIsoSigmaDesc (Cofan.mk X _)
    | transitive X R S _ _ a b =>
      -- Compose the outer cofan with the cofans obtained from the inductive hypothesis.
      obtain ⟨α, w, Y₁, π, h, h'⟩ := a
      choose β _ Y_n π_n H using fun a => b (h' a)
      exact ⟨(Σ a, β a), inferInstance, fun ⟨a,b⟩ => Y_n a b, fun ⟨a, b⟩ => (π_n a b) ≫ (π a),
        ⟨Limits.Cofan.isColimitTrans _ h.some _ (fun a ↦ (H a).1.some)⟩,
        fun c => (H c.fst).2 c.snd⟩
  · intro ⟨α, _, Y, π, h, h'⟩
    apply (extensiveCoverage C).mem_toGrothendieck_sieves_of_superset
      (R := Presieve.ofArrows Y π)
    · exact fun _ _ hh ↦ by cases hh; exact h' _
    · refine ⟨α, inferInstance, Y, π, rfl, ?_⟩
      erw [Limits.Cofan.isColimit_iff_isIso_sigmaDesc (c := Cofan.mk X π)]
      exact h

end CategoryTheory
CategoryTheory\Sites\Coherent\LocallySurjective.lean
/- Copyright (c) 2024 Dagur Asgeirsson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Dagur Asgeirsson -/ import Mathlib.CategoryTheory.Sites.Coherent.ExtensiveTopology import Mathlib.CategoryTheory.Sites.Coherent.SheafComparison import Mathlib.CategoryTheory.Sites.LocallySurjective /-! # Locally surjective morphisms of coherent sheaves This file characterises locally surjective morphisms of presheaves for the coherent, regular and extensive topologies. ## Main results * `regularTopology.isLocallySurjective_iff` A morphism of presheaves `f : F ⟶ G` is locally surjective for the regular topology iff for every object `X` of `C`, and every `y : G(X)`, there is an effective epimorphism `φ : X' ⟶ X` and an `x : F(X)` such that `f_{X'}(x) = G(φ)(y)`. * `coherentTopology.isLocallySurjective_iff` a morphism of sheaves for the coherent topology on a preregular finitary extensive category is locally surjective if and only if it is locally surjective for the regular topology. * `extensiveTopology.isLocallySurjective_iff` a morphism of sheaves for the extensive topology on a finitary extensive category is locally surjective iff it is objectwise surjective. 
-/ universe w open CategoryTheory Sheaf Limits Opposite attribute [local instance] ConcreteCategory.hasCoeToSort ConcreteCategory.instFunLike namespace CategoryTheory variable {C : Type*} (D : Type*) [Category C] [Category D] [ConcreteCategory.{w} D] lemma regularTopology.isLocallySurjective_iff [Preregular C] {F G : Cᵒᵖ ⥤ D} (f : F ⟶ G) : Presheaf.IsLocallySurjective (regularTopology C) f ↔ ∀ (X : C) (y : G.obj ⟨X⟩), (∃ (X' : C) (φ : X' ⟶ X) (_ : EffectiveEpi φ) (x : F.obj ⟨X'⟩), f.app ⟨X'⟩ x = G.map ⟨φ⟩ y) := by constructor · intro ⟨h⟩ X y specialize h y rw [regularTopology.mem_sieves_iff_hasEffectiveEpi] at h obtain ⟨X', π, h, h'⟩ := h exact ⟨X', π, h, h'⟩ · intro h refine ⟨fun y ↦ ?_⟩ obtain ⟨X', π, h, h'⟩ := h _ y rw [regularTopology.mem_sieves_iff_hasEffectiveEpi] exact ⟨X', π, h, h'⟩ lemma extensiveTopology.surjective_of_isLocallySurjective_sheafOfTypes [FinitaryPreExtensive C] {F G : Cᵒᵖ ⥤ Type w} (f : F ⟶ G) [PreservesFiniteProducts F] [PreservesFiniteProducts G] (h : Presheaf.IsLocallySurjective (extensiveTopology C) f) {X : C} : Function.Surjective (f.app (op X)) := by intro x replace h := h.1 x rw [mem_sieves_iff_contains_colimit_cofan] at h obtain ⟨α, _, Y, π, h, h'⟩ := h let y : (a : α) → (F.obj ⟨Y a⟩) := fun a ↦ (h' a).choose let _ : Fintype α := Fintype.ofFinite _ let ht := (Types.productLimitCone (fun a ↦ F.obj ⟨Y a⟩)).isLimit let ht' := (Functor.Initial.isLimitWhiskerEquiv (Discrete.opposite α).inverse (Cocone.op (Cofan.mk X π))).symm h.some.op let i : ((a : α) → (F.obj ⟨Y a⟩)) ≅ (F.obj ⟨X⟩) := ht.conePointsIsoOfNatIso (isLimitOfPreserves F ht') (Discrete.natIso (fun _ ↦ (Iso.refl (F.obj ⟨_⟩)))) refine ⟨i.hom y, ?_⟩ apply Concrete.isLimit_ext _ (isLimitOfPreserves G ht') intro ⟨a⟩ simp only [Functor.comp_obj, Discrete.opposite_inverse_obj, Functor.op_obj, Discrete.functor_obj, Functor.mapCone_pt, Cone.whisker_pt, Cocone.op_pt, Cofan.mk_pt, Functor.const_obj_obj, Functor.mapCone_π_app, Cone.whisker_π, Cocone.op_π, whiskerLeft_app, NatTrans.op_app, 
Cofan.mk_ι_app] have : f.app ⟨Y a⟩ (y a) = G.map (π a).op x := (h' a).choose_spec change _ = G.map (π a).op x erw [← this, ← NatTrans.naturality_apply (φ := f)] apply congrArg change (i.hom ≫ F.map (π a).op) y = _ erw [IsLimit.map_π] rfl lemma extensiveTopology.presheafIsLocallySurjective_iff [FinitaryPreExtensive C] {F G : Cᵒᵖ ⥤ D} (f : F ⟶ G) [PreservesFiniteProducts F] [PreservesFiniteProducts G] [PreservesFiniteProducts (forget D)] : Presheaf.IsLocallySurjective (extensiveTopology C) f ↔ ∀ (X : C), Function.Surjective (f.app (op X)) := by constructor · rw [Presheaf.isLocallySurjective_iff_whisker_forget (J := extensiveTopology C)] exact fun h _ ↦ surjective_of_isLocallySurjective_sheafOfTypes (whiskerRight f (forget D)) h · intro h refine ⟨fun {X} y ↦ ?_⟩ obtain ⟨x, hx⟩ := h X y convert (extensiveTopology C).top_mem' X rw [← Sieve.id_mem_iff_eq_top] simpa [Presheaf.imageSieve] using ⟨x, hx⟩ lemma extensiveTopology.isLocallySurjective_iff [FinitaryExtensive C] {F G : Sheaf (extensiveTopology C) D} (f : F ⟶ G) [PreservesFiniteProducts (forget D)] : IsLocallySurjective f ↔ ∀ (X : C), Function.Surjective (f.val.app (op X)) := extensiveTopology.presheafIsLocallySurjective_iff _ f.val lemma regularTopology.isLocallySurjective_sheafOfTypes [Preregular C] [FinitaryPreExtensive C] {F G : Cᵒᵖ ⥤ Type w} (f : F ⟶ G) [PreservesFiniteProducts F] [PreservesFiniteProducts G] (h : Presheaf.IsLocallySurjective (coherentTopology C) f) : Presheaf.IsLocallySurjective (regularTopology C) f where imageSieve_mem y := by replace h := h.1 y rw [coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily] at h obtain ⟨α, _, Z, π, h, h'⟩ := h rw [mem_sieves_iff_hasEffectiveEpi] let x : (a : α) → (F.obj ⟨Z a⟩) := fun a ↦ (h' a).choose let _ : Fintype α := Fintype.ofFinite _ let i' : ((a : α) → (F.obj ⟨Z a⟩)) ≅ (F.obj ⟨∐ Z⟩) := (Types.productIso _).symm ≪≫ (PreservesProduct.iso F _).symm ≪≫ F.mapIso (opCoproductIsoProduct _).symm refine ⟨∐ Z, Sigma.desc π, inferInstance, i'.hom x, ?_⟩ have := 
preservesLimitsOfShapeOfEquiv (Discrete.opposite α).symm G apply Concrete.isLimit_ext _ (isLimitOfPreserves G (coproductIsCoproduct Z).op) intro ⟨⟨a⟩⟩ simp only [Functor.comp_obj, Functor.op_obj, Discrete.functor_obj, Functor.mapCone_pt, Cocone.op_pt, Cofan.mk_pt, Functor.const_obj_obj, Functor.mapCone_π_app, Cocone.op_π, NatTrans.op_app, Cofan.mk_ι_app, Functor.mapIso_symm, Iso.symm_hom, Iso.trans_hom, Functor.mapIso_inv, types_comp_apply, i', ← NatTrans.naturality_apply] have : f.app ⟨Z a⟩ (x a) = G.map (π a).op y := (h' a).choose_spec convert this · change F.map _ (F.map _ _) = _ rw [← FunctorToTypes.map_comp_apply, opCoproductIsoProduct_inv_comp_ι, ← piComparison_comp_π] change ((PreservesProduct.iso F _).hom ≫ _) _ = _ have := Types.productIso_hom_comp_eval (fun a ↦ F.obj (op (Z a))) a rw [← Iso.eq_inv_comp] at this simp only [types_comp_apply, inv_hom_id_apply, congrFun this x] · change G.map _ (G.map _ _) = _ simp only [← FunctorToTypes.map_comp_apply, ← op_comp, Sigma.ι_desc] lemma coherentTopology.presheafIsLocallySurjective_iff {F G : Cᵒᵖ ⥤ D} (f : F ⟶ G) [Preregular C] [FinitaryPreExtensive C] [PreservesFiniteProducts F] [PreservesFiniteProducts G] [PreservesFiniteProducts (forget D)] : Presheaf.IsLocallySurjective (coherentTopology C) f ↔ Presheaf.IsLocallySurjective (regularTopology C) f := by constructor · rw [Presheaf.isLocallySurjective_iff_whisker_forget, Presheaf.isLocallySurjective_iff_whisker_forget (J := regularTopology C)] exact regularTopology.isLocallySurjective_sheafOfTypes _ · refine Presheaf.isLocallySurjective_of_le (J := regularTopology C) ?_ _ rw [← extensive_regular_generate_coherent] exact (Coverage.gi _).gc.monotone_l le_sup_right lemma coherentTopology.isLocallySurjective_iff [Preregular C] [FinitaryExtensive C] {F G : Sheaf (coherentTopology C) D} (f : F ⟶ G) [PreservesFiniteProducts (forget D)] : IsLocallySurjective f ↔ Presheaf.IsLocallySurjective (regularTopology C) f.val := presheafIsLocallySurjective_iff _ f.val end 
CategoryTheory
CategoryTheory\Sites\Coherent\ReflectsPrecoherent.lean
/-
Copyright (c) 2024 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.EffectiveEpi.Enough
import Mathlib.CategoryTheory.EffectiveEpi.Preserves
import Mathlib.CategoryTheory.Sites.Coherent.CoherentTopology
/-!

# Reflecting the property of being precoherent

We prove that given a fully faithful functor `F : C ⥤ D` which preserves and reflects finite
effective epimorphic families, such that for every object `X` of `D` there exists an object `W` of
`C` with an effective epi `π : F.obj W ⟶ X`, the category `C` is `Precoherent` whenever `D` is.
-/

namespace CategoryTheory

variable {C D : Type*} [Category C] [Category D] (F : C ⥤ D)
  [F.PreservesFiniteEffectiveEpiFamilies] [F.ReflectsFiniteEffectiveEpiFamilies]
  [F.EffectivelyEnough] [Precoherent D] [F.Full] [F.Faithful]

/-- A fully faithful functor `F : C ⥤ D` into a precoherent category which preserves and reflects
finite effective epimorphic families, and has effectively enough objects, reflects precoherence:
`C` is `Precoherent`. -/
lemma Functor.reflects_precoherent : Precoherent C where
  pullback {B₁ B₂} f α _ X₁ π₁ _ := by
    -- Pull the image family back along `F.map f` using precoherence of `D`.
    obtain ⟨β, _, Y₂, τ₂, H, i, ι, hh⟩ := Precoherent.pullback (F.map f) _ _
      (fun a ↦ F.map (π₁ a)) inferInstance
    -- Lift the resulting family to `C`: precompose each leg with an effective epi from the
    -- image of `F` (via `F.effectiveEpiOver`) and take preimages under the fully faithful `F`.
    refine ⟨β, inferInstance, _, fun b ↦ F.preimage (F.effectiveEpiOver (Y₂ b) ≫ τ₂ b),
      F.finite_effectiveEpiFamily_of_map _ _ ?_,
      ⟨i, fun b ↦ F.preimage (F.effectiveEpiOver (Y₂ b) ≫ ι b), ?_⟩⟩
    -- The mapped family is effective epimorphic, since `F.map_preimage` recovers the
    -- composites in `D`.
    · simp only [Functor.map_preimage]
      infer_instance
    -- The compatibility squares commute, checked after applying the faithful `F`.
    · intro b
      apply F.map_injective
      simp [hh b]

end CategoryTheory
CategoryTheory\Sites\Coherent\ReflectsPreregular.lean
/-
Copyright (c) 2024 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.EffectiveEpi.Enough
import Mathlib.CategoryTheory.EffectiveEpi.Preserves
import Mathlib.CategoryTheory.Sites.Coherent.RegularTopology
/-!

# Reflecting the property of being preregular

We prove that given a fully faithful functor `F : C ⥤ D`, with `Preregular D`, such that for every
object `X` of `D` there exists an object `W` of `C` with an effective epi `π : F.obj W ⟶ X`, the
category `C` is `Preregular`.
-/

namespace CategoryTheory

variable {C D : Type*} [Category C] [Category D] (F : C ⥤ D)
  [F.PreservesEffectiveEpis] [F.ReflectsEffectiveEpis]
  [F.EffectivelyEnough] [Preregular D] [F.Full] [F.Faithful]

/-- A fully faithful functor `F : C ⥤ D` into a preregular category which preserves and reflects
effective epimorphisms, and has effectively enough objects, reflects preregularity: `C` is
`Preregular`. -/
lemma Functor.reflects_preregular : Preregular C where
  exists_fac f g _ := by
    -- Factor `F.map f` through `F.map g` using preregularity of `D`.
    obtain ⟨W, f', _, i, w⟩ := Preregular.exists_fac (F.map f) (F.map g)
    -- Lift the factorisation to `C`: precompose with the effective epi `F.effectiveEpiOver W`
    -- to land in the image of `F`, then take preimages under the fully faithful `F`.
    refine ⟨_, F.preimage (F.effectiveEpiOver W ≫ f'), ⟨F.effectiveEpi_of_map _ ?_,
      F.preimage (F.effectiveEpiOver W ≫ i), ?_⟩⟩
    -- The preimage is an effective epi because its image (a composite of effective epis) is.
    · simp only [Functor.map_preimage]
      infer_instance
    -- The factorisation square commutes, checked after applying the faithful `F`.
    · apply F.map_injective
      simp [w]

end CategoryTheory
CategoryTheory\Sites\Coherent\RegularSheaves.lean
/- Copyright (c) 2023 Dagur Asgeirsson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Dagur Asgeirsson, Filippo A. E. Nuccio, Riccardo Brasca -/ import Mathlib.CategoryTheory.EffectiveEpi.Preserves import Mathlib.CategoryTheory.Limits.Final.ParallelPair import Mathlib.CategoryTheory.Preadditive.Projective import Mathlib.CategoryTheory.Sites.Canonical import Mathlib.CategoryTheory.Sites.Coherent.Basic import Mathlib.CategoryTheory.Sites.EffectiveEpimorphic /-! # Sheaves for the regular topology This file characterises sheaves for the regular topology. ## Main results * `equalizerCondition_iff_isSheaf`: In a preregular category with pullbacks, the sheaves for the regular topology are precisely the presheaves satisfying an equaliser condition with respect to effective epimorphisms. * `isSheaf_of_projective`: In a preregular category in which every object is projective, every presheaf is a sheaf for the regular topology. -/ namespace CategoryTheory open Limits variable {C D E : Type*} [Category C] [Category D] [Category E] open Opposite Presieve Functor /-- A presieve is *regular* if it consists of a single effective epimorphism. -/ class Presieve.regular {X : C} (R : Presieve X) : Prop where /-- `R` consists of a single epimorphism. -/ single_epi : ∃ (Y : C) (f : Y ⟶ X), R = Presieve.ofArrows (fun (_ : Unit) ↦ Y) (fun (_ : Unit) ↦ f) ∧ EffectiveEpi f namespace regularTopology lemma equalizerCondition_w (P : Cᵒᵖ ⥤ D) {X B : C} {π : X ⟶ B} (c : PullbackCone π π) : P.map π.op ≫ P.map c.fst.op = P.map π.op ≫ P.map c.snd.op := by simp only [← Functor.map_comp, ← op_comp, c.condition] /-- A contravariant functor on `C` satisifies `SingleEqualizerCondition` with respect to a morphism `π` if it takes its kernel pair to an equalizer diagram. 
-/ def SingleEqualizerCondition (P : Cᵒᵖ ⥤ D) ⦃X B : C⦄ (π : X ⟶ B) : Prop := ∀ (c : PullbackCone π π) (_ : IsLimit c), Nonempty (IsLimit (Fork.ofι (P.map π.op) (equalizerCondition_w P c))) /-- A contravariant functor on `C` satisfies `EqualizerCondition` if it takes kernel pairs of effective epimorphisms to equalizer diagrams. -/ def EqualizerCondition (P : Cᵒᵖ ⥤ D) : Prop := ∀ ⦃X B : C⦄ (π : X ⟶ B) [EffectiveEpi π], SingleEqualizerCondition P π /-- The equalizer condition is preserved by natural isomorphism. -/ theorem equalizerCondition_of_natIso {P P' : Cᵒᵖ ⥤ D} (i : P ≅ P') (hP : EqualizerCondition P) : EqualizerCondition P' := fun X B π _ c hc ↦ ⟨Fork.isLimitOfIsos _ (hP π c hc).some _ (i.app _) (i.app _) (i.app _)⟩ /-- Precomposing with a pullback-preserving functor preserves the equalizer condition. -/ theorem equalizerCondition_precomp_of_preservesPullback (P : Cᵒᵖ ⥤ D) (F : E ⥤ C) [∀ {X B} (π : X ⟶ B) [EffectiveEpi π], PreservesLimit (cospan π π) F] [F.PreservesEffectiveEpis] (hP : EqualizerCondition P) : EqualizerCondition (F.op ⋙ P) := by intro X B π _ c hc have h : P.map (F.map π).op = (F.op ⋙ P).map π.op := by simp refine ⟨(IsLimit.equivIsoLimit (ForkOfι.ext ?_ _ h)) ?_⟩ · simp only [Functor.comp_map, op_map, Quiver.Hom.unop_op, ← map_comp, ← op_comp, c.condition] · refine (hP (F.map π) (PullbackCone.mk (F.map c.fst) (F.map c.snd) ?_) ?_).some · simp only [← map_comp, c.condition] · exact (isLimitMapConePullbackConeEquiv F c.condition) (isLimitOfPreserves F (hc.ofIsoLimit (PullbackCone.ext (Iso.refl _) (by simp) (by simp)))) /-- The canonical map to the explicit equalizer. 
-/ def MapToEqualizer (P : Cᵒᵖ ⥤ Type*) {W X B : C} (f : X ⟶ B) (g₁ g₂ : W ⟶ X) (w : g₁ ≫ f = g₂ ≫ f) : P.obj (op B) → { x : P.obj (op X) | P.map g₁.op x = P.map g₂.op x } := fun t ↦ ⟨P.map f.op t, by simp only [Set.mem_setOf_eq, ← FunctorToTypes.map_comp_apply, ← op_comp, w]⟩ theorem EqualizerCondition.bijective_mapToEqualizer_pullback (P : Cᵒᵖ ⥤ Type*) (hP : EqualizerCondition P) : ∀ (X B : C) (π : X ⟶ B) [EffectiveEpi π] [HasPullback π π], Function.Bijective (MapToEqualizer P π (pullback.fst π π) (pullback.snd π π) pullback.condition) := by intro X B π _ _ specialize hP π _ (pullbackIsPullback π π) rw [Types.type_equalizer_iff_unique] at hP rw [Function.bijective_iff_existsUnique] intro ⟨b, hb⟩ obtain ⟨a, ha₁, ha₂⟩ := hP b hb refine ⟨a, ?_, ?_⟩ · simpa [MapToEqualizer] using ha₁ · simpa [MapToEqualizer] using ha₂ theorem EqualizerCondition.mk (P : Cᵒᵖ ⥤ Type*) (hP : ∀ (X B : C) (π : X ⟶ B) [EffectiveEpi π] [HasPullback π π], Function.Bijective (MapToEqualizer P π (pullback.fst π π) (pullback.snd π π) pullback.condition)) : EqualizerCondition P := by intro X B π _ c hc have : HasPullback π π := ⟨c, hc⟩ specialize hP X B π rw [Types.type_equalizer_iff_unique] rw [Function.bijective_iff_existsUnique] at hP intro b hb have h₁ : ((pullbackIsPullback π π).conePointUniqueUpToIso hc).hom ≫ c.fst = pullback.fst π π := by simp have hb' : P.map (pullback.fst π π).op b = P.map (pullback.snd _ _).op b := by rw [← h₁, op_comp, FunctorToTypes.map_comp_apply, hb] simp [← FunctorToTypes.map_comp_apply, ← op_comp] obtain ⟨a, ha₁, ha₂⟩ := hP ⟨b, hb'⟩ refine ⟨a, ?_, ?_⟩ · simpa [MapToEqualizer] using ha₁ · simpa [MapToEqualizer] using ha₂ lemma equalizerCondition_w' (P : Cᵒᵖ ⥤ Type*) {X B : C} (π : X ⟶ B) [HasPullback π π] : P.map π.op ≫ P.map (pullback.fst π π).op = P.map π.op ≫ P.map (pullback.snd π π).op := by simp only [← Functor.map_comp, ← op_comp, pullback.condition] lemma mapToEqualizer_eq_comp (P : Cᵒᵖ ⥤ Type*) {X B : C} (π : X ⟶ B) [HasPullback π π] : MapToEqualizer P π 
(pullback.fst π π) (pullback.snd π π) pullback.condition = equalizer.lift (P.map π.op) (equalizerCondition_w' P π) ≫ (Types.equalizerIso _ _).hom := by rw [← Iso.comp_inv_eq (α := Types.equalizerIso _ _)] apply equalizer.hom_ext aesop /-- An alternative phrasing of the explicit equalizer condition, using more categorical language. -/ theorem equalizerCondition_iff_isIso_lift (P : Cᵒᵖ ⥤ Type*) : EqualizerCondition P ↔ ∀ (X B : C) (π : X ⟶ B) [EffectiveEpi π] [HasPullback π π], IsIso (equalizer.lift (P.map π.op) (equalizerCondition_w' P π)) := by constructor · intro hP X B π _ _ have h := hP.bijective_mapToEqualizer_pullback _ X B π rw [← isIso_iff_bijective, mapToEqualizer_eq_comp] at h exact IsIso.of_isIso_comp_right (equalizer.lift (P.map π.op) (equalizerCondition_w' P π)) (Types.equalizerIso _ _).hom · intro hP apply EqualizerCondition.mk intro X B π _ _ rw [mapToEqualizer_eq_comp, ← isIso_iff_bijective] infer_instance /-- `P` satisfies the equalizer condition iff its precomposition by an equivalence does. 
-/ theorem equalizerCondition_iff_of_equivalence (P : Cᵒᵖ ⥤ D) (e : C ≌ E) : EqualizerCondition P ↔ EqualizerCondition (e.op.inverse ⋙ P) := ⟨fun h ↦ equalizerCondition_precomp_of_preservesPullback P e.inverse h, fun h ↦ equalizerCondition_of_natIso (e.op.funInvIdAssoc P) (equalizerCondition_precomp_of_preservesPullback (e.op.inverse ⋙ P) e.functor h)⟩ open WalkingParallelPair WalkingParallelPairHom in theorem parallelPair_pullback_initial {X B : C} (π : X ⟶ B) (c : PullbackCone π π) (hc : IsLimit c) : (parallelPair (C := (Sieve.ofArrows (fun (_ : Unit) => X) (fun _ => π)).arrows.categoryᵒᵖ) (Y := op ((Presieve.categoryMk _ (c.fst ≫ π) ⟨_, c.fst, π, ofArrows.mk (), rfl⟩))) (X := op ((Presieve.categoryMk _ π (Sieve.ofArrows_mk _ _ Unit.unit)))) (Quiver.Hom.op (Over.homMk c.fst)) (Quiver.Hom.op (Over.homMk c.snd c.condition.symm))).Initial := by apply Limits.parallelPair_initial_mk · intro ⟨Z⟩ obtain ⟨_, f, g, ⟨⟩, hh⟩ := Z.property let X' : (Presieve.ofArrows (fun () ↦ X) (fun () ↦ π)).category := Presieve.categoryMk _ π (ofArrows.mk ()) let f' : Z.obj.left ⟶ X'.obj.left := f exact ⟨(Over.homMk f').op⟩ · intro ⟨Z⟩ ⟨i⟩ ⟨j⟩ let ij := PullbackCone.IsLimit.lift hc i.left j.left (by erw [i.w, j.w]; rfl) refine ⟨Quiver.Hom.op (Over.homMk ij (by simpa [ij] using i.w)), ?_, ?_⟩ all_goals congr all_goals exact Comma.hom_ext _ _ (by erw [Over.comp_left]; simp [ij]) rfl /-- Given a limiting pullback cone, the fork in `SingleEqualizerCondition` is limiting iff the diagram in `Presheaf.isSheaf_iff_isLimit_coverage` is limiting. 
-/ noncomputable def isLimit_forkOfι_equiv (P : Cᵒᵖ ⥤ D) {X B : C} (π : X ⟶ B) (c : PullbackCone π π) (hc : IsLimit c) : IsLimit (Fork.ofι (P.map π.op) (equalizerCondition_w P c)) ≃ IsLimit (P.mapCone (Sieve.ofArrows (fun (_ : Unit) ↦ X) fun _ ↦ π).arrows.cocone.op) := by let S := (Sieve.ofArrows (fun (_ : Unit) => X) (fun _ => π)).arrows let X' := S.categoryMk π ⟨_, 𝟙 _, π, ofArrows.mk (), Category.id_comp _⟩ let P' := S.categoryMk (c.fst ≫ π) ⟨_, c.fst, π, ofArrows.mk (), rfl⟩ let fst : P' ⟶ X' := Over.homMk c.fst let snd : P' ⟶ X' := Over.homMk c.snd c.condition.symm let F : S.categoryᵒᵖ ⥤ D := S.diagram.op ⋙ P let G := parallelPair (P.map c.fst.op) (P.map c.snd.op) let H := parallelPair fst.op snd.op have : H.Initial := parallelPair_pullback_initial π c hc let i : H ⋙ F ≅ G := parallelPair.ext (Iso.refl _) (Iso.refl _) (by aesop) (by aesop) refine (IsLimit.equivOfNatIsoOfIso i.symm _ _ ?_).trans (Functor.Initial.isLimitWhiskerEquiv H _) refine Cones.ext (Iso.refl _) ?_ rintro ⟨_ | _⟩ all_goals aesop lemma equalizerConditionMap_iff_nonempty_isLimit (P : Cᵒᵖ ⥤ D) ⦃X B : C⦄ (π : X ⟶ B) [HasPullback π π] : SingleEqualizerCondition P π ↔ Nonempty (IsLimit (P.mapCone (Sieve.ofArrows (fun (_ : Unit) => X) (fun _ => π)).arrows.cocone.op)) := by constructor · intro h exact ⟨isLimit_forkOfι_equiv _ _ _ (pullbackIsPullback π π) (h _ (pullbackIsPullback π π)).some⟩ · intro ⟨h⟩ exact fun c hc ↦ ⟨(isLimit_forkOfι_equiv _ _ _ hc).symm h⟩ lemma equalizerCondition_iff_isSheaf (F : Cᵒᵖ ⥤ D) [Preregular C] [∀ {Y X : C} (f : Y ⟶ X) [EffectiveEpi f], HasPullback f f] : EqualizerCondition F ↔ Presheaf.IsSheaf (regularTopology C) F := by dsimp [regularTopology] rw [Presheaf.isSheaf_iff_isLimit_coverage] constructor · rintro hF X _ ⟨Y, f, rfl, _⟩ exact (equalizerConditionMap_iff_nonempty_isLimit F f).1 (hF f) · intro hF Y X f _ exact (equalizerConditionMap_iff_nonempty_isLimit F f).2 (hF _ ⟨_, f, rfl, inferInstance⟩) lemma isSheafFor_regular_of_projective {X : C} (S : Presieve X) 
[S.regular] [Projective X] (F : Cᵒᵖ ⥤ Type*) : S.IsSheafFor F := by obtain ⟨Y, f, rfl, hf⟩ := Presieve.regular.single_epi (R := S) rw [isSheafFor_arrows_iff] refine fun x hx ↦ ⟨F.map (Projective.factorThru (𝟙 _) f).op <| x (), fun _ ↦ ?_, fun y h ↦ ?_⟩ · simpa using (hx () () Y (𝟙 Y) (f ≫ (Projective.factorThru (𝟙 _) f)) (by simp)).symm · simp only [← h (), ← FunctorToTypes.map_comp_apply, ← op_comp, Projective.factorThru_comp, op_id, FunctorToTypes.map_id_apply] /-- Every presheaf is a sheaf for the regular topology if every object of `C` is projective. -/ theorem isSheaf_of_projective (F : Cᵒᵖ ⥤ D) [Preregular C] [∀ (X : C), Projective X] : Presheaf.IsSheaf (regularTopology C) F := fun _ ↦ (isSheaf_coverage _ _).mpr fun S ⟨_, h⟩ ↦ have : S.regular := ⟨_, h⟩ isSheafFor_regular_of_projective _ _ /-- Every Yoneda-presheaf is a sheaf for the regular topology. -/ lemma isSheaf_yoneda_obj [Preregular C] (W : C) : Presieve.IsSheaf (regularTopology C) (yoneda.obj W) := by rw [regularTopology, isSheaf_coverage] intro X S ⟨_, hS⟩ have : S.regular := ⟨_, hS⟩ obtain ⟨Y, f, rfl, hf⟩ := Presieve.regular.single_epi (R := S) have h_colim := isColimitOfEffectiveEpiStruct f hf.effectiveEpi.some rw [← Sieve.generateSingleton_eq, ← Presieve.ofArrows_pUnit] at h_colim intro x hx let x_ext := Presieve.FamilyOfElements.sieveExtend x have hx_ext := Presieve.FamilyOfElements.Compatible.sieveExtend hx let S := Sieve.generate (Presieve.ofArrows (fun () ↦ Y) (fun () ↦ f)) obtain ⟨t, t_amalg, t_uniq⟩ := (Sieve.forallYonedaIsSheaf_iff_colimit S).mpr ⟨h_colim⟩ W x_ext hx_ext refine ⟨t, ?_, ?_⟩ · convert Presieve.isAmalgamation_restrict (Sieve.le_generate (Presieve.ofArrows (fun () ↦ Y) (fun () ↦ f))) _ _ t_amalg exact (Presieve.restrict_extend hx).symm · exact fun y hy ↦ t_uniq y <| Presieve.isAmalgamation_sieveExtend x y hy /-- The regular topology on any preregular category is subcanonical. 
-/ theorem subcanonical [Preregular C] : Sheaf.Subcanonical (regularTopology C) := Sheaf.Subcanonical.of_yoneda_isSheaf _ isSheaf_yoneda_obj end regularTopology end CategoryTheory
CategoryTheory\Sites\Coherent\RegularTopology.lean
/-
Copyright (c) 2023 Dagur Asgeirsson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dagur Asgeirsson
-/
import Mathlib.CategoryTheory.Sites.Coherent.RegularSheaves
/-!

# Description of the covering sieves of the regular topology

This file characterises the covering sieves of the regular topology.

## Main result

* `regularTopology.mem_sieves_iff_hasEffectiveEpi`: a sieve is a covering sieve for the
  regular topology if and only if it contains an effective epi.
-/

namespace CategoryTheory.regularTopology

open Limits

variable {C : Type*} [Category C] [Preregular C] {X : C}

/-- For a preregular category, any sieve that contains an `EffectiveEpi` is a covering sieve
of the regular topology.

Note: This is one direction of `mem_sieves_iff_hasEffectiveEpi`, but is needed for the proof.
-/
theorem mem_sieves_of_hasEffectiveEpi (S : Sieve X) :
    (∃ (Y : C) (π : Y ⟶ X), EffectiveEpi π ∧ S.arrows π) →
    (S ∈ (regularTopology C).sieves X) := by
  rintro ⟨Y, π, h⟩
  -- The sieve generated by the singleton presieve `{π}` is contained in `S`,
  -- since `S` contains `π` and sieves are downward closed.
  have h_le : Sieve.generate (Presieve.ofArrows (fun () ↦ Y) (fun _ ↦ π)) ≤ S := by
    rw [Sieve.generate_le_iff (Presieve.ofArrows _ _) S]
    apply Presieve.le_of_factorsThru_sieve (Presieve.ofArrows _ _) S _
    intro W g f
    refine ⟨W, 𝟙 W, ?_⟩
    cases f
    exact ⟨π, ⟨h.2, Category.id_comp π⟩⟩
  -- The generated sieve is covering because `π` is an effective epi, and the saturation is
  -- closed under supersets.
  apply Coverage.saturate_of_superset (regularCoverage C) h_le
  exact Coverage.Saturate.of X _ ⟨Y, π, rfl, h.1⟩

/-- Effective epis in a preregular category are stable under composition. -/
instance {Y Y' : C} (π : Y ⟶ X) [EffectiveEpi π] (π' : Y' ⟶ Y) [EffectiveEpi π'] :
    EffectiveEpi (π' ≫ π) := by
  -- Reformulate effectiveness of `π' ≫ π` as an effective-epimorphic (singleton) family,
  -- i.e. a colimit condition on the generated sieve.
  rw [effectiveEpi_iff_effectiveEpiFamily, ← Sieve.effectiveEpimorphic_family]
  -- It suffices that the generated sieve covers for the regular topology: yoneda presheaves
  -- are sheaves for it, and sheaf conditions give the required colimit.
  suffices h₂ : (Sieve.generate (Presieve.ofArrows _ _)) ∈
      GrothendieckTopology.sieves (regularTopology C) X by
    change Nonempty _
    rw [← Sieve.forallYonedaIsSheaf_iff_colimit]
    exact fun W => regularTopology.isSheaf_yoneda_obj W _ h₂
  -- Build the covering proof by transitivity through the sieve generated by `π` alone.
  apply Coverage.Saturate.transitive X
    (Sieve.generate (Presieve.ofArrows (fun () ↦ Y) (fun () ↦ π)))
  · apply Coverage.Saturate.of
    use Y, π
  · intro V f ⟨Y₁, h, g, ⟨hY, hf⟩⟩
    -- Each pullback of the sieve contains the effective epi `π'` (over `Y`), hence covers.
    rw [← hf, Sieve.pullback_comp]
    apply (regularTopology C).pullback_stable'
    apply regularTopology.mem_sieves_of_hasEffectiveEpi
    cases hY
    exact ⟨Y', π', inferInstance, Y', (𝟙 _), π' ≫ π, Presieve.ofArrows.mk (), (by simp)⟩

/-- A sieve is a cover for the regular topology if and only if it contains an `EffectiveEpi`.
-/
theorem mem_sieves_iff_hasEffectiveEpi (S : Sieve X) :
    (S ∈ (regularTopology C).sieves X) ↔
    ∃ (Y : C) (π : Y ⟶ X), EffectiveEpi π ∧ (S.arrows π) := by
  constructor
  · intro h
    -- Forward direction: induct on the saturation proof. Cases: a generating covering sieve,
    -- the top sieve, and transitivity.
    induction' h with Y T hS Y Y R S _ _ a b
    · -- Generating case: the covering presieve is a singleton effective epi, which the
      -- generated sieve contains.
      rcases hS with ⟨Y', π, h'⟩
      refine ⟨Y', π, h'.2, ?_⟩
      rcases h' with ⟨rfl, _⟩
      exact ⟨Y', 𝟙 Y', π, Presieve.ofArrows.mk (), (by simp)⟩
    · -- Top case: the identity is an effective epi contained in the top sieve.
      exact ⟨Y, (𝟙 Y), inferInstance, by simp only [Sieve.top_apply, forall_const]⟩
    · -- Transitivity: compose the effective epi for `R` with one obtained from the
      -- inductive hypothesis `b` on its pullback; the composite is effective epi by the
      -- instance above.
      rcases a with ⟨Y₁, π, ⟨h₁,h₂⟩⟩
      choose Y' π' _ H using b h₂
      exact ⟨Y', π' ≫ π, inferInstance, (by simpa using H)⟩
  · exact regularTopology.mem_sieves_of_hasEffectiveEpi S

end CategoryTheory.regularTopology
CategoryTheory\Sites\Coherent\SheafComparison.lean
/- Copyright (c) 2024 Dagur Asgeirsson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Dagur Asgeirsson -/ import Mathlib.CategoryTheory.Sites.Coherent.Comparison import Mathlib.CategoryTheory.Sites.Coherent.ExtensiveSheaves import Mathlib.CategoryTheory.Sites.Coherent.ReflectsPrecoherent import Mathlib.CategoryTheory.Sites.Coherent.ReflectsPreregular import Mathlib.CategoryTheory.Sites.InducedTopology import Mathlib.CategoryTheory.Sites.Whiskering /-! # Categories of coherent sheaves Given a fully faithful functor `F : C ⥤ D` into a precoherent category, which preserves and reflects finite effective epi families, and satisfies the property `F.EffectivelyEnough` (meaning that to every object in `C` there is an effective epi from an object in the image of `F`), the categories of coherent sheaves on `C` and `D` are equivalent (see `CategoryTheory.coherentTopology.equivalence`). The main application of this equivalence is the characterisation of condensed sets as coherent sheaves on either `CompHaus`, `Profinite` or `Stonean`. See the file `Condensed/Equivalence.lean` We give the corresonding result for the regular topology as well (see `CategoryTheory.regularTopology.equivalence`). -/ universe v₁ v₂ v₃ v₄ u₁ u₂ u₃ u₄ namespace CategoryTheory open Limits Functor regularTopology variable {C D : Type*} [Category C] [Category D] (F : C ⥤ D) namespace coherentTopology variable [F.PreservesFiniteEffectiveEpiFamilies] [F.ReflectsFiniteEffectiveEpiFamilies] [F.Full] [F.Faithful] [F.EffectivelyEnough] [Precoherent D] instance : F.IsCoverDense (coherentTopology _) := by refine F.isCoverDense_of_generate_singleton_functor_π_mem _ fun B ↦ ⟨_, F.effectiveEpiOver B, ?_⟩ apply Coverage.Saturate.of refine ⟨Unit, inferInstance, fun _ => F.effectiveEpiOverObj B, fun _ => F.effectiveEpiOver B, ?_ , ?_⟩ · funext; ext -- Do we want `Presieve.ext`? 
refine ⟨fun ⟨⟩ ↦ ⟨()⟩, ?_⟩ rintro ⟨⟩ simp · rw [← effectiveEpi_iff_effectiveEpiFamily] infer_instance theorem exists_effectiveEpiFamily_iff_mem_induced (X : C) (S : Sieve X) : (∃ (α : Type) (_ : Finite α) (Y : α → C) (π : (a : α) → (Y a ⟶ X)), EffectiveEpiFamily Y π ∧ (∀ a : α, (S.arrows) (π a)) ) ↔ (S ∈ F.inducedTopology (coherentTopology _) X) := by refine ⟨fun ⟨α, _, Y, π, ⟨H₁, H₂⟩⟩ ↦ ?_, fun hS ↦ ?_⟩ · apply (mem_sieves_iff_hasEffectiveEpiFamily (Sieve.functorPushforward _ S)).mpr refine ⟨α, inferInstance, fun i => F.obj (Y i), fun i => F.map (π i), ⟨?_, fun a => Sieve.image_mem_functorPushforward F S (H₂ a)⟩⟩ exact F.map_finite_effectiveEpiFamily _ _ · obtain ⟨α, _, Y, π, ⟨H₁, H₂⟩⟩ := (mem_sieves_iff_hasEffectiveEpiFamily _).mp hS refine ⟨α, inferInstance, ?_⟩ let Z : α → C := fun a ↦ (Functor.EffectivelyEnough.presentation (F := F) (Y a)).some.p let g₀ : (a : α) → F.obj (Z a) ⟶ Y a := fun a ↦ F.effectiveEpiOver (Y a) have : EffectiveEpiFamily _ (fun a ↦ g₀ a ≫ π a) := inferInstance refine ⟨Z , fun a ↦ F.preimage (g₀ a ≫ π a), ?_, fun a ↦ (?_ : S.arrows (F.preimage _))⟩ · refine F.finite_effectiveEpiFamily_of_map _ _ ?_ simpa using this · obtain ⟨W, g₁, g₂, h₁, h₂⟩ := H₂ a rw [h₂] convert S.downward_closed h₁ (F.preimage (g₀ a ≫ g₂)) exact F.map_injective (by simp) lemma eq_induced : haveI := F.reflects_precoherent coherentTopology C = F.inducedTopology (coherentTopology _) := by ext X S have := F.reflects_precoherent rw [← exists_effectiveEpiFamily_iff_mem_induced F X] rw [← coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily S] instance : haveI := F.reflects_precoherent; F.IsDenseSubsite (coherentTopology C) (coherentTopology D) where functorPushforward_mem_iff := by simp_rw [eq_induced F]; rfl lemma coverPreserving : haveI := F.reflects_precoherent CoverPreserving (coherentTopology _) (coherentTopology _) F := IsDenseSubsite.coverPreserving _ _ _ section SheafEquiv variable {C : Type u₁} {D : Type u₂} [Category.{v₁} C] [Category.{v₂} D] (F : C ⥤ D) 
[F.PreservesFiniteEffectiveEpiFamilies] [F.ReflectsFiniteEffectiveEpiFamilies] [F.Full] [F.Faithful] [Precoherent D] [F.EffectivelyEnough] /-- The equivalence from coherent sheaves on `C` to coherent sheaves on `D`, given a fully faithful functor `F : C ⥤ D` to a precoherent category, which preserves and reflects effective epimorphic families, and satisfies `F.EffectivelyEnough`. -/ noncomputable def equivalence (A : Type u₃) [Category.{v₃} A] [∀ X, HasLimitsOfShape (StructuredArrow X F.op) A] : haveI := F.reflects_precoherent Sheaf (coherentTopology C) A ≌ Sheaf (coherentTopology D) A := Functor.IsDenseSubsite.sheafEquiv F _ _ _ end SheafEquiv section RegularExtensive variable {C : Type u₁} {D : Type u₂} [Category.{v₁} C] [Category.{v₂} D] (F : C ⥤ D) [F.PreservesEffectiveEpis] [F.ReflectsEffectiveEpis] [F.Full] [F.Faithful] [FinitaryExtensive D] [Preregular D] [FinitaryPreExtensive C] [PreservesFiniteCoproducts F] [F.EffectivelyEnough] /-- The equivalence from coherent sheaves on `C` to coherent sheaves on `D`, given a fully faithful functor `F : C ⥤ D` to an extensive preregular category, which preserves and reflects effective epimorphisms and satisfies `F.EffectivelyEnough`. -/ noncomputable def equivalence' (A : Type u₃) [Category.{v₃} A] [∀ X, HasLimitsOfShape (StructuredArrow X F.op) A] : haveI := F.reflects_precoherent Sheaf (coherentTopology C) A ≌ Sheaf (coherentTopology D) A := Functor.IsDenseSubsite.sheafEquiv F _ _ _ end RegularExtensive end coherentTopology namespace regularTopology variable [F.PreservesEffectiveEpis] [F.ReflectsEffectiveEpis] [F.Full] [F.Faithful] [F.EffectivelyEnough] [Preregular D] instance : F.IsCoverDense (regularTopology _) := by refine F.isCoverDense_of_generate_singleton_functor_π_mem _ fun B ↦ ⟨_, F.effectiveEpiOver B, ?_⟩ apply Coverage.Saturate.of refine ⟨F.effectiveEpiOverObj B, F.effectiveEpiOver B, ?_, inferInstance⟩ funext; ext -- Do we want `Presieve.ext`? 
  refine ⟨fun ⟨⟩ ↦ ⟨()⟩, ?_⟩
  rintro ⟨⟩
  simp

/-- A sieve on `X` contains an effective epimorphism iff its functor pushforward along `F`
is a covering sieve for the topology induced by `F` from the regular topology. -/
theorem exists_effectiveEpi_iff_mem_induced (X : C) (S : Sieve X) :
    (∃ (Y : C) (π : Y ⟶ X), EffectiveEpi π ∧ S.arrows π) ↔
    (S ∈ F.inducedTopology (regularTopology _) X) := by
  refine ⟨fun ⟨Y, π, ⟨H₁, H₂⟩⟩ ↦ ?_, fun hS ↦ ?_⟩
  · apply (mem_sieves_iff_hasEffectiveEpi (Sieve.functorPushforward _ S)).mpr
    -- `F.map π` is an effective epi belonging to the pushforward sieve.
    refine ⟨F.obj Y, F.map π, ⟨?_, Sieve.image_mem_functorPushforward F S H₂⟩⟩
    exact F.map_effectiveEpi _
  · obtain ⟨Y, π, ⟨H₁, H₂⟩⟩ := (mem_sieves_iff_hasEffectiveEpi _).mp hS
    -- Precompose `π` with an effective epi onto `Y` from an object in the image of `F`,
    -- so that the composite can be pulled back along the (full, faithful) functor `F`.
    let g₀ := F.effectiveEpiOver Y
    refine ⟨_, F.preimage (g₀ ≫ π), ?_, (?_ : S.arrows (F.preimage _))⟩
    · refine F.effectiveEpi_of_map _ ?_
      simp only [map_preimage]
      infer_instance
    · obtain ⟨W, g₁, g₂, h₁, h₂⟩ := H₂
      rw [h₂]
      convert S.downward_closed h₁ (F.preimage (g₀ ≫ g₂))
      exact F.map_injective (by simp)

/-- The regular topology on `C` coincides with the topology induced by `F`
from the regular topology on the target category. -/
lemma eq_induced : haveI := F.reflects_preregular
    regularTopology C = F.inducedTopology (regularTopology _) := by
  ext X S
  have := F.reflects_preregular
  rw [← exists_effectiveEpi_iff_mem_induced F X]
  rw [← mem_sieves_iff_hasEffectiveEpi S]

-- `F` exhibits `C` as a dense subsite of `D` for the regular topologies;
-- this follows formally from `eq_induced`.
instance : haveI := F.reflects_preregular;
    F.IsDenseSubsite (regularTopology C) (regularTopology D) where
  functorPushforward_mem_iff := by simp_rw [eq_induced F]; rfl

/-- `F` is cover-preserving with respect to the regular topologies. -/
lemma coverPreserving : haveI := F.reflects_preregular
    CoverPreserving (regularTopology _) (regularTopology _) F :=
  IsDenseSubsite.coverPreserving _ _ _

section SheafEquiv

variable {C : Type u₁} {D : Type u₂} [Category.{v₁} C] [Category.{v₂} D] (F : C ⥤ D)
  [F.PreservesEffectiveEpis] [F.ReflectsEffectiveEpis] [F.Full] [F.Faithful]
  [Preregular D] [F.EffectivelyEnough]

/-- The equivalence from regular sheaves on `C` to regular sheaves on `D`, given a fully faithful
functor `F : C ⥤ D` to a preregular category, which preserves and reflects effective
epimorphisms and satisfies `F.EffectivelyEnough`.
-/
noncomputable def equivalence (A : Type u₃) [Category.{v₃} A]
    [∀ X, HasLimitsOfShape (StructuredArrow X F.op) A] :
    haveI := F.reflects_preregular
    Sheaf (regularTopology C) A ≌ Sheaf (regularTopology D) A :=
  Functor.IsDenseSubsite.sheafEquiv F _ _ _

end SheafEquiv

end regularTopology

namespace Presheaf

variable {A : Type u₃} [Category.{v₃} A] (F : Cᵒᵖ ⥤ A)

/-- A presheaf is a sheaf for the coherent topology iff it is a sheaf for both the
extensive and the regular topologies. -/
theorem isSheaf_coherent_iff_regular_and_extensive [Preregular C] [FinitaryPreExtensive C] :
    IsSheaf (coherentTopology C) F ↔
    IsSheaf (extensiveTopology C) F ∧ IsSheaf (regularTopology C) F := by
  rw [← extensive_regular_generate_coherent]
  exact isSheaf_sup (extensiveCoverage C) (regularCoverage C) F

/-- In a preregular finitary extensive category with enough pullbacks of effective epis,
being a coherent sheaf amounts to preserving finite products together with the
equalizer condition. -/
theorem isSheaf_iff_preservesFiniteProducts_and_equalizerCondition [Preregular C]
    [FinitaryExtensive C] [h : ∀ {Y X : C} (f : Y ⟶ X) [EffectiveEpi f], HasPullback f f] :
    IsSheaf (coherentTopology C) F ↔
    Nonempty (PreservesFiniteProducts F) ∧ EqualizerCondition F := by
  rw [isSheaf_coherent_iff_regular_and_extensive]
  exact and_congr (isSheaf_iff_preservesFiniteProducts _)
    (@equalizerCondition_iff_isSheaf _ _ _ _ F _ h).symm

-- The underlying presheaf of a coherent sheaf automatically preserves finite products.
noncomputable instance [Preregular C] [FinitaryExtensive C]
    (F : Sheaf (coherentTopology C) A) : PreservesFiniteProducts F.val :=
  ((Presheaf.isSheaf_iff_preservesFiniteProducts F.val).1
    ((Presheaf.isSheaf_coherent_iff_regular_and_extensive F.val).mp F.cond).1).some

/-- When every object of `C` is projective, the regular part of the sheaf condition is
automatic, so being a coherent sheaf reduces to preserving finite products. -/
theorem isSheaf_iff_preservesFiniteProducts_of_projective [Preregular C] [FinitaryExtensive C]
    [∀ (X : C), Projective X] :
    IsSheaf (coherentTopology C) F ↔ Nonempty (PreservesFiniteProducts F) := by
  rw [isSheaf_coherent_iff_regular_and_extensive, and_iff_left (isSheaf_of_projective F),
    isSheaf_iff_preservesFiniteProducts]

/-- When every object of `C` is projective, the coherent and extensive sheaf conditions
agree. -/
theorem isSheaf_iff_extensiveSheaf_of_projective [Preregular C] [FinitaryExtensive C]
    [∀ (X : C), Projective X] :
    IsSheaf (coherentTopology C) F ↔ IsSheaf (extensiveTopology C) F := by
  rw [isSheaf_iff_preservesFiniteProducts_of_projective, isSheaf_iff_preservesFiniteProducts]

/-- The categories of coherent sheaves and
extensive sheaves on `C` are equivalent if `C` is preregular, finitary extensive,
and every object is projective. -/
@[simps]
def coherentExtensiveEquivalence [Preregular C] [FinitaryExtensive C] [∀ (X : C), Projective X] :
    Sheaf (coherentTopology C) A ≌ Sheaf (extensiveTopology C) A where
  functor :=
    { obj := fun F ↦ ⟨F.val, (isSheaf_iff_extensiveSheaf_of_projective F.val).mp F.cond⟩
      map := fun f ↦ ⟨f.val⟩ }
  inverse :=
    { obj := fun F ↦ ⟨F.val, (isSheaf_iff_extensiveSheaf_of_projective F.val).mpr F.cond⟩
      map := fun f ↦ ⟨f.val⟩ }
  unitIso := Iso.refl _
  counitIso := Iso.refl _

variable {B : Type u₄} [Category.{v₄} B]
variable (s : A ⥤ B)

/-- Composition with a finite-limit-preserving functor sends coherent sheaves to
coherent sheaves (in the presence of the relevant pullbacks). -/
lemma isSheaf_coherent_of_hasPullbacks_comp [Preregular C] [FinitaryExtensive C]
    [h : ∀ {Y X : C} (f : Y ⟶ X) [EffectiveEpi f], HasPullback f f] [PreservesFiniteLimits s]
    (hF : IsSheaf (coherentTopology C) F) : IsSheaf (coherentTopology C) (F ⋙ s) := by
  rw [isSheaf_iff_preservesFiniteProducts_and_equalizerCondition (h := h)] at hF ⊢
  have := hF.1.some
  refine ⟨⟨inferInstance⟩, fun _ _ π _ c hc ↦ ⟨?_⟩⟩
  exact isLimitForkMapOfIsLimit s _ (hF.2 π c hc).some

/-- A presheaf whose composition with a finite-limit-reflecting functor is a coherent sheaf
is itself a coherent sheaf. -/
lemma isSheaf_coherent_of_hasPullbacks_of_comp [Preregular C] [FinitaryExtensive C]
    [h : ∀ {Y X : C} (f : Y ⟶ X) [EffectiveEpi f], HasPullback f f] [ReflectsFiniteLimits s]
    (hF : IsSheaf (coherentTopology C) (F ⋙ s)) : IsSheaf (coherentTopology C) F := by
  rw [isSheaf_iff_preservesFiniteProducts_and_equalizerCondition (h := h)] at hF ⊢
  refine ⟨⟨⟨fun J _ ↦ ⟨fun {K} ↦ ⟨fun {c} hc ↦ ?_⟩⟩⟩⟩, fun _ _ π _ c hc ↦ ⟨?_⟩⟩
  · exact isLimitOfReflects s ((hF.1.some.1 J).1.1 hc)
  · exact isLimitOfIsLimitForkMap s _ (hF.2 π c hc).some

/-- When every object is projective, composition with a finite-product-preserving functor
sends coherent sheaves to coherent sheaves. -/
lemma isSheaf_coherent_of_projective_comp [Preregular C] [FinitaryExtensive C]
    [∀ (X : C), Projective X] [PreservesFiniteProducts s]
    (hF : IsSheaf (coherentTopology C) F) : IsSheaf (coherentTopology C) (F ⋙ s) := by
  rw [isSheaf_iff_preservesFiniteProducts_of_projective] at hF ⊢
  have := hF.some
  exact ⟨inferInstance⟩

/-- When every object is projective, a presheaf whose composition with a
finite-product-reflecting functor is a coherent sheaf is itself a coherent sheaf. -/
lemma isSheaf_coherent_of_projective_of_comp [Preregular C] [FinitaryExtensive C]
    [∀ (X : C), Projective X] [ReflectsFiniteProducts s]
    (hF : IsSheaf (coherentTopology C) (F ⋙ s)) : IsSheaf (coherentTopology C) F := by
  rw [isSheaf_iff_preservesFiniteProducts_of_projective] at hF ⊢
  refine ⟨⟨fun J _ ↦ ⟨fun {K} ↦ ⟨fun {c} hc ↦ ?_⟩⟩⟩⟩
  exact isLimitOfReflects s ((hF.some.1 J).1.1 hc)

-- Sheafhood for the coherent topology is stable under composition with `s`
-- when `s` preserves finite limits.
instance [Preregular C] [FinitaryExtensive C]
    [h : ∀ {Y X : C} (f : Y ⟶ X) [EffectiveEpi f], HasPullback f f] [PreservesFiniteLimits s] :
    (coherentTopology C).HasSheafCompose s where
  isSheaf F hF := isSheaf_coherent_of_hasPullbacks_comp (h := h) F s hF

-- In the projective case, preserving finite products suffices for `HasSheafCompose`.
instance [Preregular C] [FinitaryExtensive C] [∀ (X : C), Projective X]
    [PreservesFiniteProducts s] : (coherentTopology C).HasSheafCompose s where
  isSheaf F hF := isSheaf_coherent_of_projective_comp F s hF

end CategoryTheory.Presheaf
CategoryTheory\Sites\NonabelianCohomology\H1.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.Algebra.Category.Grp.Basic

/-!
The cohomology of a sheaf of groups in degree 1

In this file, we shall define the cohomology in degree 1 of a sheaf
of groups (TODO).

Currently, given a presheaf of groups `G : Cᵒᵖ ⥤ Grp` and a family of objects
`U : I → C`, we define 1-cochains/1-cocycles/H^1 with values in `G` over `U`.
(This definition neither requires the assumption that `G` is a sheaf, nor
that `U` covers the terminal object.) As we do not assume that `G` is a presheaf
of abelian groups, this cohomology theory is only defined in low degrees;
in the abelian case, it would be a particular case of Čech cohomology (TODO).

## TODO

* show that if `1 ⟶ G₁ ⟶ G₂ ⟶ G₃ ⟶ 1` is a short exact sequence of sheaves of groups,
and `x₃` is a global section of `G₃` which can be locally lifted to a section of `G₂`,
there is an associated canonical cohomology class of `G₁` which is trivial iff `x₃`
can be lifted to a global section of `G₂`.
(This should hold more generally if `G₂` is a sheaf of sets on which `G₁` acts
freely, and `G₃` is the quotient sheaf.)
* deduce a similar result for abelian sheaves
* when the notion of quasi-coherent sheaves on schemes is defined, show that
if `0 ⟶ Q ⟶ M ⟶ N ⟶ 0` is an exact sequence of abelian sheaves over a scheme `X`
and `Q` is the underlying sheaf of a quasi-coherent sheaf, then `M(U) ⟶ N(U)`
is surjective for any affine open `U`.
* take the colimit of `OneCohomology G U` over all covering families `U`
(for a Grothendieck topology)

# References

* [J. Frenkel, *Cohomologie non abélienne et espaces fibrés*][frenkel1957]

-/

universe w' w v u

namespace CategoryTheory

variable {C : Type u} [Category.{v} C]

namespace PresheafOfGroups

variable (G : Cᵒᵖ ⥤ Grp.{w}) {X : C} {I : Type w'} (U : I → C)

/-- A zero cochain consists of a family of sections. -/
def ZeroCochain := ∀ (i : I), G.obj (Opposite.op (U i))

-- The group structure on 0-cochains is pointwise.
instance : Group (ZeroCochain G U) := Pi.group

namespace Cochain₀

#adaptation_note /-- After https://github.com/leanprover/lean4/pull/4481
the `simpNF` linter incorrectly claims this lemma can't be applied by `simp`. -/
@[simp, nolint simpNF]
lemma one_apply (i : I) : (1 : ZeroCochain G U) i = 1 := rfl

@[simp]
lemma inv_apply (γ : ZeroCochain G U) (i : I) : γ⁻¹ i = (γ i)⁻¹ := rfl

@[simp]
lemma mul_apply (γ₁ γ₂ : ZeroCochain G U) (i : I) : (γ₁ * γ₂) i = γ₁ i * γ₂ i := rfl

end Cochain₀

/-- A 1-cochain of a presheaf of groups
`G : Cᵒᵖ ⥤ Grp` on a family `U : I → C` of objects consists of the data of
an element in `G.obj (Opposite.op T)` whenever we have elements `i` and `j` in `I`
and maps `a : T ⟶ U i` and `b : T ⟶ U j`, and it must satisfy a compatibility with respect
to precomposition. (When the binary product of `U i` and `U j` exists, this data for all `T`, `a`
and `b` corresponds to the data of a section of `G` on this product.) -/
@[ext]
structure OneCochain where
  /-- the data involved in a 1-cochain -/
  ev (i j : I) ⦃T : C⦄ (a : T ⟶ U i) (b : T ⟶ U j) : G.obj (Opposite.op T)
  /-- the compatibility of the data with respect to precomposition by `φ : T ⟶ T'` -/
  ev_precomp (i j : I) ⦃T T' : C⦄ (φ : T ⟶ T') (a : T' ⟶ U i) (b : T' ⟶ U j) :
    G.map φ.op (ev i j a b) = ev i j (φ ≫ a) (φ ≫ b) := by aesop

namespace OneCochain

attribute [simp] OneCochain.ev_precomp

-- The trivial 1-cochain: constantly `1`.
instance : One (OneCochain G U) where
  one := { ev := fun _ _ _ _ _ ↦ 1 }

@[simp]
lemma one_ev (i j : I) {T : C} (a : T ⟶ U i) (b : T ⟶ U j) :
    (1 : OneCochain G U).ev i j a b = 1 := rfl

variable {G U}

-- Multiplication of 1-cochains is pointwise on the `ev` data.
instance : Mul (OneCochain G U) where
  mul γ₁ γ₂ :=
    { ev := fun i j T a b ↦ γ₁.ev i j a b * γ₂.ev i j a b }

@[simp]
lemma mul_ev (γ₁ γ₂ : OneCochain G U) (i j : I) {T : C} (a : T ⟶ U i) (b : T ⟶ U j) :
    (γ₁ * γ₂).ev i j a b = γ₁.ev i j a b * γ₂.ev i j a b := rfl

-- Inversion of 1-cochains is pointwise on the `ev` data.
instance : Inv (OneCochain G U) where
  inv γ :=
    { ev := fun i j T a b ↦ (γ.ev i j a b) ⁻¹}

@[simp]
lemma inv_ev (γ : OneCochain G U) (i j : I) {T : C} (a : T ⟶ U i) (b : T ⟶ U j) :
    (γ⁻¹).ev i j a b = (γ.ev i j a b)⁻¹ := rfl

-- The group axioms hold pointwise, via the `ext` lemma for `OneCochain`.
instance : Group (OneCochain G U) where
  mul_assoc _ _ _ := by ext; apply mul_assoc
  one_mul _ := by ext; apply one_mul
  mul_one _ := by ext; apply mul_one
  mul_left_inv _ := by ext; apply mul_left_inv

end OneCochain

/-- A 1-cocycle is a 1-cochain which satisfies the cocycle condition. -/
structure OneCocycle extends OneCochain G U where
  /-- the cocycle condition -/
  ev_trans (i j k : I) ⦃T : C⦄ (a : T ⟶ U i) (b : T ⟶ U j) (c : T ⟶ U k) :
    ev i j a b * ev j k b c = ev i k a c := by aesop

namespace OneCocycle

-- The trivial 1-cochain is a cocycle.
instance : One (OneCocycle G U) where
  one := OneCocycle.mk 1

@[simp]
lemma one_toOneCochain : (1 : OneCocycle G U).toOneCochain = 1 := rfl

@[simp]
lemma ev_refl (γ : OneCocycle G U) (i : I) ⦃T : C⦄ (a : T ⟶ U i) :
    γ.ev i i a a = 1 := by
  -- apply the cocycle condition with `i = j = k` and cancel.
  simpa using γ.ev_trans i i i a a a

lemma ev_symm (γ : OneCocycle G U) (i j : I) ⦃T : C⦄ (a : T ⟶ U i) (b : T ⟶ U j) :
    γ.ev i j a b = (γ.ev j i b a)⁻¹ := by
  rw [← mul_left_inj (γ.ev j i b a), γ.ev_trans i j i a b a, ev_refl, mul_left_inv]

end OneCocycle

variable {G U}

/-- The assertion that two cochains in `OneCochain G U` are cohomologous via
an explicit zero-cochain. -/
def OneCohomologyRelation (γ₁ γ₂ : OneCochain G U) (α : ZeroCochain G U) : Prop :=
  ∀ (i j : I) ⦃T : C⦄ (a : T ⟶ U i) (b : T ⟶ U j),
    G.map a.op (α i) * γ₁.ev i j a b = γ₂.ev i j a b * G.map b.op (α j)

namespace OneCohomologyRelation

/-- Any 1-cochain is cohomologous to itself via the trivial 0-cochain. -/
lemma refl (γ : OneCochain G U) : OneCohomologyRelation γ γ 1 := fun _ _ _ _ _ ↦ by simp

/-- The relation is symmetric: invert the 0-cochain. -/
lemma symm {γ₁ γ₂ : OneCochain G U} {α : ZeroCochain G U} (h : OneCohomologyRelation γ₁ γ₂ α) :
    OneCohomologyRelation γ₂ γ₁ α⁻¹ := fun i j T a b ↦ by
  rw [← mul_left_inj (G.map b.op (α j)), mul_assoc, ← h i j a b, mul_assoc,
    Cochain₀.inv_apply, map_inv, inv_mul_cancel_left, Cochain₀.inv_apply, map_inv,
    mul_left_inv, mul_one]

/-- The relation is transitive: multiply the 0-cochains (note the order `β * α`). -/
lemma trans {γ₁ γ₂ γ₃ : OneCochain G U} {α β : ZeroCochain G U}
    (h₁₂ : OneCohomologyRelation γ₁ γ₂ α) (h₂₃ : OneCohomologyRelation γ₂ γ₃ β) :
    OneCohomologyRelation γ₁ γ₃ (β * α) := fun i j T a b ↦ by
  dsimp
  rw [map_mul, map_mul, mul_assoc, h₁₂ i j a b, ← mul_assoc, h₂₃ i j a b, mul_assoc]

end OneCohomologyRelation

namespace OneCocycle

/-- The cohomology (equivalence) relation on 1-cocycles.
-/
def IsCohomologous (γ₁ γ₂ : OneCocycle G U) : Prop :=
  ∃ (α : ZeroCochain G U), OneCohomologyRelation γ₁.toOneCochain γ₂.toOneCochain α

variable (G U)

/-- Being cohomologous is an equivalence relation on 1-cocycles: reflexivity,
symmetry and transitivity come from the corresponding lemmas about
`OneCohomologyRelation`. -/
lemma equivalence_isCohomologous :
    _root_.Equivalence (IsCohomologous (G := G) (U := U)) where
  refl γ := ⟨_, OneCohomologyRelation.refl γ.toOneCochain⟩
  symm := by
    rintro γ₁ γ₂ ⟨α, h⟩
    exact ⟨_, h.symm⟩
  trans := by
    -- Renamed the third binder to `γ₃` (it previously shadowed `γ₂`).
    rintro γ₁ γ₂ γ₃ ⟨α, h⟩ ⟨β, h'⟩
    exact ⟨_, h.trans h'⟩

end OneCocycle

variable (G U) in
/-- The cohomology in degree 1 of a presheaf of groups
`G : Cᵒᵖ ⥤ Grp` on a family of objects `U : I → C`. -/
def H1 := Quot (OneCocycle.IsCohomologous (G := G) (U := U))

/-- The cohomology class of a 1-cocycle. -/
def OneCocycle.class (γ : OneCocycle G U) : H1 G U := Quot.mk _ γ

-- The distinguished class of the trivial cocycle.
instance : One (H1 G U) where
  one := OneCocycle.class 1

/-- Two 1-cocycles have the same cohomology class iff they are cohomologous. -/
lemma OneCocycle.class_eq_iff (γ₁ γ₂ : OneCocycle G U) :
    γ₁.class = γ₂.class ↔ γ₁.IsCohomologous γ₂ :=
  (equivalence_isCohomologous _ _).quot_mk_eq_iff _ _

/-- Cohomologous 1-cocycles have the same cohomology class. -/
lemma OneCocycle.IsCohomologous.class_eq {γ₁ γ₂ : OneCocycle G U}
    (h : γ₁.IsCohomologous γ₂) : γ₁.class = γ₂.class :=
  Quot.sound h

end PresheafOfGroups

end CategoryTheory
CategoryTheory\Sites\SheafCohomology\Basic.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.Algebra.Category.Grp.Abelian
import Mathlib.Algebra.Category.Grp.Adjunctions
import Mathlib.Algebra.Homology.DerivedCategory.Ext
import Mathlib.CategoryTheory.Sites.Abelian
import Mathlib.CategoryTheory.Sites.ConstantSheaf

/-!
# Sheaf cohomology

Let `C` be a category equipped with a Grothendieck topology `J`. We define
the cohomology types `Sheaf.H F n` of an abelian sheaf `F` on the
site `(C, J)` for all `n : ℕ`. These abelian groups are defined
as the `Ext`-groups from the constant abelian sheaf with values `ℤ`
(actually `ULift ℤ`) to `F`.

We also define `Sheaf.cohomologyPresheaf F n : Cᵒᵖ ⥤ AddCommGrp` which
is the presheaf which sends `U` to the `n`th `Ext`-group from the free abelian
sheaf generated by the presheaf of sets `yoneda.obj U` to `F`.

## TODO

* if `U` is a terminal object of `C`, define an isomorphism
`(F.cohomologyPresheaf n).obj (Opposite.op U) ≃+ Sheaf.H F n`.
* if `U : C`, define an isomorphism
`(F.cohomologyPresheaf n).obj (Opposite.op U) ≃+ Sheaf.H (F.over U) n`.

-/

universe w' w v u

namespace CategoryTheory

open Abelian

variable {C : Type u} [Category.{v} C] {J : GrothendieckTopology C}

namespace Sheaf

section

variable (F : Sheaf J AddCommGrp.{w}) [HasSheafify J AddCommGrp.{w}]
  [HasExt.{w'} (Sheaf J AddCommGrp.{w})]

/-- The cohomology of an abelian sheaf in degree `n`. -/
def H (n : ℕ) : Type w' :=
  Ext ((constantSheaf J AddCommGrp.{w}).obj (AddCommGrp.of (ULift ℤ))) F n

-- The abelian group structure is inherited from the `Ext` group after unfolding `H`.
noncomputable instance (n : ℕ) : AddCommGroup (F.H n) := by
  dsimp only [H]
  infer_instance

end

section

variable [HasSheafify J AddCommGrp.{v}] [HasExt.{w'} (Sheaf J AddCommGrp.{v})]

variable (J) in
/-- The bifunctor which sends an abelian sheaf `F` and an object `U` to the `n`th Ext-group
from the free abelian sheaf generated by the presheaf of sets `yoneda.obj U` to `F`. -/
noncomputable def cohomologyPresheafFunctor (n : ℕ) :
    Sheaf J AddCommGrp.{v} ⥤ Cᵒᵖ ⥤ AddCommGrp.{w'} :=
  Functor.flip (Functor.op (yoneda ⋙ (whiskeringRight _ _ _).obj AddCommGrp.free ⋙
    presheafToSheaf _ _) ⋙ extFunctor n)

/-- Given an abelian sheaf `F`, this is the presheaf which sends `U` to the `n`th Ext-group
from the free abelian sheaf generated by the presheaf of sets `yoneda.obj U` to `F`. -/
noncomputable abbrev cohomologyPresheaf (F : Sheaf J AddCommGrp.{v}) (n : ℕ) :
    Cᵒᵖ ⥤ AddCommGrp.{w'} :=
  (cohomologyPresheafFunctor J n).obj F

end

end Sheaf

end CategoryTheory
CategoryTheory\SmallObject\Construction.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Limits.Shapes.Products
import Mathlib.CategoryTheory.Limits.Shapes.Pullback.HasPullback

/-!
# Construction for the small object argument

Given a family of morphisms `f i : A i ⟶ B i` in a category `C`
and an object `S : C`, we define a functor
`SmallObject.functor f S : Over S ⥤ Over S` which sends
an object given by `πX : X ⟶ S` to the pushout `functorObj f πX`:

```
∐ functorObjSrcFamily f πX ⟶  X
            |                 |
            |                 |
            v                 v
∐ functorObjTgtFamily f πX ⟶ functorObj f S πX
```

where the morphism on the left is a coproduct (of copies of maps `f i`)
indexed by a type `FunctorObjIndex f πX` which parametrizes the
diagrams of the form

```
A i ⟶ X
 |    |
 |    |
 v    v
B i ⟶ S
```

The morphism `ιFunctorObj f S πX : X ⟶ functorObj f πX` is part of
a natural transformation `SmallObject.ε f S : 𝟭 (Over S) ⟶ functor f S`.
The main idea in this construction is that for any commutative square
as above, there may not exist a lifting `B i ⟶ X`, but the construction
provides a tautological morphism `B i ⟶ functorObj f πX`
(see `SmallObject.ιFunctorObj_extension`).

## TODO

* Show that `ιFunctorObj f πX : X ⟶ functorObj f πX` has the left lifting
property with respect to the class of morphisms that have the right lifting
property with respect to the morphisms `f i`.

## References

- https://ncatlab.org/nlab/show/small+object+argument

-/

universe w v u

namespace CategoryTheory

open Category Limits

namespace SmallObject

variable {C : Type u} [Category.{v} C] {I : Type w} {A B : I → C} (f : ∀ i, A i ⟶ B i)

section

variable {S : C} {X Y Z : C} (πX : X ⟶ S) (πY : Y ⟶ S) (φ : X ⟶ Y)

/-- Given a family of morphisms `f i : A i ⟶ B i` and a morphism `πX : X ⟶ S`,
this type parametrizes the commutative squares with a morphism `f i` on the left
and `πX` in the right. -/
structure FunctorObjIndex where
  /-- an element in the index type -/
  i : I
  /-- the top morphism in the square -/
  t : A i ⟶ X
  /-- the bottom morphism in the square -/
  b : B i ⟶ S
  /-- the square commutes -/
  w : t ≫ πX = f i ≫ b

attribute [reassoc (attr := simp)] FunctorObjIndex.w

variable [HasColimitsOfShape (Discrete (FunctorObjIndex f πX)) C]
  [HasColimitsOfShape (Discrete (FunctorObjIndex f πY)) C]

/-- The family of objects `A x.i` parametrized by `x : FunctorObjIndex f πX`. -/
abbrev functorObjSrcFamily (x : FunctorObjIndex f πX) : C := A x.i

/-- The family of objects `B x.i` parametrized by `x : FunctorObjIndex f πX`. -/
abbrev functorObjTgtFamily (x : FunctorObjIndex f πX) : C := B x.i

/-- The family of the morphisms `f x.i : A x.i ⟶ B x.i` parametrized by
`x : FunctorObjIndex f πX`. -/
abbrev functorObjLeftFamily (x : FunctorObjIndex f πX) :
    functorObjSrcFamily f πX x ⟶ functorObjTgtFamily f πX x := f x.i

/-- The top morphism in the pushout square in the definition of `pushoutObj f πX`. -/
noncomputable abbrev functorObjTop : ∐ functorObjSrcFamily f πX ⟶ X :=
  Limits.Sigma.desc (fun x => x.t)

/-- The left morphism in the pushout square in the definition of `pushoutObj f πX`. -/
noncomputable abbrev functorObjLeft :
    ∐ functorObjSrcFamily f πX ⟶ ∐ functorObjTgtFamily f πX :=
  Limits.Sigma.map (functorObjLeftFamily f πX)

section

variable [HasPushout (functorObjTop f πX) (functorObjLeft f πX)]

/-- The functor `SmallObject.functor f S : Over S ⥤ Over S` that is part of
the small object argument for a family of morphisms `f`, on an object given
as a morphism `πX : X ⟶ S`. -/
noncomputable abbrev functorObj : C :=
  pushout (functorObjTop f πX) (functorObjLeft f πX)

/-- The canonical morphism `X ⟶ functorObj f πX`. -/
noncomputable abbrev ιFunctorObj : X ⟶ functorObj f πX := pushout.inl _ _

/-- The canonical morphism `∐ (functorObjTgtFamily f πX) ⟶ functorObj f πX`. -/
noncomputable abbrev ρFunctorObj : ∐ functorObjTgtFamily f πX ⟶ functorObj f πX :=
  pushout.inr _ _

/-- The commutativity of the defining pushout square of `functorObj f πX`. -/
@[reassoc]
lemma functorObj_comm :
    functorObjTop f πX ≫ ιFunctorObj f πX = functorObjLeft f πX ≫ ρFunctorObj f πX :=
  pushout.condition

/-- The tautological commutative square attached to an index `x`: composing `f x.i` with the
coprduct inclusion into `functorObj f πX` agrees with `x.t` followed by `ιFunctorObj`. -/
@[reassoc]
lemma FunctorObjIndex.comm (x : FunctorObjIndex f πX) :
    f x.i ≫ Sigma.ι (functorObjTgtFamily f πX) x ≫ ρFunctorObj f πX =
      x.t ≫ ιFunctorObj f πX := by
  simpa using (Sigma.ι (functorObjSrcFamily f πX) x ≫= functorObj_comm f πX).symm

/-- The morphism `∐ functorObjTgtFamily f πX ⟶ S` induced by the bottom maps `x.b`. -/
noncomputable abbrev π'FunctorObj : ∐ functorObjTgtFamily f πX ⟶ S := Sigma.desc (fun x => x.b)

/-- The canonical projection `functorObj f πX ⟶ S` on the base object. -/
noncomputable def πFunctorObj : functorObj f πX ⟶ S :=
  pushout.desc πX (π'FunctorObj f πX) (by ext; simp [π'FunctorObj])

@[reassoc (attr := simp)]
lemma ρFunctorObj_π : ρFunctorObj f πX ≫ πFunctorObj f πX = π'FunctorObj f πX := by
  simp [πFunctorObj]

@[reassoc (attr := simp)]
lemma ιFunctorObj_πFunctorObj : ιFunctorObj f πX ≫ πFunctorObj f πX = πX := by
  simp [ιFunctorObj, πFunctorObj]

/-- The canonical morphism `∐ (functorObjSrcFamily f πX) ⟶ ∐ (functorObjSrcFamily f πY)`
induced by a morphism in `φ : X ⟶ Y` such that `φ ≫ πX = πY`. -/
noncomputable def functorMapSrc (hφ : φ ≫ πY = πX) :
    ∐ (functorObjSrcFamily f πX) ⟶ ∐ functorObjSrcFamily f πY :=
  Sigma.map' (fun x => FunctorObjIndex.mk x.i (x.t ≫ φ) x.b (by simp [hφ])) (fun _ => 𝟙 _)

end

variable (hφ : φ ≫ πY = πX)

/-- How `functorMapSrc` acts on a coproduct inclusion: the index is reindexed by
postcomposing the top map with `φ`. -/
@[reassoc]
lemma ι_functorMapSrc (i : I) (t : A i ⟶ X) (b : B i ⟶ S) (w : t ≫ πX = f i ≫ b)
    (t' : A i ⟶ Y) (fac : t ≫ φ = t') :
    Sigma.ι _ (FunctorObjIndex.mk i t b w) ≫ functorMapSrc f πX πY φ hφ =
      Sigma.ι (functorObjSrcFamily f πY) (FunctorObjIndex.mk i t' b
        (by rw [← w, ← fac, assoc, hφ])) := by
  subst fac
  simp [functorMapSrc]

@[reassoc (attr := simp)]
lemma functorMapSrc_functorObjTop :
    functorMapSrc f πX πY φ hφ ≫ functorObjTop f πY = functorObjTop f πX ≫ φ := by
  ext ⟨i, t, b, w⟩
  simp [ι_functorMapSrc_assoc f πX πY φ hφ i t b w _ rfl]

/-- The canonical morphism `∐ functorObjTgtFamily f πX ⟶ ∐ functorObjTgtFamily f πY`
induced by a morphism in `φ : X ⟶ Y` such that `φ ≫ πX = πY`. -/
noncomputable def functorMapTgt (hφ : φ ≫ πY = πX) :
    ∐ functorObjTgtFamily f πX ⟶ ∐ functorObjTgtFamily f πY :=
  Sigma.map' (fun x => FunctorObjIndex.mk x.i (x.t ≫ φ) x.b (by simp [hφ])) (fun _ => 𝟙 _)

/-- How `functorMapTgt` acts on a coproduct inclusion (same reindexing as
`ι_functorMapSrc`). -/
@[reassoc]
lemma ι_functorMapTgt (i : I) (t : A i ⟶ X) (b : B i ⟶ S) (w : t ≫ πX = f i ≫ b)
    (t' : A i ⟶ Y) (fac : t ≫ φ = t') :
    Sigma.ι _ (FunctorObjIndex.mk i t b w) ≫ functorMapTgt f πX πY φ hφ =
      Sigma.ι (functorObjTgtFamily f πY) (FunctorObjIndex.mk i t' b
        (by rw [← w, ← fac, assoc, hφ])) := by
  subst fac
  simp [functorMapTgt]

/-- `functorMapSrc` and `functorMapTgt` are compatible with the left legs of the
pushout squares. -/
lemma functorMap_comm :
    functorObjLeft f πX ≫ functorMapTgt f πX πY φ hφ =
      functorMapSrc f πX πY φ hφ ≫ functorObjLeft f πY := by
  ext ⟨i, t, b, w⟩
  simp only [ι_colimMap_assoc, Discrete.natTrans_app, ι_colimMap,
    ι_functorMapTgt f πX πY φ hφ i t b w _ rfl,
    ι_functorMapSrc_assoc f πX πY φ hφ i t b w _ rfl]

variable [HasPushout (functorObjTop f πX) (functorObjLeft f πX)]
  [HasPushout (functorObjTop f πY) (functorObjLeft f πY)]

/-- The functor `SmallObject.functor f S : Over S ⥤ Over S` that is part of
the small object argument for a family of morphisms `f`, on morphisms. -/
noncomputable def functorMap : functorObj f πX ⟶ functorObj f πY :=
  pushout.map _ _ _ _ φ (functorMapTgt f πX πY φ hφ) (functorMapSrc f πX πY φ hφ)
    (by simp) (functorMap_comm f πX πY φ hφ)

@[reassoc (attr := simp)]
lemma functorMap_π :
    functorMap f πX πY φ hφ ≫ πFunctorObj f πY = πFunctorObj f πX := by
  ext ⟨i, t, b, w⟩
  · simp [functorMap, hφ]
  · simp [functorMap, ι_functorMapTgt_assoc f πX πY φ hφ i t b w _ rfl]

variable (X) in
@[simp]
lemma functorMap_id : functorMap f πX πX (𝟙 X) (by simp) = 𝟙 _ := by
  ext ⟨i, t, b, w⟩
  · simp [functorMap]
  · simp [functorMap, ι_functorMapTgt_assoc f πX πX (𝟙 X) (by simp) i t b w t (by simp)]

@[reassoc (attr := simp)]
lemma ιFunctorObj_naturality :
    ιFunctorObj f πX ≫ functorMap f πX πY φ hφ = φ ≫ ιFunctorObj f πY := by
  simp [ιFunctorObj, functorMap]

/-- Every commutative square with `f i` on the left and `πX` on the right admits a
tautological (partial) lift into `functorObj f πX`, via the coproduct inclusion
at the corresponding index. -/
lemma ιFunctorObj_extension {i : I} (t : A i ⟶ X) (b : B i ⟶ S)
    (sq : CommSq t (f i) πX b) :
    ∃ (l : B i ⟶ functorObj f πX), f i ≫ l = t ≫ ιFunctorObj f πX ∧
      l ≫ πFunctorObj f πX = b :=
  ⟨Sigma.ι (functorObjTgtFamily f πX) (FunctorObjIndex.mk i t b sq.w) ≫ ρFunctorObj f πX,
    (FunctorObjIndex.mk i t b _).comm, by simp⟩

end

variable (S : C) [HasPushouts C]
  [∀ {X : C} (πX : X ⟶ S), HasColimitsOfShape (Discrete (FunctorObjIndex f πX)) C]

/-- The functor `Over S ⥤ Over S` that is constructed in order to apply the small
object argument to a family of morphisms `f i : A i ⟶ B i`, see the introduction
of the file `Mathlib.CategoryTheory.SmallObject.Construction` -/
@[simps! obj map]
noncomputable def functor : Over S ⥤ Over S where
  obj π := Over.mk (πFunctorObj f π.hom)
  map {π₁ π₂} φ := Over.homMk (functorMap f π₁.hom π₂.hom φ.left (Over.w φ))
  map_id _ := by ext; dsimp; simp
  map_comp {π₁ π₂ π₃} φ φ' := by
    ext1
    dsimp
    ext ⟨i, t, b, w⟩
    · simp
    · simp [functorMap,
        ι_functorMapTgt_assoc f π₁.hom π₂.hom φ.left (Over.w φ) i t b w _ rfl,
        ι_functorMapTgt_assoc f π₁.hom π₃.hom (φ.left ≫ φ'.left) (Over.w (φ ≫ φ'))
          i t b w _ rfl,
        ι_functorMapTgt_assoc f π₂.hom π₃.hom (φ'.left) (Over.w φ') i (t ≫ φ.left) b
          (by simp [w]) (t ≫ φ.left ≫ φ'.left) (by simp)]

/-- The canonical natural transformation `𝟭 (Over S) ⟶ functor f S`. -/
@[simps! app]
noncomputable def ε : 𝟭 (Over S) ⟶ functor f S where
  app w := Over.homMk (ιFunctorObj f w.hom)

end SmallObject

end CategoryTheory
CategoryTheory\SmallObject\Iteration.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Category.Preorder
import Mathlib.CategoryTheory.Limits.IsLimit
import Mathlib.Order.IsWellOrderLimitElement

/-!
# Transfinite iterations of a functor

In this file, given a functor `Φ : C ⥤ C` and a natural transformation
`ε : 𝟭 C ⟶ Φ`, we shall define the transfinite iterations of `Φ` (TODO).

Given `j : J` where `J` is a well ordered set, we first introduce
a category `Iteration ε j`. An object in this category consists of
a functor `F : { i // i ≤ j } ⥤ C ⥤ C` equipped with the data
which makes it the `i`th-iteration of `Φ` for all `i` such that `i ≤ j`.
Under suitable assumptions on `C`, we shall show that
this category `Iteration ε j` is equivalent to the punctual category (TODO).
We shall study morphisms in this category, showing first that
there is at most one morphism between two morphisms (done), and secondly,
that there does always exist a unique morphism between two objects (TODO).
Then, we shall show the existence of an object (TODO).
In these proofs, which are all done using transfinite induction,
we have to treat three cases separately:
* the case `j = ⊥`;
* the case `j` is a successor;
* the case `j` is a limit element.

-/

universe u

namespace CategoryTheory

open Category Limits

variable {C : Type*} [Category C] {Φ : C ⥤ C} (ε : 𝟭 C ⟶ Φ)
  {J : Type u} [LinearOrder J]

namespace Functor

namespace Iteration

variable {j : J} (F : { i // i ≤ j } ⥤ C)

/-- The map `F.obj ⟨i, _⟩ ⟶ F.obj ⟨wellOrderSucc i, _⟩` when `F : { i // i ≤ j } ⥤ C`
and `i : J` is such that `i < j`. -/
noncomputable abbrev mapSucc' [IsWellOrder J (· < ·)] (i : J) (hi : i < j) :
    F.obj ⟨i, hi.le⟩ ⟶ F.obj ⟨wellOrderSucc i, wellOrderSucc_le hi⟩ :=
  F.map (homOfLE (by simpa only [Subtype.mk_le_mk] using self_le_wellOrderSucc i))

variable {i : J} (hi : i ≤ j)

/-- The functor `{ k // k < i } ⥤ C` obtained by "restriction" of `F : { i // i ≤ j } ⥤ C`
when `i ≤ j`. -/
def restrictionLT : { k // k < i } ⥤ C :=
  (monotone_inclusion_lt_le_of_le hi).functor ⋙ F

@[simp]
lemma restrictionLT_obj (k : J) (hk : k < i) :
    (restrictionLT F hi).obj ⟨k, hk⟩ = F.obj ⟨k, hk.le.trans hi⟩ := rfl

@[simp]
lemma restrictionLT_map {k₁ k₂ : { k // k < i }} (φ : k₁ ⟶ k₂) :
    (restrictionLT F hi).map φ = F.map (homOfLE (by simpa using leOfHom φ)) := rfl

/-- Given `F : { i // i ≤ j } ⥤ C`, `i : J` such that `hi : i ≤ j`, this is the
cocone consisting of all maps `F.obj ⟨k, hk⟩ ⟶ F.obj ⟨i, hi⟩` for `k : J` such that `k < i`. -/
@[simps]
def coconeOfLE : Cocone (restrictionLT F hi) where
  pt := F.obj ⟨i, hi⟩
  ι :=
    { app := fun ⟨k, hk⟩ => F.map (homOfLE (by simpa using hk.le))
      naturality := fun ⟨k₁, hk₁⟩ ⟨k₂, hk₂⟩ _ => by
        simp [comp_id, ← Functor.map_comp, homOfLE_comp] }

end Iteration

variable [IsWellOrder J (· < ·)] [OrderBot J]

/-- The category of `j`th iterations of a functor `Φ` equipped with a natural
transformation `ε : 𝟭 C ⟶ Φ`. An object consists of the data of all iterations
of `Φ` for `i : J` such that `i ≤ j` (this is the field `F`). Such objects are
equipped with data and properties which characterizes the iterations up to a unique
isomorphism for the three types of elements: `⊥`, successors, limit elements. -/
structure Iteration (j : J) where
  /-- The data of all `i`th iterations for `i : J` such that `i ≤ j`. -/
  F : { i // i ≤ j } ⥤ C ⥤ C
  /-- The zeroth iteration is the identity functor. -/
  isoZero : F.obj ⟨⊥, bot_le⟩ ≅ 𝟭 C
  /-- The iteration on a successor element is obtained by composition of
  the previous iteration with `Φ`. -/
  isoSucc (i : J) (hi : i < j) :
    F.obj ⟨wellOrderSucc i, wellOrderSucc_le hi⟩ ≅ F.obj ⟨i, hi.le⟩ ⋙ Φ
  /-- The natural map from an iteration to its successor is induced by `ε`. -/
  mapSucc'_eq (i : J) (hi : i < j) :
    Iteration.mapSucc' F i hi = whiskerLeft _ ε ≫ (isoSucc i hi).inv
  /-- If `i` is a limit element, the `i`th iteration is the colimit
  of `k`th iterations for `k < i`. -/
  isColimit (i : J) [IsWellOrderLimitElement i] (hi : i ≤ j) :
    IsColimit (Iteration.coconeOfLE F hi)

namespace Iteration

variable {ε}
variable {j : J}

section

variable (iter : Φ.Iteration ε j)

/-- For `iter : Φ.Iteration.ε j`, this is the map
`iter.F.obj ⟨i, _⟩ ⟶ iter.F.obj ⟨wellOrderSucc i, _⟩` if `i : J` is such that `i < j`. -/
noncomputable abbrev mapSucc (i : J) (hi : i < j) :
    iter.F.obj ⟨i, hi.le⟩ ⟶ iter.F.obj ⟨wellOrderSucc i, wellOrderSucc_le hi⟩ :=
  mapSucc' iter.F i hi

/-- Restatement of the field `mapSucc'_eq` in terms of `mapSucc`. -/
lemma mapSucc_eq (i : J) (hi : i < j) :
    iter.mapSucc i hi = whiskerLeft _ ε ≫ (iter.isoSucc i hi).inv :=
  iter.mapSucc'_eq _ hi

end

variable (iter₁ iter₂ iter₃ : Φ.Iteration ε j)

/-- A morphism between two objects `iter₁` and `iter₂` in the
category `Φ.Iteration ε j` of `j`th iterations of a functor `Φ`
equipped with a natural transformation `ε : 𝟭 C ⟶ Φ` consists of a natural
transformation `natTrans : iter₁.F ⟶ iter₂.F` which is compatible with the
isomorphisms `isoZero` and `isoSucc`. -/
structure Hom where
  /-- A natural transformation `iter₁.F ⟶ iter₂.F` -/
  natTrans : iter₁.F ⟶ iter₂.F
  /-- compatibility with the `isoZero` fields -/
  natTrans_app_zero :
    natTrans.app ⟨⊥, bot_le⟩ = iter₁.isoZero.hom ≫ iter₂.isoZero.inv := by aesop_cat
  /-- compatibility with the `isoSucc` fields -/
  natTrans_app_succ (i : J) (hi : i < j) :
    natTrans.app ⟨wellOrderSucc i, wellOrderSucc_le hi⟩ = (iter₁.isoSucc i hi).hom ≫
      whiskerRight (natTrans.app ⟨i, hi.le⟩) _ ≫ (iter₂.isoSucc i hi).inv := by aesop_cat

namespace Hom

attribute [simp, reassoc] natTrans_app_zero

/-- The identity morphism in the category `Φ.Iteration ε j`. -/
@[simps]
def id : Hom iter₁ iter₁ where
  natTrans := 𝟙 _

variable {iter₁ iter₂ iter₃}

-- Note: this is not made a global ext lemma because it is shown below
-- that the type of morphisms is a subsingleton.
lemma ext' {f g : Hom iter₁ iter₂} (h : f.natTrans = g.natTrans) : f = g := by
  cases f
  cases g
  subst h
  rfl

attribute [local ext] ext'

/-- The composition of morphisms in the category `Φ.Iteration ε j`. -/
@[simps]
def comp {iter₃ : Iteration ε j} (f : Hom iter₁ iter₂) (g : Hom iter₂ iter₃) :
    Hom iter₁ iter₃ where
  natTrans := f.natTrans ≫ g.natTrans
  natTrans_app_succ i hi := by simp [natTrans_app_succ _ _ hi]

instance : Category (Iteration ε j) where
  Hom := Hom
  id := id
  comp := comp

-- Uniqueness of morphisms between iterations, proved by transfinite induction,
-- treating separately the cases `⊥`, successor, and limit element.
instance : Subsingleton (iter₁ ⟶ iter₂) where
  allEq f g := ext' (by
    let P := fun (i : J) => ∀ (hi : i ≤ j),
      f.natTrans.app ⟨i, hi⟩ = g.natTrans.app ⟨i, hi⟩
    suffices ∀ (i : J), P i by
      ext ⟨i, hi⟩ : 2
      apply this
    refine fun _ => WellFoundedLT.induction _ (fun i hi hi' => ?_)
    obtain rfl|⟨i, rfl, hi''⟩|_ := eq_bot_or_eq_succ_or_isWellOrderLimitElement i
    · simp only [natTrans_app_zero]
    · simp only [Hom.natTrans_app_succ _ i (lt_of_lt_of_le hi'' hi'), hi i hi'']
    · exact (iter₁.isColimit i hi').hom_ext (fun ⟨k, hk⟩ => by simp [hi k hk]))

end Hom

end Iteration

end Functor

end CategoryTheory
CategoryTheory\Subobject\Basic.lean
/- Copyright (c) 2020 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Scott Morrison -/ import Mathlib.CategoryTheory.Subobject.MonoOver import Mathlib.CategoryTheory.Skeletal import Mathlib.CategoryTheory.ConcreteCategory.Basic import Mathlib.Tactic.ApplyFun import Mathlib.Tactic.CategoryTheory.Elementwise /-! # Subobjects We define `Subobject X` as the quotient (by isomorphisms) of `MonoOver X := {f : Over X // Mono f.hom}`. Here `MonoOver X` is a thin category (a pair of objects has at most one morphism between them), so we can think of it as a preorder. However as it is not skeletal, it is not a partial order. There is a coercion from `Subobject X` back to the ambient category `C` (using choice to pick a representative), and for `P : Subobject X`, `P.arrow : (P : C) ⟶ X` is the inclusion morphism. We provide * `def pullback [HasPullbacks C] (f : X ⟶ Y) : Subobject Y ⥤ Subobject X` * `def map (f : X ⟶ Y) [Mono f] : Subobject X ⥤ Subobject Y` * `def «exists_» [HasImages C] (f : X ⟶ Y) : Subobject X ⥤ Subobject Y` and prove their basic properties and relationships. These are all easy consequences of the earlier development of the corresponding functors for `MonoOver`. The subobjects of `X` form a preorder making them into a category. We have `X ≤ Y` if and only if `X.arrow` factors through `Y.arrow`: see `ofLE`/`ofLEMk`/`ofMkLE`/`ofMkLEMk` and `le_of_comm`. Similarly, to show that two subobjects are equal, we can supply an isomorphism between the underlying objects that commutes with the arrows (`eq_of_comm`). See also * `CategoryTheory.Subobject.factorThru` : an API describing factorization of morphisms through subobjects. * `CategoryTheory.Subobject.lattice` : the lattice structures on subobjects. ## Notes This development originally appeared in Bhavik Mehta's "Topos theory for Lean" repository, and was ported to mathlib by Scott Morrison. 
### Implementation note

Currently we describe `pullback`, `map`, etc., as functors.
It may be better to just say that they are monotone functions,
and even avoid using categorical language entirely when describing `Subobject X`.
(It's worth keeping this in mind in future use; it should be a relatively easy change here
if it looks preferable.)

### Relation to pseudoelements

There is a separate development of pseudoelements in `CategoryTheory.Abelian.Pseudoelements`,
as a quotient (but not by isomorphism) of `Over X`.

When a morphism `f` has an image, the image represents the same pseudoelement.
In a category with images `Pseudoelements X` could be constructed as a quotient of `MonoOver X`.
In fact, in an abelian category (I'm not sure in what generality beyond that),
`Pseudoelements X` agrees with `Subobject X`, but we haven't developed this in mathlib yet.
-/

universe v₁ v₂ u₁ u₂

noncomputable section

namespace CategoryTheory

open CategoryTheory CategoryTheory.Category CategoryTheory.Limits

variable {C : Type u₁} [Category.{v₁} C] {X Y Z : C}
variable {D : Type u₂} [Category.{v₂} D]

/-! We now construct the subobject lattice for `X : C`,
as the quotient by isomorphisms of `MonoOver X`.

Since `MonoOver X` is a thin category, we use `ThinSkeleton` to take the quotient.

Essentially all the structure defined above on `MonoOver X` descends to `Subobject X`,
with morphisms becoming inequalities, and isomorphisms becoming equations.
-/

/-- The category of subobjects of `X : C`, defined as isomorphism classes of monomorphisms into
`X`.
-/
def Subobject (X : C) :=
  ThinSkeleton (MonoOver X)

-- The partial order is inherited from the `ThinSkeleton` construction,
-- which `Subobject` unfolds to.
instance (X : C) : PartialOrder (Subobject X) := by
  dsimp only [Subobject]
  infer_instance

namespace Subobject

-- Porting note: made it a def rather than an abbreviation
-- because Lean would make it too transparent
/-- Convenience constructor for a subobject.
-/
def mk {X A : C} (f : A ⟶ X) [Mono f] : Subobject X :=
  (toThinSkeleton _).obj (MonoOver.mk' f)

section

attribute [local ext] CategoryTheory.Comma

/-- Induction principle for subobjects: to prove `p` for every `P : Subobject X`,
it suffices to prove it for all subobjects of the form `Subobject.mk f`. -/
protected theorem ind {X : C} (p : Subobject X → Prop)
    (h : ∀ ⦃A : C⦄ (f : A ⟶ X) [Mono f], p (Subobject.mk f)) (P : Subobject X) : p P := by
  apply Quotient.inductionOn'
  intro a
  exact h a.arrow

/-- Binary variant of `Subobject.ind`: to prove `p` for every pair of subobjects,
it suffices to prove it for pairs of the form `(Subobject.mk f, Subobject.mk g)`. -/
protected theorem ind₂ {X : C} (p : Subobject X → Subobject X → Prop)
    (h : ∀ ⦃A B : C⦄ (f : A ⟶ X) (g : B ⟶ X) [Mono f] [Mono g],
      p (Subobject.mk f) (Subobject.mk g))
    (P Q : Subobject X) : p P Q := by
  apply Quotient.inductionOn₂'
  intro a b
  exact h a.arrow b.arrow

end

/-- Declare a function on subobjects of `X` by specifying a function on monomorphisms with
codomain `X`. -/
protected def lift {α : Sort*} {X : C} (F : ∀ ⦃A : C⦄ (f : A ⟶ X) [Mono f], α)
    (h : ∀ ⦃A B : C⦄ (f : A ⟶ X) (g : B ⟶ X) [Mono f] [Mono g] (i : A ≅ B),
      i.hom ≫ g = f → F f = F g) :
    Subobject X → α := fun P =>
  Quotient.liftOn' P (fun m => F m.arrow) fun m n ⟨i⟩ =>
    h m.arrow n.arrow ((MonoOver.forget X ⋙ Over.forget X).mapIso i) (Over.w i.hom)

-- `Subobject.lift` computes definitionally on `Subobject.mk`, as witnessed by `rfl`.
@[simp]
protected theorem lift_mk {α : Sort*} {X : C} (F : ∀ ⦃A : C⦄ (f : A ⟶ X) [Mono f], α) {h A}
    (f : A ⟶ X) [Mono f] : Subobject.lift F h (Subobject.mk f) = F f :=
  rfl

/-- The category of subobjects is equivalent to the `MonoOver` category. It is more convenient to
use the former due to the partial order instance, but oftentimes it is easier to define structures
on the latter. -/
noncomputable def equivMonoOver (X : C) : Subobject X ≌ MonoOver X :=
  ThinSkeleton.equivalence _

/-- Use choice to pick a representative `MonoOver X` for each `Subobject X`.
-/
noncomputable def representative {X : C} : Subobject X ⥤ MonoOver X :=
  (equivMonoOver X).functor

/-- Starting with `A : MonoOver X`, we can take its equivalence class in `Subobject X`
then pick an arbitrary representative using `representative.obj`.
This is isomorphic (in `MonoOver X`) to the original `A`.
-/
noncomputable def representativeIso {X : C} (A : MonoOver X) :
    representative.obj ((toThinSkeleton _).obj A) ≅ A :=
  (equivMonoOver X).counitIso.app A

/-- Use choice to pick a representative underlying object in `C` for any `Subobject X`.

Prefer to use the coercion `P : C` rather than explicitly writing `underlying.obj P`.
-/
noncomputable def underlying {X : C} : Subobject X ⥤ C :=
  representative ⋙ MonoOver.forget _ ⋙ Over.forget _

instance : CoeOut (Subobject X) C where coe Y := underlying.obj Y

-- Porting note: removed as it has become a syntactic tautology
-- @[simp]
-- theorem underlying_as_coe {X : C} (P : Subobject X) : underlying.obj P = P :=
--   rfl

/-- If we construct a `Subobject Y` from an explicit `f : X ⟶ Y` with `[Mono f]`,
then pick an arbitrary choice of underlying object `(Subobject.mk f : C)` back in `C`,
it is isomorphic (in `C`) to the original `X`. -/
noncomputable def underlyingIso {X Y : C} (f : X ⟶ Y) [Mono f] : (Subobject.mk f : C) ≅ X :=
  (MonoOver.forget _ ⋙ Over.forget _).mapIso (representativeIso (MonoOver.mk' f))

/-- The morphism in `C` from the arbitrarily chosen underlying object to the ambient object.
-/
noncomputable def arrow {X : C} (Y : Subobject X) : (Y : C) ⟶ X :=
  (representative.obj Y).obj.hom

instance arrow_mono {X : C} (Y : Subobject X) : Mono Y.arrow :=
  (representative.obj Y).property

/-- The arrows of equal subobjects agree, after transporting along the `eqToHom` identification
of their underlying objects. -/
@[simp]
theorem arrow_congr {A : C} (X Y : Subobject A) (h : X = Y) :
    eqToHom (congr_arg (fun X : Subobject A => (X : C)) h) ≫ Y.arrow = X.arrow := by
  induction h
  simp

@[simp]
theorem representative_coe (Y : Subobject X) : (representative.obj Y : C) = (Y : C) :=
  rfl

@[simp]
theorem representative_arrow (Y : Subobject X) : (representative.obj Y).arrow = Y.arrow :=
  rfl

@[reassoc (attr := simp)]
theorem underlying_arrow {X : C} {Y Z : Subobject X} (f : Y ⟶ Z) :
    underlying.map f ≫ arrow Z = arrow Y :=
  Over.w (representative.map f)

@[reassoc (attr := simp), elementwise (attr := simp)]
theorem underlyingIso_arrow {X Y : C} (f : X ⟶ Y) [Mono f] :
    (underlyingIso f).inv ≫ (Subobject.mk f).arrow = f :=
  Over.w _

@[reassoc (attr := simp)]
theorem underlyingIso_hom_comp_eq_mk {X Y : C} (f : X ⟶ Y) [Mono f] :
    (underlyingIso f).hom ≫ f = (mk f).arrow :=
  (Iso.eq_inv_comp _).1 (underlyingIso_arrow f).symm

/-- Two morphisms into a subobject are equal exactly if
the morphisms into the ambient object are equal -/
@[ext]
theorem eq_of_comp_arrow_eq {X Y : C} {P : Subobject Y} {f g : X ⟶ P}
    (h : f ≫ P.arrow = g ≫ P.arrow) : f = g :=
  (cancel_mono P.arrow).mp h

/-- A commuting morphism between representing monomorphisms induces an inequality of the
corresponding subobjects. -/
theorem mk_le_mk_of_comm {B A₁ A₂ : C} {f₁ : A₁ ⟶ B} {f₂ : A₂ ⟶ B} [Mono f₁] [Mono f₂]
    (g : A₁ ⟶ A₂) (w : g ≫ f₂ = f₁) : mk f₁ ≤ mk f₂ :=
  ⟨MonoOver.homMk _ w⟩

@[simp]
theorem mk_arrow (P : Subobject X) : mk P.arrow = P :=
  Quotient.inductionOn' P fun Q => by
    obtain ⟨e⟩ := @Quotient.mk_out' _ (isIsomorphicSetoid _) Q
    exact Quotient.sound' ⟨MonoOver.isoMk (Iso.refl _) ≪≫ e⟩

/-- A morphism between the underlying objects commuting with the arrows witnesses an inequality
of subobjects. -/
theorem le_of_comm {B : C} {X Y : Subobject B} (f : (X : C) ⟶ (Y : C))
    (w : f ≫ Y.arrow = X.arrow) : X ≤ Y := by
  convert mk_le_mk_of_comm _ w <;> simp

theorem le_mk_of_comm {B A : C} {X : Subobject B} {f : A ⟶ B} [Mono f] (g : (X : C) ⟶ A)
    (w : g ≫ f = X.arrow) : X ≤ mk f :=
  le_of_comm (g ≫ (underlyingIso f).inv) <| by simp [w]

theorem mk_le_of_comm {B A : C} {X : Subobject B} {f : A ⟶ B} [Mono f] (g : A ⟶ (X : C))
    (w : g ≫ X.arrow = f) : mk f ≤ X :=
  le_of_comm ((underlyingIso f).hom ≫ g) <| by simp [w]

/-- To show that two subobjects are equal, it suffices to exhibit an isomorphism commuting with
the arrows. -/
@[ext (iff := false)]
theorem eq_of_comm {B : C} {X Y : Subobject B} (f : (X : C) ≅ (Y : C))
    (w : f.hom ≫ Y.arrow = X.arrow) : X = Y :=
  le_antisymm (le_of_comm f.hom w) <| le_of_comm f.inv <| f.inv_comp_eq.2 w.symm

-- Porting note (#11182): removed @[ext]
/-- To show that two subobjects are equal, it suffices to exhibit an isomorphism commuting with
the arrows. -/
theorem eq_mk_of_comm {B A : C} {X : Subobject B} (f : A ⟶ B) [Mono f] (i : (X : C) ≅ A)
    (w : i.hom ≫ f = X.arrow) : X = mk f :=
  eq_of_comm (i.trans (underlyingIso f).symm) <| by simp [w]

-- Porting note (#11182): removed @[ext]
/-- To show that two subobjects are equal, it suffices to exhibit an isomorphism commuting with
the arrows. -/
theorem mk_eq_of_comm {B A : C} {X : Subobject B} (f : A ⟶ B) [Mono f] (i : A ≅ (X : C))
    (w : i.hom ≫ X.arrow = f) : mk f = X :=
  Eq.symm <| eq_mk_of_comm _ i.symm <| by rw [Iso.symm_hom, Iso.inv_comp_eq, w]

-- Porting note (#11182): removed @[ext]
/-- To show that two subobjects are equal, it suffices to exhibit an isomorphism commuting with
the arrows. -/
theorem mk_eq_mk_of_comm {B A₁ A₂ : C} (f : A₁ ⟶ B) (g : A₂ ⟶ B) [Mono f] [Mono g] (i : A₁ ≅ A₂)
    (w : i.hom ≫ g = f) : mk f = mk g :=
  eq_mk_of_comm _ ((underlyingIso f).trans i) <| by simp [w]

-- We make `X` and `Y` explicit arguments here so that when `ofLE` appears in goal statements
-- it is possible to see its source and target
-- (`h` will just display as `_`, because it is in `Prop`).
/-- An inequality of subobjects is witnessed by some morphism between the corresponding objects.
-/
def ofLE {B : C} (X Y : Subobject B) (h : X ≤ Y) : (X : C) ⟶ (Y : C) :=
  underlying.map <| h.hom

@[reassoc (attr := simp)]
theorem ofLE_arrow {B : C} {X Y : Subobject B} (h : X ≤ Y) : ofLE X Y h ≫ Y.arrow = X.arrow :=
  underlying_arrow _

instance {B : C} (X Y : Subobject B) (h : X ≤ Y) : Mono (ofLE X Y h) := by
  fconstructor
  intro Z f g w
  replace w := w =≫ Y.arrow
  ext
  simpa using w

theorem ofLE_mk_le_mk_of_comm {B A₁ A₂ : C} {f₁ : A₁ ⟶ B} {f₂ : A₂ ⟶ B} [Mono f₁] [Mono f₂]
    (g : A₁ ⟶ A₂) (w : g ≫ f₂ = f₁) :
    ofLE _ _ (mk_le_mk_of_comm g w) = (underlyingIso _).hom ≫ g ≫ (underlyingIso _).inv := by
  ext
  simp [w]

/-- An inequality of subobjects is witnessed by some morphism between the corresponding objects. -/
def ofLEMk {B A : C} (X : Subobject B) (f : A ⟶ B) [Mono f] (h : X ≤ mk f) : (X : C) ⟶ A :=
  ofLE X (mk f) h ≫ (underlyingIso f).hom

instance {B A : C} (X : Subobject B) (f : A ⟶ B) [Mono f] (h : X ≤ mk f) :
    Mono (ofLEMk X f h) := by
  dsimp only [ofLEMk]
  infer_instance

@[simp]
theorem ofLEMk_comp {B A : C} {X : Subobject B} {f : A ⟶ B} [Mono f] (h : X ≤ mk f) :
    ofLEMk X f h ≫ f = X.arrow := by simp [ofLEMk]

/-- An inequality of subobjects is witnessed by some morphism between the corresponding objects. -/
def ofMkLE {B A : C} (f : A ⟶ B) [Mono f] (X : Subobject B) (h : mk f ≤ X) : A ⟶ (X : C) :=
  (underlyingIso f).inv ≫ ofLE (mk f) X h

instance {B A : C} (f : A ⟶ B) [Mono f] (X : Subobject B) (h : mk f ≤ X) :
    Mono (ofMkLE f X h) := by
  dsimp only [ofMkLE]
  infer_instance

@[simp]
theorem ofMkLE_arrow {B A : C} {f : A ⟶ B} [Mono f] {X : Subobject B} (h : mk f ≤ X) :
    ofMkLE f X h ≫ X.arrow = f := by simp [ofMkLE]

/-- An inequality of subobjects is witnessed by some morphism between the corresponding objects.
-/
def ofMkLEMk {B A₁ A₂ : C} (f : A₁ ⟶ B) (g : A₂ ⟶ B) [Mono f] [Mono g] (h : mk f ≤ mk g) :
    A₁ ⟶ A₂ :=
  (underlyingIso f).inv ≫ ofLE (mk f) (mk g) h ≫ (underlyingIso g).hom

instance {B A₁ A₂ : C} (f : A₁ ⟶ B) (g : A₂ ⟶ B) [Mono f] [Mono g] (h : mk f ≤ mk g) :
    Mono (ofMkLEMk f g h) := by
  dsimp only [ofMkLEMk]
  infer_instance

@[simp]
theorem ofMkLEMk_comp {B A₁ A₂ : C} {f : A₁ ⟶ B} {g : A₂ ⟶ B} [Mono f] [Mono g]
    (h : mk f ≤ mk g) : ofMkLEMk f g h ≫ g = f := by simp [ofMkLEMk]

-- The next eight lemmas record that composites of the `ofLE`/`ofLEMk`/`ofMkLE`/`ofMkLEMk`
-- witnesses collapse, since they are all built from `underlying.map` and the `underlyingIso`s.
@[reassoc (attr := simp)]
theorem ofLE_comp_ofLE {B : C} (X Y Z : Subobject B) (h₁ : X ≤ Y) (h₂ : Y ≤ Z) :
    ofLE X Y h₁ ≫ ofLE Y Z h₂ = ofLE X Z (h₁.trans h₂) := by
  simp only [ofLE, ← Functor.map_comp underlying]
  congr 1

@[reassoc (attr := simp)]
theorem ofLE_comp_ofLEMk {B A : C} (X Y : Subobject B) (f : A ⟶ B) [Mono f] (h₁ : X ≤ Y)
    (h₂ : Y ≤ mk f) : ofLE X Y h₁ ≫ ofLEMk Y f h₂ = ofLEMk X f (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ← Functor.map_comp_assoc underlying]
  congr 1

@[reassoc (attr := simp)]
theorem ofLEMk_comp_ofMkLE {B A : C} (X : Subobject B) (f : A ⟶ B) [Mono f] (Y : Subobject B)
    (h₁ : X ≤ mk f) (h₂ : mk f ≤ Y) :
    ofLEMk X f h₁ ≫ ofMkLE f Y h₂ = ofLE X Y (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ← Functor.map_comp underlying, assoc, Iso.hom_inv_id_assoc]
  congr 1

@[reassoc (attr := simp)]
theorem ofLEMk_comp_ofMkLEMk {B A₁ A₂ : C} (X : Subobject B) (f : A₁ ⟶ B) [Mono f] (g : A₂ ⟶ B)
    [Mono g] (h₁ : X ≤ mk f) (h₂ : mk f ≤ mk g) :
    ofLEMk X f h₁ ≫ ofMkLEMk f g h₂ = ofLEMk X g (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ofMkLEMk, ← Functor.map_comp_assoc underlying, assoc,
    Iso.hom_inv_id_assoc]
  congr 1

@[reassoc (attr := simp)]
theorem ofMkLE_comp_ofLE {B A₁ : C} (f : A₁ ⟶ B) [Mono f] (X Y : Subobject B) (h₁ : mk f ≤ X)
    (h₂ : X ≤ Y) : ofMkLE f X h₁ ≫ ofLE X Y h₂ = ofMkLE f Y (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ofMkLEMk, ← Functor.map_comp underlying, assoc]
  congr 1

@[reassoc (attr := simp)]
theorem ofMkLE_comp_ofLEMk {B A₁ A₂ : C} (f : A₁ ⟶ B) [Mono f] (X : Subobject B) (g : A₂ ⟶ B)
    [Mono g] (h₁ : mk f ≤ X) (h₂ : X ≤ mk g) :
    ofMkLE f X h₁ ≫ ofLEMk X g h₂ = ofMkLEMk f g (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ofMkLEMk, ← Functor.map_comp_assoc underlying, assoc]
  congr 1

@[reassoc (attr := simp)]
theorem ofMkLEMk_comp_ofMkLE {B A₁ A₂ : C} (f : A₁ ⟶ B) [Mono f] (g : A₂ ⟶ B) [Mono g]
    (X : Subobject B) (h₁ : mk f ≤ mk g) (h₂ : mk g ≤ X) :
    ofMkLEMk f g h₁ ≫ ofMkLE g X h₂ = ofMkLE f X (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ofMkLEMk, ← Functor.map_comp underlying, assoc,
    Iso.hom_inv_id_assoc]
  congr 1

@[reassoc (attr := simp)]
theorem ofMkLEMk_comp_ofMkLEMk {B A₁ A₂ A₃ : C} (f : A₁ ⟶ B) [Mono f] (g : A₂ ⟶ B) [Mono g]
    (h : A₃ ⟶ B) [Mono h] (h₁ : mk f ≤ mk g) (h₂ : mk g ≤ mk h) :
    ofMkLEMk f g h₁ ≫ ofMkLEMk g h h₂ = ofMkLEMk f h (h₁.trans h₂) := by
  simp only [ofMkLE, ofLEMk, ofLE, ofMkLEMk, ← Functor.map_comp_assoc underlying, assoc,
    Iso.hom_inv_id_assoc]
  congr 1

@[simp]
theorem ofLE_refl {B : C} (X : Subobject B) : ofLE X X le_rfl = 𝟙 _ := by
  apply (cancel_mono X.arrow).mp
  simp

@[simp]
theorem ofMkLEMk_refl {B A₁ : C} (f : A₁ ⟶ B) [Mono f] : ofMkLEMk f f le_rfl = 𝟙 _ := by
  apply (cancel_mono f).mp
  simp

-- As with `ofLE`, we have `X` and `Y` as explicit arguments for readability.
/-- An equality of subobjects gives an isomorphism of the corresponding objects.
(One could use `underlying.mapIso (eqToIso h))` here, but this is more readable.) -/
@[simps]
def isoOfEq {B : C} (X Y : Subobject B) (h : X = Y) : (X : C) ≅ (Y : C) where
  hom := ofLE _ _ h.le
  inv := ofLE _ _ h.ge

/-- An equality of subobjects gives an isomorphism of the corresponding objects. -/
@[simps]
def isoOfEqMk {B A : C} (X : Subobject B) (f : A ⟶ B) [Mono f] (h : X = mk f) : (X : C) ≅ A where
  hom := ofLEMk X f h.le
  inv := ofMkLE f X h.ge

/-- An equality of subobjects gives an isomorphism of the corresponding objects.
-/
@[simps]
def isoOfMkEq {B A : C} (f : A ⟶ B) [Mono f] (X : Subobject B) (h : mk f = X) : A ≅ (X : C) where
  hom := ofMkLE f X h.le
  inv := ofLEMk X f h.ge

/-- An equality of subobjects gives an isomorphism of the corresponding objects. -/
@[simps]
def isoOfMkEqMk {B A₁ A₂ : C} (f : A₁ ⟶ B) (g : A₂ ⟶ B) [Mono f] [Mono g] (h : mk f = mk g) :
    A₁ ≅ A₂ where
  hom := ofMkLEMk f g h.le
  inv := ofMkLEMk g f h.ge

end Subobject

open CategoryTheory.Limits

namespace Subobject

/-- Any functor `MonoOver X ⥤ MonoOver Y` descends to a functor
`Subobject X ⥤ Subobject Y`, because `MonoOver Y` is thin. -/
def lower {Y : D} (F : MonoOver X ⥤ MonoOver Y) : Subobject X ⥤ Subobject Y :=
  ThinSkeleton.map F

/-- Isomorphic functors become equal when lowered to `Subobject`.
(It's not as evil as usual to talk about equality between functors
because the categories are thin and skeletal.) -/
theorem lower_iso (F₁ F₂ : MonoOver X ⥤ MonoOver Y) (h : F₁ ≅ F₂) : lower F₁ = lower F₂ :=
  ThinSkeleton.map_iso_eq h

/-- A ternary version of `Subobject.lower`. -/
def lower₂ (F : MonoOver X ⥤ MonoOver Y ⥤ MonoOver Z) :
    Subobject X ⥤ Subobject Y ⥤ Subobject Z :=
  ThinSkeleton.map₂ F

@[simp]
theorem lower_comm (F : MonoOver Y ⥤ MonoOver X) :
    toThinSkeleton _ ⋙ lower F = F ⋙ toThinSkeleton _ :=
  rfl

/-- An adjunction between `MonoOver A` and `MonoOver B` gives an adjunction
between `Subobject A` and `Subobject B`. -/
def lowerAdjunction {A : C} {B : D} {L : MonoOver A ⥤ MonoOver B} {R : MonoOver B ⥤ MonoOver A}
    (h : L ⊣ R) : lower L ⊣ lower R :=
  ThinSkeleton.lowerAdjunction _ _ h

/-- An equivalence between `MonoOver A` and `MonoOver B` gives an equivalence
between `Subobject A` and `Subobject B`.
-/
@[simps]
def lowerEquivalence {A : C} {B : D} (e : MonoOver A ≌ MonoOver B) :
    Subobject A ≌ Subobject B where
  functor := lower e.functor
  inverse := lower e.inverse
  unitIso := by
    apply eqToIso
    convert ThinSkeleton.map_iso_eq e.unitIso
    · exact ThinSkeleton.map_id_eq.symm
    · exact (ThinSkeleton.map_comp_eq _ _).symm
  counitIso := by
    apply eqToIso
    convert ThinSkeleton.map_iso_eq e.counitIso
    · exact (ThinSkeleton.map_comp_eq _ _).symm
    · exact ThinSkeleton.map_id_eq.symm

section Pullback

variable [HasPullbacks C]

/-- When `C` has pullbacks, a morphism `f : X ⟶ Y` induces a functor
`Subobject Y ⥤ Subobject X`, by pulling back a monomorphism along `f`. -/
def pullback (f : X ⟶ Y) : Subobject Y ⥤ Subobject X :=
  lower (MonoOver.pullback f)

-- Pulling back along the identity is the identity on subobjects.
theorem pullback_id (x : Subobject X) : (pullback (𝟙 X)).obj x = x := by
  induction' x using Quotient.inductionOn' with f
  exact Quotient.sound ⟨MonoOver.pullbackId.app f⟩

-- Pulling back along a composite is the composite of the pullbacks (contravariantly).
theorem pullback_comp (f : X ⟶ Y) (g : Y ⟶ Z) (x : Subobject Z) :
    (pullback (f ≫ g)).obj x = (pullback f).obj ((pullback g).obj x) := by
  induction' x using Quotient.inductionOn' with t
  exact Quotient.sound ⟨(MonoOver.pullbackComp _ _).app t⟩

instance (f : X ⟶ Y) : (pullback f).Faithful where

end Pullback

section Map

/-- We can map subobjects of `X` to subobjects of `Y`
by post-composition with a monomorphism `f : X ⟶ Y`. -/
def map (f : X ⟶ Y) [Mono f] : Subobject X ⥤ Subobject Y :=
  lower (MonoOver.map f)

theorem map_id (x : Subobject X) : (map (𝟙 X)).obj x = x := by
  induction' x using Quotient.inductionOn' with f
  exact Quotient.sound ⟨(MonoOver.mapId _).app f⟩

theorem map_comp (f : X ⟶ Y) (g : Y ⟶ Z) [Mono f] [Mono g] (x : Subobject X) :
    (map (f ≫ g)).obj x = (map g).obj ((map f).obj x) := by
  induction' x using Quotient.inductionOn' with t
  exact Quotient.sound ⟨(MonoOver.mapComp _ _).app t⟩

/-- Isomorphic objects have equivalent subobject lattices.
-/
def mapIso {A B : C} (e : A ≅ B) : Subobject A ≌ Subobject B :=
  lowerEquivalence (MonoOver.mapIso e)

-- Porting note: the note below doesn't seem true anymore
-- @[simps] here generates a lemma `map_iso_to_order_iso_to_equiv_symm_apply`
-- whose left hand side is not in simp normal form.
/-- In fact, there's a type level bijection between the subobjects of isomorphic objects,
which preserves the order. -/
def mapIsoToOrderIso (e : X ≅ Y) : Subobject X ≃o Subobject Y where
  toFun := (map e.hom).obj
  invFun := (map e.inv).obj
  left_inv g := by simp_rw [← map_comp, e.hom_inv_id, map_id]
  right_inv g := by simp_rw [← map_comp, e.inv_hom_id, map_id]
  map_rel_iff' {A B} := by
    dsimp
    constructor
    · intro h
      -- `apply_fun` leaves a monotonicity side goal, discharged by `Functor.monotone`.
      apply_fun (map e.inv).obj at h
      · simpa only [← map_comp, e.hom_inv_id, map_id] using h
      · apply Functor.monotone
    · intro h
      apply_fun (map e.hom).obj at h
      · exact h
      · apply Functor.monotone

@[simp]
theorem mapIsoToOrderIso_apply (e : X ≅ Y) (P : Subobject X) :
    mapIsoToOrderIso e P = (map e.hom).obj P :=
  rfl

@[simp]
theorem mapIsoToOrderIso_symm_apply (e : X ≅ Y) (Q : Subobject Y) :
    (mapIsoToOrderIso e).symm Q = (map e.inv).obj Q :=
  rfl

/-- `map f : Subobject X ⥤ Subobject Y` is
the left adjoint of `pullback f : Subobject Y ⥤ Subobject X`.
-/
def mapPullbackAdj [HasPullbacks C] (f : X ⟶ Y) [Mono f] : map f ⊣ pullback f :=
  lowerAdjunction (MonoOver.mapPullbackAdj f)

@[simp]
theorem pullback_map_self [HasPullbacks C] (f : X ⟶ Y) [Mono f] (g : Subobject X) :
    (pullback f).obj ((map f).obj g) = g := by
  revert g
  exact Quotient.ind (fun g' => Quotient.sound ⟨(MonoOver.pullbackMapSelf f).app _⟩)

theorem map_pullback [HasPullbacks C] {X Y Z W : C} {f : X ⟶ Y} {g : X ⟶ Z} {h : Y ⟶ W}
    {k : Z ⟶ W} [Mono h] [Mono g] (comm : f ≫ h = g ≫ k)
    (t : IsLimit (PullbackCone.mk f g comm)) (p : Subobject Y) :
    (map g).obj ((pullback f).obj p) = (pullback k).obj ((map h).obj p) := by
  revert p
  apply Quotient.ind'
  intro a
  apply Quotient.sound
  -- In a thin category, a morphism in each direction suffices for an isomorphism.
  apply ThinSkeleton.equiv_of_both_ways
  · refine MonoOver.homMk (pullback.lift (pullback.fst _ _) _ ?_) (pullback.lift_snd _ _ _)
    change _ ≫ a.arrow ≫ h = (pullback.snd _ _ ≫ g) ≫ _
    rw [assoc, ← comm, pullback.condition_assoc]
  · refine MonoOver.homMk (pullback.lift (pullback.fst _ _)
      (PullbackCone.IsLimit.lift t (pullback.fst _ _ ≫ a.arrow) (pullback.snd _ _) _)
      (PullbackCone.IsLimit.lift_fst _ _ _ ?_).symm) ?_
    · rw [← pullback.condition, assoc]
      rfl
    · dsimp
      rw [pullback.lift_snd_assoc]
      apply PullbackCone.IsLimit.lift_snd

end Map

section Exists

variable [HasImages C]

/-- The functor from subobjects of `X` to subobjects of `Y` given by
sending the subobject `S` to its "image" under `f`, usually denoted $\exists_f$.
For instance, when `C` is the category of types,
viewing `Subobject X` as `Set X` this is just `Set.image f`.

This functor is left adjoint to the `pullback f` functor (shown in `existsPullbackAdj`)
provided both are defined, and generalises the `map f` functor, again provided it is defined. -/
def «exists» (f : X ⟶ Y) : Subobject X ⥤ Subobject Y :=
  lower (MonoOver.exists f)

/-- When `f : X ⟶ Y` is a monomorphism, `exists f` agrees with `map f`.
-/
theorem exists_iso_map (f : X ⟶ Y) [Mono f] : «exists» f = map f :=
  lower_iso _ _ (MonoOver.existsIsoMap f)

/-- `exists f : Subobject X ⥤ Subobject Y` is left adjoint to
`pullback f : Subobject Y ⥤ Subobject X`. -/
def existsPullbackAdj (f : X ⟶ Y) [HasPullbacks C] : «exists» f ⊣ pullback f :=
  lowerAdjunction (MonoOver.existsPullbackAdj f)

end Exists

end Subobject

end CategoryTheory
/- File: CategoryTheory/Subobject/Comma.lean -/
/-
Copyright (c) 2022 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import Mathlib.CategoryTheory.Subobject.WellPowered
import Mathlib.CategoryTheory.Limits.Preserves.Finite
import Mathlib.CategoryTheory.Limits.Shapes.FiniteLimits
import Mathlib.CategoryTheory.Limits.Comma

/-!
# Subobjects in the category of structured arrows

We compute the subobjects of an object `A` in the category `StructuredArrow S T` for `T : C ⥤ D`
and `S : D` as a subtype of the subobjects of `A.right`. We deduce that `StructuredArrow S T` is
well-powered if `C` is.

## Main declarations

* `StructuredArrow.subobjectEquiv`: the order-equivalence between `Subobject A` and a subtype of
  `Subobject A.right`.

## Implementation notes

Our computation requires that `C` has all limits and `T` preserves all limits. Furthermore, we
require that the morphisms of `C` and `D` are in the same universe. It is possible that both of
these requirements can be relaxed by refining the results about limits in comma categories.

We also provide the dual results. As usual, we use `Subobject (op A)` for the quotient objects of
`A`.

-/

noncomputable section

open CategoryTheory.Limits Opposite

universe v u₁ u₂

namespace CategoryTheory

variable {C : Type u₁} [Category.{v} C] {D : Type u₂} [Category.{v} D]

namespace StructuredArrow

variable {S : D} {T : C ⥤ D}

/-- Every subobject of a structured arrow can be projected to a subobject of the underlying
object.
-/
def projectSubobject [HasLimits C] [PreservesLimits T] {A : StructuredArrow S T} :
    Subobject A → Subobject A.right := by
  refine Subobject.lift (fun P f hf => Subobject.mk f.right) ?_
  intro P Q f g hf hg i hi
  refine Subobject.mk_eq_mk_of_comm _ _ ((proj S T).mapIso i) ?_
  exact congr_arg CommaMorphism.right hi

-- `projectSubobject` computes definitionally on `Subobject.mk`.
@[simp]
theorem projectSubobject_mk [HasLimits C] [PreservesLimits T] {A P : StructuredArrow S T}
    (f : P ⟶ A) [Mono f] : projectSubobject (Subobject.mk f) = Subobject.mk f.right :=
  rfl

/-- The projection of a subobject still admits a structured-arrow structure: `A.hom` factors
through the image under `T` of the projected subobject's arrow. -/
theorem projectSubobject_factors [HasLimits C] [PreservesLimits T] {A : StructuredArrow S T} :
    ∀ P : Subobject A, ∃ q, q ≫ T.map (projectSubobject P).arrow = A.hom :=
  Subobject.ind _ fun P f hf =>
    ⟨P.hom ≫ T.map (Subobject.underlyingIso _).inv, by
      dsimp
      simp [← T.map_comp]⟩

/-- A subobject of the underlying object of a structured arrow can be lifted to a subobject of
the structured arrow, provided that there is a morphism making the subobject into a structured
arrow. -/
@[simp]
def liftSubobject {A : StructuredArrow S T} (P : Subobject A.right) {q}
    (hq : q ≫ T.map P.arrow = A.hom) : Subobject A :=
  Subobject.mk (homMk P.arrow hq : mk q ⟶ A)

/-- Projecting and then lifting a subobject recovers the original subobject, because there is at
most one morphism making the projected subobject into a structured arrow. -/
theorem lift_projectSubobject [HasLimits C] [PreservesLimits T] {A : StructuredArrow S T} :
    ∀ (P : Subobject A) {q} (hq : q ≫ T.map (projectSubobject P).arrow = A.hom),
      liftSubobject (projectSubobject P) hq = P :=
  Subobject.ind _
    (by
      intro P f hf q hq
      fapply Subobject.mk_eq_mk_of_comm
      · fapply isoMk
        · exact Subobject.underlyingIso _
        · exact (cancel_mono (T.map f.right)).1 (by dsimp; simpa [← T.map_comp] using hq)
      · exact ext _ _ (by dsimp; simp))

/-- If `A : S → T.obj B` is a structured arrow for `S : D` and `T : C ⥤ D`, then we can
explicitly describe the subobjects of `A` as the subobjects `P` of `B` in `C` for which `A.hom`
factors through the image of `P` under `T`.
-/
@[simps!]
def subobjectEquiv [HasLimits C] [PreservesLimits T] (A : StructuredArrow S T) :
    Subobject A ≃o { P : Subobject A.right // ∃ q, q ≫ T.map P.arrow = A.hom } where
  toFun P := ⟨projectSubobject P, projectSubobject_factors P⟩
  invFun P := liftSubobject P.val P.prop.choose_spec
  left_inv P := lift_projectSubobject _ _
  right_inv P := Subtype.ext (by
    simp only [liftSubobject, homMk_right, projectSubobject_mk, Subobject.mk_arrow,
      Subtype.coe_eta])
  map_rel_iff' := by
    apply Subobject.ind₂
    intro P Q f g hf hg
    refine ⟨fun h => Subobject.mk_le_mk_of_comm ?_ ?_, fun h => ?_⟩
    · exact homMk (Subobject.ofMkLEMk _ _ h)
        ((cancel_mono (T.map g.right)).1 (by simp [← T.map_comp]))
    · aesop_cat
    · refine Subobject.mk_le_mk_of_comm (Subobject.ofMkLEMk _ _ h).right ?_
      exact congr_arg CommaMorphism.right (Subobject.ofMkLEMk_comp h)

-- These lemmas have always been bad (#7657), but leanprover/lean4#2644 made `simp` start noticing
attribute [nolint simpNF] CategoryTheory.StructuredArrow.subobjectEquiv_symm_apply
  CategoryTheory.StructuredArrow.subobjectEquiv_apply_coe

/-- If `C` is well-powered and complete and `T` preserves limits, then `StructuredArrow S T` is
well-powered. -/
instance wellPowered_structuredArrow [WellPowered C] [HasLimits C] [PreservesLimits T] :
    WellPowered (StructuredArrow S T) where
  subobject_small X := small_map (subobjectEquiv X).toEquiv

end StructuredArrow

namespace CostructuredArrow

variable {S : C ⥤ D} {T : D}

/-- Every quotient of a costructured arrow can be projected to a quotient of the underlying
object.
-/
def projectQuotient [HasColimits C] [PreservesColimits S] {A : CostructuredArrow S T} :
    Subobject (op A) → Subobject (op A.left) := by
  refine Subobject.lift (fun P f hf => Subobject.mk f.unop.left.op) ?_
  intro P Q f g hf hg i hi
  refine Subobject.mk_eq_mk_of_comm _ _ ((proj S T).mapIso i.unop).op (Quiver.Hom.unop_inj ?_)
  have := congr_arg Quiver.Hom.unop hi
  simpa using congr_arg CommaMorphism.left this

-- `projectQuotient` computes definitionally on `Subobject.mk`.
@[simp]
theorem projectQuotient_mk [HasColimits C] [PreservesColimits S] {A : CostructuredArrow S T}
    {P : (CostructuredArrow S T)ᵒᵖ} (f : P ⟶ op A) [Mono f] :
    projectQuotient (Subobject.mk f) = Subobject.mk f.unop.left.op :=
  rfl

/-- The projection of a quotient still admits a costructured-arrow structure: `A.hom` factors
through the image under `S` of the projected quotient's arrow. -/
theorem projectQuotient_factors [HasColimits C] [PreservesColimits S]
    {A : CostructuredArrow S T} :
    ∀ P : Subobject (op A), ∃ q, S.map (projectQuotient P).arrow.unop ≫ q = A.hom :=
  Subobject.ind _ fun P f hf =>
    ⟨S.map (Subobject.underlyingIso _).unop.inv ≫ P.unop.hom, by
      dsimp
      rw [← Category.assoc, ← S.map_comp, ← unop_comp]
      simp⟩

/-- A quotient of the underlying object of a costructured arrow can be lifted to a quotient of
the costructured arrow, provided that there is a morphism making the quotient into a
costructured arrow. -/
@[simp]
def liftQuotient {A : CostructuredArrow S T} (P : Subobject (op A.left)) {q}
    (hq : S.map P.arrow.unop ≫ q = A.hom) : Subobject (op A) :=
  Subobject.mk (homMk P.arrow.unop hq : A ⟶ mk q).op

/-- Technical lemma for `lift_projectQuotient`. -/
@[simp]
theorem unop_left_comp_underlyingIso_hom_unop {A : CostructuredArrow S T}
    {P : (CostructuredArrow S T)ᵒᵖ} (f : P ⟶ op A) [Mono f.unop.left.op] :
    f.unop.left ≫ (Subobject.underlyingIso f.unop.left.op).hom.unop =
      (Subobject.mk f.unop.left.op).arrow.unop := by
  conv_lhs =>
    congr
    rw [← Quiver.Hom.unop_op f.unop.left]
  rw [← unop_comp, Subobject.underlyingIso_hom_comp_eq_mk]

/-- Projecting and then lifting a quotient recovers the original quotient, because there is at
most one morphism making the projected quotient into a costructured arrow.
-/
theorem lift_projectQuotient [HasColimits C] [PreservesColimits S] {A : CostructuredArrow S T} :
    ∀ (P : Subobject (op A)) {q} (hq : S.map (projectQuotient P).arrow.unop ≫ q = A.hom),
      liftQuotient (projectQuotient P) hq = P :=
  Subobject.ind _
    (by
      intro P f hf q hq
      fapply Subobject.mk_eq_mk_of_comm
      · refine (Iso.op (isoMk ?_ ?_) : _ ≅ op (unop P))
        · exact (Subobject.underlyingIso f.unop.left.op).unop
        · refine (cancel_epi (S.map f.unop.left)).1 ?_
          simpa [← Category.assoc, ← S.map_comp] using hq
      · exact Quiver.Hom.unop_inj (by aesop_cat))

/-- Technical lemma for `quotientEquiv`. -/
theorem unop_left_comp_ofMkLEMk_unop {A : CostructuredArrow S T}
    {P Q : (CostructuredArrow S T)ᵒᵖ} {f : P ⟶ op A} {g : Q ⟶ op A} [Mono f.unop.left.op]
    [Mono g.unop.left.op] (h : Subobject.mk f.unop.left.op ≤ Subobject.mk g.unop.left.op) :
    g.unop.left ≫ (Subobject.ofMkLEMk f.unop.left.op g.unop.left.op h).unop = f.unop.left := by
  conv_lhs =>
    congr
    rw [← Quiver.Hom.unop_op g.unop.left]
  rw [← unop_comp]
  simp only [Subobject.ofMkLEMk_comp, Quiver.Hom.unop_op]

/-- If `A : S.obj B ⟶ T` is a costructured arrow for `S : C ⥤ D` and `T : D`, then we can
explicitly describe the quotients of `A` as the quotients `P` of `B` in `C` for which `A.hom`
factors through the image of `P` under `S`.
-/
def quotientEquiv [HasColimits C] [PreservesColimits S] (A : CostructuredArrow S T) :
    Subobject (op A) ≃o { P : Subobject (op A.left) // ∃ q, S.map P.arrow.unop ≫ q = A.hom }
    where
  toFun P := ⟨projectQuotient P, projectQuotient_factors P⟩
  invFun P := liftQuotient P.val P.prop.choose_spec
  left_inv P := lift_projectQuotient _ _
  right_inv P := Subtype.ext (by
    simp only [liftQuotient, Quiver.Hom.unop_op, homMk_left, Quiver.Hom.op_unop,
      projectQuotient_mk, Subobject.mk_arrow])
  map_rel_iff' := by
    apply Subobject.ind₂
    intro P Q f g hf hg
    refine ⟨fun h => Subobject.mk_le_mk_of_comm ?_ ?_, fun h => ?_⟩
    · refine (homMk (Subobject.ofMkLEMk _ _ h).unop
        ((cancel_epi (S.map g.unop.left)).1 ?_)).op
      dsimp
      simp only [← S.map_comp_assoc, unop_left_comp_ofMkLEMk_unop, unop_op,
        CommaMorphism.w, Functor.const_obj_obj, right_eq_id, Functor.const_obj_map,
        Category.comp_id]
    · apply Quiver.Hom.unop_inj
      ext
      exact unop_left_comp_ofMkLEMk_unop _
    · refine Subobject.mk_le_mk_of_comm (Subobject.ofMkLEMk _ _ h).unop.left.op ?_
      refine Quiver.Hom.unop_inj ?_
      have := congr_arg Quiver.Hom.unop (Subobject.ofMkLEMk_comp h)
      simpa only [unop_op, Functor.id_obj, Functor.const_obj_obj, MonoOver.mk'_obj,
        Over.mk_left, MonoOver.mk'_arrow, unop_comp, Quiver.Hom.unop_op, comp_left] using
        congr_arg CommaMorphism.left this

/-- If `C` is well-copowered and cocomplete and `S` preserves colimits, then
`CostructuredArrow S T` is well-copowered. -/
instance well_copowered_costructuredArrow [WellPowered Cᵒᵖ] [HasColimits C]
    [PreservesColimits S] : WellPowered (CostructuredArrow S T)ᵒᵖ where
  subobject_small X := small_map (quotientEquiv (unop X)).toEquiv

end CostructuredArrow

end CategoryTheory
/- File: CategoryTheory/Subobject/FactorThru.lean -/
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta, Scott Morrison
-/
import Mathlib.CategoryTheory.Subobject.Basic
import Mathlib.CategoryTheory.Preadditive.Basic

/-!
# Factoring through subobjects

The predicate `h : P.Factors f`, for `P : Subobject Y` and `f : X ⟶ Y`
asserts the existence of some `P.factorThru f : X ⟶ (P : C)`
making the obvious diagram commute.

-/

universe v₁ v₂ u₁ u₂

noncomputable section

open CategoryTheory CategoryTheory.Category CategoryTheory.Limits

variable {C : Type u₁} [Category.{v₁} C] {X Y Z : C}
variable {D : Type u₂} [Category.{v₂} D]

namespace CategoryTheory

namespace MonoOver

/-- When `f : X ⟶ Y` and `P : MonoOver Y`,
`P.Factors f` expresses that there exists a factorisation of `f` through `P`.
Given `h : P.Factors f`, you can recover the morphism as `P.factorThru f h`. -/
def Factors {X Y : C} (P : MonoOver Y) (f : X ⟶ Y) : Prop :=
  ∃ g : X ⟶ (P : C), g ≫ P.arrow = f

/-- Factorisation is invariant under isomorphism of the `MonoOver` object. -/
theorem factors_congr {X : C} {f g : MonoOver X} {Y : C} (h : Y ⟶ X) (e : f ≅ g) :
    f.Factors h ↔ g.Factors h :=
  ⟨fun ⟨u, hu⟩ => ⟨u ≫ ((MonoOver.forget _).map e.hom).left, by simp [hu]⟩, fun ⟨u, hu⟩ =>
    ⟨u ≫ ((MonoOver.forget _).map e.inv).left, by simp [hu]⟩⟩

/-- `P.factorThru f h` provides a factorisation of `f : X ⟶ Y` through some `P : MonoOver Y`,
given the evidence `h : P.Factors f` that such a factorisation exists. -/
def factorThru {X Y : C} (P : MonoOver Y) (f : X ⟶ Y) (h : Factors P f) : X ⟶ (P : C) :=
  Classical.choose h

end MonoOver

namespace Subobject

/-- When `f : X ⟶ Y` and `P : Subobject Y`,
`P.Factors f` expresses that there exists a factorisation of `f` through `P`.
Given `h : P.Factors f`, you can recover the morphism as `P.factorThru f h`.
-/
def Factors {X Y : C} (P : Subobject Y) (f : X ⟶ Y) : Prop :=
  Quotient.liftOn' P (fun P => P.Factors f)
    (by
      -- Factorisation is well-defined on isomorphism classes of monomorphisms.
      rintro P Q ⟨h⟩
      apply propext
      constructor
      · rintro ⟨i, w⟩
        exact ⟨i ≫ h.hom.left, by erw [Category.assoc, Over.w h.hom, w]⟩
      · rintro ⟨i, w⟩
        exact ⟨i ≫ h.inv.left, by erw [Category.assoc, Over.w h.inv, w]⟩)

@[simp]
theorem mk_factors_iff {X Y Z : C} (f : Y ⟶ X) [Mono f] (g : Z ⟶ X) :
    (Subobject.mk f).Factors g ↔ (MonoOver.mk' f).Factors g :=
  Iff.rfl

theorem mk_factors_self (f : X ⟶ Y) [Mono f] : (mk f).Factors f :=
  ⟨𝟙 _, by simp⟩

theorem factors_iff {X Y : C} (P : Subobject Y) (f : X ⟶ Y) :
    P.Factors f ↔ (representative.obj P).Factors f :=
  Quot.inductionOn P fun _ => MonoOver.factors_congr _ (representativeIso _).symm

theorem factors_self {X : C} (P : Subobject X) : P.Factors P.arrow :=
  (factors_iff _ _).mpr ⟨𝟙 (P : C), by simp⟩

theorem factors_comp_arrow {X Y : C} {P : Subobject Y} (f : X ⟶ P) : P.Factors (f ≫ P.arrow) :=
  (factors_iff _ _).mpr ⟨f, rfl⟩

/-- Precomposition preserves factorisation through a subobject. -/
theorem factors_of_factors_right {X Y Z : C} {P : Subobject Z} (f : X ⟶ Y) {g : Y ⟶ Z}
    (h : P.Factors g) : P.Factors (f ≫ g) := by
  induction' P using Quotient.ind' with P
  obtain ⟨g, rfl⟩ := h
  exact ⟨f ≫ g, by simp⟩

theorem factors_zero [HasZeroMorphisms C] {X Y : C} {P : Subobject Y} :
    P.Factors (0 : X ⟶ Y) :=
  (factors_iff _ _).mpr ⟨0, by simp⟩

/-- Factorisation is monotone: anything factoring through `P` factors through any larger `Q`. -/
theorem factors_of_le {Y Z : C} {P Q : Subobject Y} (f : Z ⟶ Y) (h : P ≤ Q) :
    P.Factors f → Q.Factors f := by
  simp only [factors_iff]
  exact fun ⟨u, hu⟩ => ⟨u ≫ ofLE _ _ h, by simp [← hu]⟩

/-- `P.factorThru f h` provides a factorisation of `f : X ⟶ Y` through some `P : Subobject Y`,
given the evidence `h : P.Factors f` that such a factorisation exists.
-/
def factorThru {X Y : C} (P : Subobject Y) (f : X ⟶ Y) (h : Factors P f) : X ⟶ P :=
  -- Noncomputable: extracts the witness packed inside the existential `h`.
  Classical.choose ((factors_iff _ _).mp h)

-- The defining equation of `factorThru`: postcomposing with the inclusion recovers `f`.
@[reassoc (attr := simp)]
theorem factorThru_arrow {X Y : C} (P : Subobject Y) (f : X ⟶ Y) (h : Factors P f) :
    P.factorThru f h ≫ P.arrow = f :=
  Classical.choose_spec ((factors_iff _ _).mp h)

-- Factoring the inclusion arrow through its own subobject gives the identity.
@[simp]
theorem factorThru_self {X : C} (P : Subobject X) (h) : P.factorThru P.arrow h = 𝟙 (P : C) := by
  ext
  simp

-- Factoring `f` through `mk f` is the canonical iso to the chosen representative.
@[simp]
theorem factorThru_mk_self (f : X ⟶ Y) [Mono f] :
    (mk f).factorThru f (mk_factors_self f) = (underlyingIso f).inv := by
  ext
  simp

-- `factorThru` is a retraction of `≫ P.arrow`: factoring `f ≫ P.arrow` returns `f`.
@[simp]
theorem factorThru_comp_arrow {X Y : C} {P : Subobject Y} (f : X ⟶ P) (h) :
    P.factorThru (f ≫ P.arrow) h = f := by
  ext
  simp

-- The factorisation vanishes iff the morphism itself does (since `P.arrow` is monic).
@[simp]
theorem factorThru_eq_zero [HasZeroMorphisms C] {X Y : C} {P : Subobject Y} {f : X ⟶ Y}
    {h : Factors P f} : P.factorThru f h = 0 ↔ f = 0 := by
  fconstructor
  · intro w
    replace w := w =≫ P.arrow
    simpa using w
  · rintro rfl
    ext
    simp

-- `factorThru` commutes with precomposition.
theorem factorThru_right {X Y Z : C} {P : Subobject Z} (f : X ⟶ Y) (g : Y ⟶ Z) (h : P.Factors g) :
    f ≫ P.factorThru g h = P.factorThru (f ≫ g) (factors_of_factors_right f h) := by
  apply (cancel_mono P.arrow).mp
  simp

-- Factoring the zero morphism yields the zero morphism.
@[simp]
theorem factorThru_zero [HasZeroMorphisms C] {X Y : C} {P : Subobject Y}
    (h : P.Factors (0 : X ⟶ Y)) : P.factorThru 0 h = 0 := by simp

-- `h` is an explicit argument here so we can use
-- `rw factorThru_ofLE h`, obtaining a subgoal `P.Factors f`.
-- (While the reverse direction looks plausible as a simp lemma, it seems to be unproductive.)
theorem factorThru_ofLE {Y Z : C} {P Q : Subobject Y} {f : Z ⟶ Y} (h : P ≤ Q) (w : P.Factors f) :
    Q.factorThru f (factors_of_le f h w) = P.factorThru f w ≫ ofLE P Q h := by
  ext
  simp

section Preadditive

variable [Preadditive C]

-- In a preadditive category, factorisations are closed under addition of morphisms.
theorem factors_add {X Y : C} {P : Subobject Y} (f g : X ⟶ Y) (wf : P.Factors f)
    (wg : P.Factors g) : P.Factors (f + g) :=
  (factors_iff _ _).mpr ⟨P.factorThru f wf + P.factorThru g wg, by simp⟩

-- This can't be a `simp` lemma as `wf` and `wg` may not exist.
-- However you can `rw` by it to assert that `f` and `g` factor through `P` separately.
theorem factorThru_add {X Y : C} {P : Subobject Y} (f g : X ⟶ Y) (w : P.Factors (f + g))
    (wf : P.Factors f) (wg : P.Factors g) :
    P.factorThru (f + g) w = P.factorThru f wf + P.factorThru g wg := by
  ext
  simp

-- If a sum and one summand factor through `P`, so does the other summand
-- (witnessed by subtracting the factorisations).
theorem factors_left_of_factors_add {X Y : C} {P : Subobject Y} (f g : X ⟶ Y)
    (w : P.Factors (f + g)) (wg : P.Factors g) : P.Factors f :=
  (factors_iff _ _).mpr ⟨P.factorThru (f + g) w - P.factorThru g wg, by simp⟩

@[simp]
theorem factorThru_add_sub_factorThru_right {X Y : C} {P : Subobject Y} (f g : X ⟶ Y)
    (w : P.Factors (f + g)) (wg : P.Factors g) :
    P.factorThru (f + g) w - P.factorThru g wg =
      P.factorThru f (factors_left_of_factors_add f g w wg) := by
  ext
  simp

-- Symmetric to `factors_left_of_factors_add`, cancelling the left summand instead.
theorem factors_right_of_factors_add {X Y : C} {P : Subobject Y} (f g : X ⟶ Y)
    (w : P.Factors (f + g)) (wf : P.Factors f) : P.Factors g :=
  (factors_iff _ _).mpr ⟨P.factorThru (f + g) w - P.factorThru f wf, by simp⟩

@[simp]
theorem factorThru_add_sub_factorThru_left {X Y : C} {P : Subobject Y} (f g : X ⟶ Y)
    (w : P.Factors (f + g)) (wf : P.Factors f) :
    P.factorThru (f + g) w - P.factorThru f wf =
      P.factorThru g (factors_right_of_factors_add f g w wf) := by
  ext
  simp

end Preadditive

end Subobject

end CategoryTheory
CategoryTheory\Subobject\Lattice.lean
/- Copyright (c) 2020 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Scott Morrison -/ import Mathlib.CategoryTheory.Functor.Currying import Mathlib.CategoryTheory.Subobject.FactorThru import Mathlib.CategoryTheory.Subobject.WellPowered /-! # The lattice of subobjects We provide the `SemilatticeInf` with `OrderTop (subobject X)` instance when `[HasPullback C]`, and the `SemilatticeSup (Subobject X)` instance when `[HasImages C] [HasBinaryCoproducts C]`. -/ universe v₁ v₂ u₁ u₂ noncomputable section open CategoryTheory CategoryTheory.Category CategoryTheory.Limits variable {C : Type u₁} [Category.{v₁} C] {X Y Z : C} variable {D : Type u₂} [Category.{v₂} D] namespace CategoryTheory namespace MonoOver section Top instance {X : C} : Top (MonoOver X) where top := mk' (𝟙 _) instance {X : C} : Inhabited (MonoOver X) := ⟨⊤⟩ /-- The morphism to the top object in `MonoOver X`. -/ def leTop (f : MonoOver X) : f ⟶ ⊤ := homMk f.arrow (comp_id _) @[simp] theorem top_left (X : C) : ((⊤ : MonoOver X) : C) = X := rfl @[simp] theorem top_arrow (X : C) : (⊤ : MonoOver X).arrow = 𝟙 X := rfl /-- `map f` sends `⊤ : MonoOver X` to `⟨X, f⟩ : MonoOver Y`. -/ def mapTop (f : X ⟶ Y) [Mono f] : (map f).obj ⊤ ≅ mk' f := iso_of_both_ways (homMk (𝟙 _) rfl) (homMk (𝟙 _) (by simp [id_comp f])) section variable [HasPullbacks C] /-- The pullback of the top object in `MonoOver Y` is (isomorphic to) the top object in `MonoOver X`. -/ def pullbackTop (f : X ⟶ Y) : (pullback f).obj ⊤ ≅ ⊤ := iso_of_both_ways (leTop _) (homMk (pullback.lift f (𝟙 _) (by aesop_cat)) (pullback.lift_snd _ _ _)) /-- There is a morphism from `⊤ : MonoOver A` to the pullback of a monomorphism along itself; as the category is thin this is an isomorphism. 
-/ def topLEPullbackSelf {A B : C} (f : A ⟶ B) [Mono f] : (⊤ : MonoOver A) ⟶ (pullback f).obj (mk' f) := homMk _ (pullback.lift_snd _ _ rfl) /-- The pullback of a monomorphism along itself is isomorphic to the top object. -/ def pullbackSelf {A B : C} (f : A ⟶ B) [Mono f] : (pullback f).obj (mk' f) ≅ ⊤ := iso_of_both_ways (leTop _) (topLEPullbackSelf _) end end Top section Bot variable [HasInitial C] [InitialMonoClass C] instance {X : C} : Bot (MonoOver X) where bot := mk' (initial.to X) @[simp] theorem bot_left (X : C) : ((⊥ : MonoOver X) : C) = ⊥_ C := rfl @[simp] theorem bot_arrow {X : C} : (⊥ : MonoOver X).arrow = initial.to X := rfl /-- The (unique) morphism from `⊥ : MonoOver X` to any other `f : MonoOver X`. -/ def botLE {X : C} (f : MonoOver X) : ⊥ ⟶ f := homMk (initial.to _) /-- `map f` sends `⊥ : MonoOver X` to `⊥ : MonoOver Y`. -/ def mapBot (f : X ⟶ Y) [Mono f] : (map f).obj ⊥ ≅ ⊥ := iso_of_both_ways (homMk (initial.to _)) (homMk (𝟙 _)) end Bot section ZeroOrderBot variable [HasZeroObject C] open ZeroObject /-- The object underlying `⊥ : Subobject B` is (up to isomorphism) the zero object. -/ def botCoeIsoZero {B : C} : ((⊥ : MonoOver B) : C) ≅ 0 := initialIsInitial.uniqueUpToIso HasZeroObject.zeroIsInitial -- Porting note: removed @[simp] as the LHS simplifies theorem bot_arrow_eq_zero [HasZeroMorphisms C] {B : C} : (⊥ : MonoOver B).arrow = 0 := zero_of_source_iso_zero _ botCoeIsoZero end ZeroOrderBot section Inf variable [HasPullbacks C] /-- When `[HasPullbacks C]`, `MonoOver A` has "intersections", functorial in both arguments. As `MonoOver A` is only a preorder, this doesn't satisfy the axioms of `SemilatticeInf`, but we reuse all the names from `SemilatticeInf` because they will be used to construct `SemilatticeInf (subobject A)` shortly. 
-/ @[simps] def inf {A : C} : MonoOver A ⥤ MonoOver A ⥤ MonoOver A where obj f := pullback f.arrow ⋙ map f.arrow map k := { app := fun g => by apply homMk _ _ · apply pullback.lift (pullback.fst _ _) (pullback.snd _ _ ≫ k.left) _ rw [pullback.condition, assoc, w k] dsimp rw [pullback.lift_snd_assoc, assoc, w k] } /-- A morphism from the "infimum" of two objects in `MonoOver A` to the first object. -/ def infLELeft {A : C} (f g : MonoOver A) : (inf.obj f).obj g ⟶ f := homMk _ rfl /-- A morphism from the "infimum" of two objects in `MonoOver A` to the second object. -/ def infLERight {A : C} (f g : MonoOver A) : (inf.obj f).obj g ⟶ g := homMk _ pullback.condition /-- A morphism version of the `le_inf` axiom. -/ def leInf {A : C} (f g h : MonoOver A) : (h ⟶ f) → (h ⟶ g) → (h ⟶ (inf.obj f).obj g) := by intro k₁ k₂ refine homMk (pullback.lift k₂.left k₁.left ?_) ?_ · rw [w k₁, w k₂] · erw [pullback.lift_snd_assoc, w k₁] end Inf section Sup variable [HasImages C] [HasBinaryCoproducts C] /-- When `[HasImages C] [HasBinaryCoproducts C]`, `MonoOver A` has a `sup` construction, which is functorial in both arguments, and which on `Subobject A` will induce a `SemilatticeSup`. -/ def sup {A : C} : MonoOver A ⥤ MonoOver A ⥤ MonoOver A := curryObj ((forget A).prod (forget A) ⋙ uncurry.obj Over.coprod ⋙ image) /-- A morphism version of `le_sup_left`. -/ def leSupLeft {A : C} (f g : MonoOver A) : f ⟶ (sup.obj f).obj g := by refine homMk (coprod.inl ≫ factorThruImage _) ?_ erw [Category.assoc, image.fac, coprod.inl_desc] rfl /-- A morphism version of `le_sup_right`. -/ def leSupRight {A : C} (f g : MonoOver A) : g ⟶ (sup.obj f).obj g := by refine homMk (coprod.inr ≫ factorThruImage _) ?_ erw [Category.assoc, image.fac, coprod.inr_desc] rfl /-- A morphism version of `sup_le`. 
-/ def supLe {A : C} (f g h : MonoOver A) : (f ⟶ h) → (g ⟶ h) → ((sup.obj f).obj g ⟶ h) := by intro k₁ k₂ refine homMk ?_ ?_ · apply image.lift ⟨_, h.arrow, coprod.desc k₁.left k₂.left, _⟩ ext · simp [w k₁] · simp [w k₂] · apply image.lift_fac end Sup end MonoOver namespace Subobject section OrderTop instance orderTop {X : C} : OrderTop (Subobject X) where top := Quotient.mk'' ⊤ le_top := by refine Quotient.ind' fun f => ?_ exact ⟨MonoOver.leTop f⟩ instance {X : C} : Inhabited (Subobject X) := ⟨⊤⟩ theorem top_eq_id (B : C) : (⊤ : Subobject B) = Subobject.mk (𝟙 B) := rfl theorem underlyingIso_top_hom {B : C} : (underlyingIso (𝟙 B)).hom = (⊤ : Subobject B).arrow := by convert underlyingIso_hom_comp_eq_mk (𝟙 B) simp only [comp_id] instance top_arrow_isIso {B : C} : IsIso (⊤ : Subobject B).arrow := by rw [← underlyingIso_top_hom] infer_instance @[reassoc (attr := simp)] theorem underlyingIso_inv_top_arrow {B : C} : (underlyingIso _).inv ≫ (⊤ : Subobject B).arrow = 𝟙 B := underlyingIso_arrow _ @[simp] theorem map_top (f : X ⟶ Y) [Mono f] : (map f).obj ⊤ = Subobject.mk f := Quotient.sound' ⟨MonoOver.mapTop f⟩ theorem top_factors {A B : C} (f : A ⟶ B) : (⊤ : Subobject B).Factors f := ⟨f, comp_id _⟩ theorem isIso_iff_mk_eq_top {X Y : C} (f : X ⟶ Y) [Mono f] : IsIso f ↔ mk f = ⊤ := ⟨fun _ => mk_eq_mk_of_comm _ _ (asIso f) (Category.comp_id _), fun h => by rw [← ofMkLEMk_comp h.le, Category.comp_id] exact (isoOfMkEqMk _ _ h).isIso_hom⟩ theorem isIso_arrow_iff_eq_top {Y : C} (P : Subobject Y) : IsIso P.arrow ↔ P = ⊤ := by rw [isIso_iff_mk_eq_top, mk_arrow] instance isIso_top_arrow {Y : C} : IsIso (⊤ : Subobject Y).arrow := by rw [isIso_arrow_iff_eq_top] theorem mk_eq_top_of_isIso {X Y : C} (f : X ⟶ Y) [IsIso f] : mk f = ⊤ := (isIso_iff_mk_eq_top f).mp inferInstance theorem eq_top_of_isIso_arrow {Y : C} (P : Subobject Y) [IsIso P.arrow] : P = ⊤ := (isIso_arrow_iff_eq_top P).mp inferInstance section variable [HasPullbacks C] theorem pullback_top (f : X ⟶ Y) : (pullback f).obj ⊤ 
= ⊤ := Quotient.sound' ⟨MonoOver.pullbackTop f⟩ theorem pullback_self {A B : C} (f : A ⟶ B) [Mono f] : (pullback f).obj (mk f) = ⊤ := Quotient.sound' ⟨MonoOver.pullbackSelf f⟩ end end OrderTop section OrderBot variable [HasInitial C] [InitialMonoClass C] instance orderBot {X : C} : OrderBot (Subobject X) where bot := Quotient.mk'' ⊥ bot_le := by refine Quotient.ind' fun f => ?_ exact ⟨MonoOver.botLE f⟩ theorem bot_eq_initial_to {B : C} : (⊥ : Subobject B) = Subobject.mk (initial.to B) := rfl /-- The object underlying `⊥ : Subobject B` is (up to isomorphism) the initial object. -/ def botCoeIsoInitial {B : C} : ((⊥ : Subobject B) : C) ≅ ⊥_ C := underlyingIso _ theorem map_bot (f : X ⟶ Y) [Mono f] : (map f).obj ⊥ = ⊥ := Quotient.sound' ⟨MonoOver.mapBot f⟩ end OrderBot section ZeroOrderBot variable [HasZeroObject C] open ZeroObject /-- The object underlying `⊥ : Subobject B` is (up to isomorphism) the zero object. -/ def botCoeIsoZero {B : C} : ((⊥ : Subobject B) : C) ≅ 0 := botCoeIsoInitial ≪≫ initialIsInitial.uniqueUpToIso HasZeroObject.zeroIsInitial variable [HasZeroMorphisms C] theorem bot_eq_zero {B : C} : (⊥ : Subobject B) = Subobject.mk (0 : 0 ⟶ B) := mk_eq_mk_of_comm _ _ (initialIsInitial.uniqueUpToIso HasZeroObject.zeroIsInitial) (by simp [eq_iff_true_of_subsingleton]) @[simp] theorem bot_arrow {B : C} : (⊥ : Subobject B).arrow = 0 := zero_of_source_iso_zero _ botCoeIsoZero theorem bot_factors_iff_zero {A B : C} (f : A ⟶ B) : (⊥ : Subobject B).Factors f ↔ f = 0 := ⟨by rintro ⟨h, rfl⟩ simp only [MonoOver.bot_arrow_eq_zero, Functor.id_obj, Functor.const_obj_obj, MonoOver.bot_left, comp_zero], by rintro rfl exact ⟨0, by simp⟩⟩ theorem mk_eq_bot_iff_zero {f : X ⟶ Y} [Mono f] : Subobject.mk f = ⊥ ↔ f = 0 := ⟨fun h => by simpa [h, bot_factors_iff_zero] using mk_factors_self f, fun h => mk_eq_mk_of_comm _ _ ((isoZeroOfMonoEqZero h).trans HasZeroObject.zeroIsoInitial) (by simp [h])⟩ end ZeroOrderBot section Functor variable (C) /-- Sending `X : C` to `Subobject X` is 
a contravariant functor `Cᵒᵖ ⥤ Type`. -/ @[simps] def functor [HasPullbacks C] : Cᵒᵖ ⥤ Type max u₁ v₁ where obj X := Subobject X.unop map f := (pullback f.unop).obj map_id _ := funext pullback_id map_comp _ _ := funext (pullback_comp _ _) end Functor section SemilatticeInfTop variable [HasPullbacks C] /-- The functorial infimum on `MonoOver A` descends to an infimum on `Subobject A`. -/ def inf {A : C} : Subobject A ⥤ Subobject A ⥤ Subobject A := ThinSkeleton.map₂ MonoOver.inf theorem inf_le_left {A : C} (f g : Subobject A) : (inf.obj f).obj g ≤ f := Quotient.inductionOn₂' f g fun _ _ => ⟨MonoOver.infLELeft _ _⟩ theorem inf_le_right {A : C} (f g : Subobject A) : (inf.obj f).obj g ≤ g := Quotient.inductionOn₂' f g fun _ _ => ⟨MonoOver.infLERight _ _⟩ theorem le_inf {A : C} (h f g : Subobject A) : h ≤ f → h ≤ g → h ≤ (inf.obj f).obj g := Quotient.inductionOn₃' h f g (by rintro f g h ⟨k⟩ ⟨l⟩ exact ⟨MonoOver.leInf _ _ _ k l⟩) instance semilatticeInf {B : C} : SemilatticeInf (Subobject B) where inf := fun m n => (inf.obj m).obj n inf_le_left := inf_le_left inf_le_right := inf_le_right le_inf := le_inf theorem factors_left_of_inf_factors {A B : C} {X Y : Subobject B} {f : A ⟶ B} (h : (X ⊓ Y).Factors f) : X.Factors f := factors_of_le _ (inf_le_left _ _) h theorem factors_right_of_inf_factors {A B : C} {X Y : Subobject B} {f : A ⟶ B} (h : (X ⊓ Y).Factors f) : Y.Factors f := factors_of_le _ (inf_le_right _ _) h @[simp] theorem inf_factors {A B : C} {X Y : Subobject B} (f : A ⟶ B) : (X ⊓ Y).Factors f ↔ X.Factors f ∧ Y.Factors f := ⟨fun h => ⟨factors_left_of_inf_factors h, factors_right_of_inf_factors h⟩, by revert X Y apply Quotient.ind₂' rintro X Y ⟨⟨g₁, rfl⟩, ⟨g₂, hg₂⟩⟩ exact ⟨_, pullback.lift_snd_assoc _ _ hg₂ _⟩⟩ theorem inf_arrow_factors_left {B : C} (X Y : Subobject B) : X.Factors (X ⊓ Y).arrow := (factors_iff _ _).mpr ⟨ofLE (X ⊓ Y) X (inf_le_left X Y), by simp⟩ theorem inf_arrow_factors_right {B : C} (X Y : Subobject B) : Y.Factors (X ⊓ Y).arrow := (factors_iff _ 
_).mpr ⟨ofLE (X ⊓ Y) Y (inf_le_right X Y), by simp⟩ @[simp] theorem finset_inf_factors {I : Type*} {A B : C} {s : Finset I} {P : I → Subobject B} (f : A ⟶ B) : (s.inf P).Factors f ↔ ∀ i ∈ s, (P i).Factors f := by classical induction' s using Finset.induction_on with _ _ _ ih · simp [top_factors] · simp [ih] -- `i` is explicit here because often we'd like to defer a proof of `m` theorem finset_inf_arrow_factors {I : Type*} {B : C} (s : Finset I) (P : I → Subobject B) (i : I) (m : i ∈ s) : (P i).Factors (s.inf P).arrow := by classical revert i m induction' s using Finset.induction_on with _ _ _ ih · rintro _ ⟨⟩ · intro _ m rw [Finset.inf_insert] simp only [Finset.mem_insert] at m rcases m with (rfl | m) · rw [← factorThru_arrow _ _ (inf_arrow_factors_left _ _)] exact factors_comp_arrow _ · rw [← factorThru_arrow _ _ (inf_arrow_factors_right _ _)] apply factors_of_factors_right exact ih _ m theorem inf_eq_map_pullback' {A : C} (f₁ : MonoOver A) (f₂ : Subobject A) : (Subobject.inf.obj (Quotient.mk'' f₁)).obj f₂ = (Subobject.map f₁.arrow).obj ((Subobject.pullback f₁.arrow).obj f₂) := by induction' f₂ using Quotient.inductionOn' with f₂ rfl theorem inf_eq_map_pullback {A : C} (f₁ : MonoOver A) (f₂ : Subobject A) : (Quotient.mk'' f₁ ⊓ f₂ : Subobject A) = (map f₁.arrow).obj ((pullback f₁.arrow).obj f₂) := inf_eq_map_pullback' f₁ f₂ theorem prod_eq_inf {A : C} {f₁ f₂ : Subobject A} [HasBinaryProduct f₁ f₂] : (f₁ ⨯ f₂) = f₁ ⊓ f₂ := by apply le_antisymm · refine le_inf _ _ _ (Limits.prod.fst.le) (Limits.prod.snd.le) · apply leOfHom exact prod.lift (inf_le_left _ _).hom (inf_le_right _ _).hom theorem inf_def {B : C} (m m' : Subobject B) : m ⊓ m' = (inf.obj m).obj m' := rfl /-- `⊓` commutes with pullback. 
-/ theorem inf_pullback {X Y : C} (g : X ⟶ Y) (f₁ f₂) : (pullback g).obj (f₁ ⊓ f₂) = (pullback g).obj f₁ ⊓ (pullback g).obj f₂ := by revert f₁ apply Quotient.ind' intro f₁ erw [inf_def, inf_def, inf_eq_map_pullback', inf_eq_map_pullback', ← pullback_comp, ← map_pullback pullback.condition (pullbackIsPullback f₁.arrow g), ← pullback_comp, pullback.condition] rfl /-- `⊓` commutes with map. -/ theorem inf_map {X Y : C} (g : Y ⟶ X) [Mono g] (f₁ f₂) : (map g).obj (f₁ ⊓ f₂) = (map g).obj f₁ ⊓ (map g).obj f₂ := by revert f₁ apply Quotient.ind' intro f₁ erw [inf_def, inf_def, inf_eq_map_pullback', inf_eq_map_pullback', ← map_comp] dsimp rw [pullback_comp, pullback_map_self] end SemilatticeInfTop section SemilatticeSup variable [HasImages C] [HasBinaryCoproducts C] /-- The functorial supremum on `MonoOver A` descends to a supremum on `Subobject A`. -/ def sup {A : C} : Subobject A ⥤ Subobject A ⥤ Subobject A := ThinSkeleton.map₂ MonoOver.sup instance semilatticeSup {B : C} : SemilatticeSup (Subobject B) where sup := fun m n => (sup.obj m).obj n le_sup_left := fun m n => Quotient.inductionOn₂' m n fun _ _ => ⟨MonoOver.leSupLeft _ _⟩ le_sup_right := fun m n => Quotient.inductionOn₂' m n fun _ _ => ⟨MonoOver.leSupRight _ _⟩ sup_le := fun m n k => Quotient.inductionOn₃' m n k fun _ _ _ ⟨i⟩ ⟨j⟩ => ⟨MonoOver.supLe _ _ _ i j⟩ theorem sup_factors_of_factors_left {A B : C} {X Y : Subobject B} {f : A ⟶ B} (P : X.Factors f) : (X ⊔ Y).Factors f := factors_of_le f le_sup_left P theorem sup_factors_of_factors_right {A B : C} {X Y : Subobject B} {f : A ⟶ B} (P : Y.Factors f) : (X ⊔ Y).Factors f := factors_of_le f le_sup_right P variable [HasInitial C] [InitialMonoClass C] theorem finset_sup_factors {I : Type*} {A B : C} {s : Finset I} {P : I → Subobject B} {f : A ⟶ B} (h : ∃ i ∈ s, (P i).Factors f) : (s.sup P).Factors f := by classical revert h induction' s using Finset.induction_on with _ _ _ ih · rintro ⟨_, ⟨⟨⟩, _⟩⟩ · rintro ⟨j, ⟨m, h⟩⟩ simp only [Finset.sup_insert] simp at m rcases m 
with (rfl | m) · exact sup_factors_of_factors_left h · exact sup_factors_of_factors_right (ih ⟨j, ⟨m, h⟩⟩) end SemilatticeSup section Lattice instance boundedOrder [HasInitial C] [InitialMonoClass C] {B : C} : BoundedOrder (Subobject B) := { Subobject.orderTop, Subobject.orderBot with } variable [HasPullbacks C] [HasImages C] [HasBinaryCoproducts C] instance {B : C} : Lattice (Subobject B) := { Subobject.semilatticeInf, Subobject.semilatticeSup with } end Lattice section Inf variable [WellPowered C] /-- The "wide cospan" diagram, with a small indexing type, constructed from a set of subobjects. (This is just the diagram of all the subobjects pasted together, but using `WellPowered C` to make the diagram small.) -/ def wideCospan {A : C} (s : Set (Subobject A)) : WidePullbackShape (equivShrink _ '' s) ⥤ C := WidePullbackShape.wideCospan A (fun j : equivShrink _ '' s => ((equivShrink (Subobject A)).symm j : C)) fun j => ((equivShrink (Subobject A)).symm j).arrow @[simp] theorem wideCospan_map_term {A : C} (s : Set (Subobject A)) (j) : (wideCospan s).map (WidePullbackShape.Hom.term j) = ((equivShrink (Subobject A)).symm j).arrow := rfl /-- Auxiliary construction of a cone for `le_inf`. -/ def leInfCone {A : C} (s : Set (Subobject A)) (f : Subobject A) (k : ∀ g ∈ s, f ≤ g) : Cone (wideCospan s) := WidePullbackShape.mkCone f.arrow (fun j => underlying.map (homOfLE (k _ (by rcases j with ⟨-, ⟨g, ⟨m, rfl⟩⟩⟩ simpa using m)))) (by aesop_cat) @[simp] theorem leInfCone_π_app_none {A : C} (s : Set (Subobject A)) (f : Subobject A) (k : ∀ g ∈ s, f ≤ g) : (leInfCone s f k).π.app none = f.arrow := rfl variable [HasWidePullbacks.{v₁} C] /-- The limit of `wideCospan s`. (This will be the supremum of the set of subobjects.) 
-/ def widePullback {A : C} (s : Set (Subobject A)) : C := Limits.limit (wideCospan s) /-- The inclusion map from `widePullback s` to `A` -/ def widePullbackι {A : C} (s : Set (Subobject A)) : widePullback s ⟶ A := Limits.limit.π (wideCospan s) none instance widePullbackι_mono {A : C} (s : Set (Subobject A)) : Mono (widePullbackι s) := ⟨fun u v h => limit.hom_ext fun j => by cases j · exact h · apply (cancel_mono ((equivShrink (Subobject A)).symm _).arrow).1 rw [assoc, assoc] erw [limit.w (wideCospan s) (WidePullbackShape.Hom.term _)] exact h⟩ /-- When `[WellPowered C]` and `[HasWidePullbacks C]`, `Subobject A` has arbitrary infimums. -/ def sInf {A : C} (s : Set (Subobject A)) : Subobject A := Subobject.mk (widePullbackι s) theorem sInf_le {A : C} (s : Set (Subobject A)) (f) (hf : f ∈ s) : sInf s ≤ f := by fapply le_of_comm · exact (underlyingIso _).hom ≫ Limits.limit.π (wideCospan s) (some ⟨equivShrink (Subobject A) f, Set.mem_image_of_mem (equivShrink (Subobject A)) hf⟩) ≫ eqToHom (congr_arg (fun X : Subobject A => (X : C)) (Equiv.symm_apply_apply _ _)) · dsimp [sInf] simp only [Category.comp_id, Category.assoc, ← underlyingIso_hom_comp_eq_mk, Subobject.arrow_congr, congrArg_mpr_hom_left, Iso.cancel_iso_hom_left] convert limit.w (wideCospan s) (WidePullbackShape.Hom.term _) aesop_cat theorem le_sInf {A : C} (s : Set (Subobject A)) (f : Subobject A) (k : ∀ g ∈ s, f ≤ g) : f ≤ sInf s := by fapply le_of_comm · exact Limits.limit.lift _ (leInfCone s f k) ≫ (underlyingIso _).inv · dsimp [sInf] rw [assoc, underlyingIso_arrow, widePullbackι, limit.lift_π, leInfCone_π_app_none] instance completeSemilatticeInf {B : C} : CompleteSemilatticeInf (Subobject B) where sInf := sInf sInf_le := sInf_le le_sInf := le_sInf end Inf section Sup variable [WellPowered C] [HasCoproducts.{v₁} C] /-- The universal morphism out of the coproduct of a set of subobjects, after using `[WellPowered C]` to reindex by a small type. 
-/ def smallCoproductDesc {A : C} (s : Set (Subobject A)) := Limits.Sigma.desc fun j : equivShrink _ '' s => ((equivShrink (Subobject A)).symm j).arrow variable [HasImages C] /-- When `[WellPowered C] [HasImages C] [HasCoproducts C]`, `Subobject A` has arbitrary supremums. -/ def sSup {A : C} (s : Set (Subobject A)) : Subobject A := Subobject.mk (image.ι (smallCoproductDesc s)) theorem le_sSup {A : C} (s : Set (Subobject A)) (f) (hf : f ∈ s) : f ≤ sSup s := by fapply le_of_comm · refine eqToHom ?_ ≫ Sigma.ι _ ⟨equivShrink (Subobject A) f, by simpa [Set.mem_image] using hf⟩ ≫ factorThruImage _ ≫ (underlyingIso _).inv exact (congr_arg (fun X : Subobject A => (X : C)) (Equiv.symm_apply_apply _ _).symm) · simp [sSup, smallCoproductDesc] theorem symm_apply_mem_iff_mem_image {α β : Type*} (e : α ≃ β) (s : Set α) (x : β) : e.symm x ∈ s ↔ x ∈ e '' s := ⟨fun h => ⟨e.symm x, h, by simp⟩, by rintro ⟨a, m, rfl⟩ simpa using m⟩ theorem sSup_le {A : C} (s : Set (Subobject A)) (f : Subobject A) (k : ∀ g ∈ s, g ≤ f) : sSup s ≤ f := by fapply le_of_comm · refine(underlyingIso _).hom ≫ image.lift ⟨_, f.arrow, ?_, ?_⟩ · refine Sigma.desc ?_ rintro ⟨g, m⟩ refine underlying.map (homOfLE (k _ ?_)) simpa using m · ext dsimp [smallCoproductDesc] simp · dsimp [sSup] rw [assoc, image.lift_fac, underlyingIso_hom_comp_eq_mk] instance completeSemilatticeSup {B : C} : CompleteSemilatticeSup (Subobject B) where sSup := sSup le_sSup := le_sSup sSup_le := sSup_le end Sup section CompleteLattice variable [WellPowered C] [HasWidePullbacks.{v₁} C] [HasImages C] [HasCoproducts.{v₁} C] [InitialMonoClass C] attribute [local instance] has_smallest_coproducts_of_hasCoproducts instance {B : C} : CompleteLattice (Subobject B) := { Subobject.semilatticeInf, Subobject.semilatticeSup, Subobject.boundedOrder, Subobject.completeSemilatticeInf, Subobject.completeSemilatticeSup with } end CompleteLattice section ZeroObject variable [HasZeroMorphisms C] [HasZeroObject C] open ZeroObject /-- A nonzero object has 
nontrivial subobject lattice. -/ theorem nontrivial_of_not_isZero {X : C} (h : ¬IsZero X) : Nontrivial (Subobject X) := ⟨⟨mk (0 : 0 ⟶ X), mk (𝟙 X), fun w => h (IsZero.of_iso (isZero_zero C) (isoOfMkEqMk _ _ w).symm)⟩⟩ end ZeroObject section SubobjectSubobject /-- The subobject lattice of a subobject `Y` is order isomorphic to the interval `Set.Iic Y`. -/ def subobjectOrderIso {X : C} (Y : Subobject X) : Subobject (Y : C) ≃o Set.Iic Y where toFun Z := ⟨Subobject.mk (Z.arrow ≫ Y.arrow), Set.mem_Iic.mpr (le_of_comm ((underlyingIso _).hom ≫ Z.arrow) (by simp))⟩ invFun Z := Subobject.mk (ofLE _ _ Z.2) left_inv Z := mk_eq_of_comm _ (underlyingIso _) (by aesop_cat) right_inv Z := Subtype.ext (mk_eq_of_comm _ (underlyingIso _) (by dsimp simp [← Iso.eq_inv_comp])) map_rel_iff' {W Z} := by dsimp constructor · intro h exact le_of_comm (((underlyingIso _).inv ≫ ofLE _ _ (Subtype.mk_le_mk.mp h) ≫ (underlyingIso _).hom)) (by aesop_cat) · intro h exact Subtype.mk_le_mk.mpr (le_of_comm ((underlyingIso _).hom ≫ ofLE _ _ h ≫ (underlyingIso _).inv) (by simp)) end SubobjectSubobject end Subobject end CategoryTheory
CategoryTheory\Subobject\Limits.lean
/- Copyright (c) 2020 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Scott Morrison -/ import Mathlib.CategoryTheory.Subobject.Lattice /-! # Specific subobjects We define `equalizerSubobject`, `kernelSubobject` and `imageSubobject`, which are the subobjects represented by the equalizer, kernel and image of (a pair of) morphism(s) and provide conditions for `P.factors f`, where `P` is one of these special subobjects. TODO: Add conditions for when `P` is a pullback subobject. TODO: an iff characterisation of `(imageSubobject f).Factors h` -/ universe v u noncomputable section open CategoryTheory CategoryTheory.Category CategoryTheory.Limits CategoryTheory.Subobject Opposite variable {C : Type u} [Category.{v} C] {X Y Z : C} namespace CategoryTheory namespace Limits section Equalizer variable (f g : X ⟶ Y) [HasEqualizer f g] /-- The equalizer of morphisms `f g : X ⟶ Y` as a `Subobject X`. -/ abbrev equalizerSubobject : Subobject X := Subobject.mk (equalizer.ι f g) /-- The underlying object of `equalizerSubobject f g` is (up to isomorphism!) the same as the chosen object `equalizer f g`. 
-/
def equalizerSubobjectIso : (equalizerSubobject f g : C) ≅ equalizer f g :=
  Subobject.underlyingIso (equalizer.ι f g)

-- Transporting the equalizer inclusion along the iso gives the subobject's arrow.
@[reassoc (attr := simp)]
theorem equalizerSubobject_arrow :
    (equalizerSubobjectIso f g).hom ≫ equalizer.ι f g = (equalizerSubobject f g).arrow := by
  simp [equalizerSubobjectIso]

@[reassoc (attr := simp)]
theorem equalizerSubobject_arrow' :
    (equalizerSubobjectIso f g).inv ≫ (equalizerSubobject f g).arrow = equalizer.ι f g := by
  simp [equalizerSubobjectIso]

-- The subobject's arrow equalizes `f` and `g`, inherited from `equalizer.condition`.
@[reassoc]
theorem equalizerSubobject_arrow_comp :
    (equalizerSubobject f g).arrow ≫ f = (equalizerSubobject f g).arrow ≫ g := by
  rw [← equalizerSubobject_arrow, Category.assoc, Category.assoc, equalizer.condition]

-- Any morphism equalizing `f` and `g` factors through the equalizer subobject.
theorem equalizerSubobject_factors {W : C} (h : W ⟶ X) (w : h ≫ f = h ≫ g) :
    (equalizerSubobject f g).Factors h :=
  ⟨equalizer.lift h w, by simp⟩

-- …and conversely, so factoring is *equivalent* to the equalizing condition.
theorem equalizerSubobject_factors_iff {W : C} (h : W ⟶ X) :
    (equalizerSubobject f g).Factors h ↔ h ≫ f = h ≫ g :=
  ⟨fun w => by
    rw [← Subobject.factorThru_arrow _ _ w, Category.assoc, equalizerSubobject_arrow_comp,
      Category.assoc],
    equalizerSubobject_factors f g h⟩

end Equalizer

section Kernel

variable [HasZeroMorphisms C] (f : X ⟶ Y) [HasKernel f]

/-- The kernel of a morphism `f : X ⟶ Y` as a `Subobject X`. -/
abbrev kernelSubobject : Subobject X :=
  Subobject.mk (kernel.ι f)

/-- The underlying object of `kernelSubobject f` is (up to isomorphism!)
the same as the chosen object `kernel f`.
-/
def kernelSubobjectIso : (kernelSubobject f : C) ≅ kernel f :=
  Subobject.underlyingIso (kernel.ι f)

-- Transporting the kernel inclusion along the iso gives the subobject's arrow.
@[reassoc (attr := simp), elementwise (attr := simp)]
theorem kernelSubobject_arrow :
    (kernelSubobjectIso f).hom ≫ kernel.ι f = (kernelSubobject f).arrow := by
  simp [kernelSubobjectIso]

@[reassoc (attr := simp), elementwise (attr := simp)]
theorem kernelSubobject_arrow' :
    (kernelSubobjectIso f).inv ≫ (kernelSubobject f).arrow = kernel.ι f := by
  simp [kernelSubobjectIso]

-- The kernel subobject's arrow composes with `f` to zero, from `kernel.condition`.
@[reassoc (attr := simp), elementwise (attr := simp)]
theorem kernelSubobject_arrow_comp : (kernelSubobject f).arrow ≫ f = 0 := by
  rw [← kernelSubobject_arrow]
  simp only [Category.assoc, kernel.condition, comp_zero]

-- Any morphism killed by `f` factors through the kernel subobject.
theorem kernelSubobject_factors {W : C} (h : W ⟶ X) (w : h ≫ f = 0) :
    (kernelSubobject f).Factors h :=
  ⟨kernel.lift _ h w, by simp⟩

-- …and conversely: factoring through the kernel subobject is equivalent to `h ≫ f = 0`.
theorem kernelSubobject_factors_iff {W : C} (h : W ⟶ X) :
    (kernelSubobject f).Factors h ↔ h ≫ f = 0 :=
  ⟨fun w => by
    rw [← Subobject.factorThru_arrow _ _ w, Category.assoc, kernelSubobject_arrow_comp,
      comp_zero],
    kernelSubobject_factors f h⟩

/-- A factorisation of `h : W ⟶ X` through `kernelSubobject f`, assuming `h ≫ f = 0`. -/
def factorThruKernelSubobject {W : C} (h : W ⟶ X) (w : h ≫ f = 0) : W ⟶ kernelSubobject f :=
  (kernelSubobject f).factorThru h (kernelSubobject_factors f h w)

@[simp]
theorem factorThruKernelSubobject_comp_arrow {W : C} (h : W ⟶ X) (w : h ≫ f = 0) :
    factorThruKernelSubobject f h w ≫ (kernelSubobject f).arrow = h := by
  dsimp [factorThruKernelSubobject]
  simp

-- The factorisation agrees with `kernel.lift` after passing through the iso.
@[simp]
theorem factorThruKernelSubobject_comp_kernelSubobjectIso {W : C} (h : W ⟶ X) (w : h ≫ f = 0) :
    factorThruKernelSubobject f h w ≫ (kernelSubobjectIso f).hom = kernel.lift f h w :=
  (cancel_mono (kernel.ι f)).1 <| by simp

section

variable {f} {X' Y' : C} {f' : X' ⟶ Y'} [HasKernel f']

/-- A commuting square induces a morphism between the kernel subobjects.
-/ def kernelSubobjectMap (sq : Arrow.mk f ⟶ Arrow.mk f') : (kernelSubobject f : C) ⟶ (kernelSubobject f' : C) := Subobject.factorThru _ ((kernelSubobject f).arrow ≫ sq.left) (kernelSubobject_factors _ _ (by simp [sq.w])) @[reassoc (attr := simp), elementwise (attr := simp)] theorem kernelSubobjectMap_arrow (sq : Arrow.mk f ⟶ Arrow.mk f') : kernelSubobjectMap sq ≫ (kernelSubobject f').arrow = (kernelSubobject f).arrow ≫ sq.left := by simp [kernelSubobjectMap] @[simp] theorem kernelSubobjectMap_id : kernelSubobjectMap (𝟙 (Arrow.mk f)) = 𝟙 _ := by aesop_cat @[simp] theorem kernelSubobjectMap_comp {X'' Y'' : C} {f'' : X'' ⟶ Y''} [HasKernel f''] (sq : Arrow.mk f ⟶ Arrow.mk f') (sq' : Arrow.mk f' ⟶ Arrow.mk f'') : kernelSubobjectMap (sq ≫ sq') = kernelSubobjectMap sq ≫ kernelSubobjectMap sq' := by aesop_cat @[reassoc] theorem kernel_map_comp_kernelSubobjectIso_inv (sq : Arrow.mk f ⟶ Arrow.mk f') : kernel.map f f' sq.1 sq.2 sq.3.symm ≫ (kernelSubobjectIso _).inv = (kernelSubobjectIso _).inv ≫ kernelSubobjectMap sq := by aesop_cat @[reassoc] theorem kernelSubobjectIso_comp_kernel_map (sq : Arrow.mk f ⟶ Arrow.mk f') : (kernelSubobjectIso _).hom ≫ kernel.map f f' sq.1 sq.2 sq.3.symm = kernelSubobjectMap sq ≫ (kernelSubobjectIso _).hom := by simp [← Iso.comp_inv_eq, kernel_map_comp_kernelSubobjectIso_inv] end @[simp] theorem kernelSubobject_zero {A B : C} : kernelSubobject (0 : A ⟶ B) = ⊤ := (isIso_iff_mk_eq_top _).mp (by infer_instance) instance isIso_kernelSubobject_zero_arrow : IsIso (kernelSubobject (0 : X ⟶ Y)).arrow := (isIso_arrow_iff_eq_top _).mpr kernelSubobject_zero theorem le_kernelSubobject (A : Subobject X) (h : A.arrow ≫ f = 0) : A ≤ kernelSubobject f := Subobject.le_mk_of_comm (kernel.lift f A.arrow h) (by simp) /-- The isomorphism between the kernel of `f ≫ g` and the kernel of `g`, when `f` is an isomorphism. 
-/ def kernelSubobjectIsoComp {X' : C} (f : X' ⟶ X) [IsIso f] (g : X ⟶ Y) [HasKernel g] : (kernelSubobject (f ≫ g) : C) ≅ (kernelSubobject g : C) := kernelSubobjectIso _ ≪≫ kernelIsIsoComp f g ≪≫ (kernelSubobjectIso _).symm @[simp] theorem kernelSubobjectIsoComp_hom_arrow {X' : C} (f : X' ⟶ X) [IsIso f] (g : X ⟶ Y) [HasKernel g] : (kernelSubobjectIsoComp f g).hom ≫ (kernelSubobject g).arrow = (kernelSubobject (f ≫ g)).arrow ≫ f := by simp [kernelSubobjectIsoComp] @[simp] theorem kernelSubobjectIsoComp_inv_arrow {X' : C} (f : X' ⟶ X) [IsIso f] (g : X ⟶ Y) [HasKernel g] : (kernelSubobjectIsoComp f g).inv ≫ (kernelSubobject (f ≫ g)).arrow = (kernelSubobject g).arrow ≫ inv f := by simp [kernelSubobjectIsoComp] /-- The kernel of `f` is always a smaller subobject than the kernel of `f ≫ h`. -/ theorem kernelSubobject_comp_le (f : X ⟶ Y) [HasKernel f] {Z : C} (h : Y ⟶ Z) [HasKernel (f ≫ h)] : kernelSubobject f ≤ kernelSubobject (f ≫ h) := le_kernelSubobject _ _ (by simp) /-- Postcomposing by a monomorphism does not change the kernel subobject. -/ @[simp] theorem kernelSubobject_comp_mono (f : X ⟶ Y) [HasKernel f] {Z : C} (h : Y ⟶ Z) [Mono h] : kernelSubobject (f ≫ h) = kernelSubobject f := le_antisymm (le_kernelSubobject _ _ ((cancel_mono h).mp (by simp))) (kernelSubobject_comp_le f h) instance kernelSubobject_comp_mono_isIso (f : X ⟶ Y) [HasKernel f] {Z : C} (h : Y ⟶ Z) [Mono h] : IsIso (Subobject.ofLE _ _ (kernelSubobject_comp_le f h)) := by rw [ofLE_mk_le_mk_of_comm (kernelCompMono f h).inv] · infer_instance · simp /-- Taking cokernels is an order-reversing map from the subobjects of `X` to the quotient objects of `X`. 
-/
@[simps]
def cokernelOrderHom [HasCokernels C] (X : C) : Subobject X →o (Subobject (op X))ᵒᵈ where
  toFun :=
    -- On a representing monomorphism `f : A ⟶ X`, take the quotient given by `cokernel.π f`
    -- (as a subobject in the opposite category).
    Subobject.lift (fun A f _ => Subobject.mk (cokernel.π f).op)
      (by
        rintro A B f g hf hg i rfl
        refine Subobject.mk_eq_mk_of_comm _ _ (Iso.op ?_) (Quiver.Hom.unop_inj ?_)
        · exact (IsColimit.coconePointUniqueUpToIso (colimit.isColimit _)
            (isCokernelEpiComp (colimit.isColimit _) i.hom rfl)).symm
        · simp only [Iso.comp_inv_eq, Iso.op_hom, Iso.symm_hom, unop_comp, Quiver.Hom.unop_op,
            colimit.comp_coconePointUniqueUpToIso_hom, Cofork.ofπ_ι_app, coequalizer.cofork_π])
  monotone' :=
    Subobject.ind₂ _ <| by
      intro A B f g hf hg h
      dsimp only [Subobject.lift_mk]
      refine Subobject.mk_le_mk_of_comm (cokernel.desc f (cokernel.π g) ?_).op ?_
      · rw [← Subobject.ofMkLEMk_comp h, Category.assoc, cokernel.condition, comp_zero]
      · exact Quiver.Hom.unop_inj (cokernel.π_desc _ _ _)

/-- Taking kernels is an order-reversing map from the quotient objects of `X` to the subobjects of
`X`. -/
@[simps]
def kernelOrderHom [HasKernels C] (X : C) : (Subobject (op X))ᵒᵈ →o Subobject X where
  toFun :=
    -- On a representing quotient (a monomorphism in `Cᵒᵖ`), take the kernel of its
    -- underlying morphism out of `X`.
    Subobject.lift (fun A f _ => Subobject.mk (kernel.ι f.unop))
      (by
        rintro A B f g hf hg i rfl
        refine Subobject.mk_eq_mk_of_comm _ _ ?_ ?_
        · exact IsLimit.conePointUniqueUpToIso (limit.isLimit _)
            (isKernelCompMono (limit.isLimit (parallelPair g.unop 0)) i.unop.hom rfl)
        · dsimp
          simp only [← Iso.eq_inv_comp, limit.conePointUniqueUpToIso_inv_comp, Fork.ofι_π_app])
  monotone' :=
    Subobject.ind₂ _ <| by
      intro A B f g hf hg h
      dsimp only [Subobject.lift_mk]
      refine Subobject.mk_le_mk_of_comm (kernel.lift g.unop (kernel.ι f.unop) ?_) ?_
      · rw [← Subobject.ofMkLEMk_comp h, unop_comp, kernel.condition_assoc, zero_comp]
      · exact Quiver.Hom.op_inj (by simp)

end Kernel

section Image

variable (f : X ⟶ Y) [HasImage f]

/-- The image of a morphism `f : X ⟶ Y` as a `Subobject Y`. -/
abbrev imageSubobject : Subobject Y :=
  Subobject.mk (image.ι f)

/-- The underlying object of `imageSubobject f` is (up to isomorphism!)
the same as the chosen object `image f`. -/
def imageSubobjectIso : (imageSubobject f : C) ≅ image f :=
  Subobject.underlyingIso (image.ι f)

-- The next two lemmas relate the arrow of `imageSubobject f` to the chosen inclusion `image.ι f`.
@[reassoc (attr := simp)]
theorem imageSubobject_arrow :
    (imageSubobjectIso f).hom ≫ image.ι f = (imageSubobject f).arrow := by
  simp [imageSubobjectIso]

@[reassoc (attr := simp)]
theorem imageSubobject_arrow' :
    (imageSubobjectIso f).inv ≫ (imageSubobject f).arrow = image.ι f := by
  simp [imageSubobjectIso]

/-- A factorisation of `f : X ⟶ Y` through `imageSubobject f`. -/
def factorThruImageSubobject : X ⟶ imageSubobject f :=
  factorThruImage f ≫ (imageSubobjectIso f).inv

instance [HasEqualizers C] : Epi (factorThruImageSubobject f) := by
  dsimp [factorThruImageSubobject]
  -- a composite of an epi with an iso is epi
  apply epi_comp

@[reassoc (attr := simp), elementwise (attr := simp)]
theorem imageSubobject_arrow_comp :
    factorThruImageSubobject f ≫ (imageSubobject f).arrow = f := by
  simp [factorThruImageSubobject, imageSubobject_arrow]

/-- If `f ≫ g = 0` and the factorisation through the image is an epimorphism, then the image
subobject's arrow also composes to zero with `g`. -/
theorem imageSubobject_arrow_comp_eq_zero [HasZeroMorphisms C] {X Y Z : C} {f : X ⟶ Y} {g : Y ⟶ Z}
    [HasImage f] [Epi (factorThruImageSubobject f)] (h : f ≫ g = 0) :
    (imageSubobject f).arrow ≫ g = 0 :=
  zero_of_epi_comp (factorThruImageSubobject f) <| by simp [h]

/-- Any composite `k ≫ f` factors through the image subobject of `f`. -/
theorem imageSubobject_factors_comp_self {W : C} (k : W ⟶ X) :
    (imageSubobject f).Factors (k ≫ f) :=
  ⟨k ≫ factorThruImage f, by simp⟩

@[simp]
theorem factorThruImageSubobject_comp_self {W : C} (k : W ⟶ X) (h) :
    (imageSubobject f).factorThru (k ≫ f) h = k ≫ factorThruImageSubobject f := by
  ext
  simp

@[simp]
theorem factorThruImageSubobject_comp_self_assoc {W W' : C} (k : W ⟶ W') (k' : W' ⟶ X) (h) :
    (imageSubobject f).factorThru (k ≫ k' ≫ f) h = k ≫ k' ≫ factorThruImageSubobject f := by
  ext
  simp

/-- The image of `h ≫ f` is always a smaller subobject than the image of `f`.
-/
theorem imageSubobject_comp_le {X' : C} (h : X' ⟶ X) (f : X ⟶ Y) [HasImage f]
    [HasImage (h ≫ f)] : imageSubobject (h ≫ f) ≤ imageSubobject f :=
  Subobject.mk_le_mk_of_comm (image.preComp h f) (by simp)

section

open ZeroObject

variable [HasZeroMorphisms C] [HasZeroObject C]

-- In the presence of a zero object, the image of a zero morphism is the bottom subobject.
@[simp]
theorem imageSubobject_zero_arrow : (imageSubobject (0 : X ⟶ Y)).arrow = 0 := by
  rw [← imageSubobject_arrow]
  simp

@[simp]
theorem imageSubobject_zero {A B : C} : imageSubobject (0 : A ⟶ B) = ⊥ :=
  Subobject.eq_of_comm (imageSubobjectIso _ ≪≫ imageZero ≪≫ Subobject.botCoeIsoZero.symm)
    (by simp)

end

section

variable [HasEqualizers C]

attribute [local instance] epi_comp

/-- The morphism `imageSubobject (h ≫ f) ⟶ imageSubobject f`
is an epimorphism when `h` is an epimorphism.
In general this does not imply that `imageSubobject (h ≫ f) = imageSubobject f`,
although it will when the ambient category is abelian.
 -/
instance imageSubobject_comp_le_epi_of_epi {X' : C} (h : X' ⟶ X) [Epi h] (f : X ⟶ Y) [HasImage f]
    [HasImage (h ≫ f)] : Epi (Subobject.ofLE _ _ (imageSubobject_comp_le h f)) := by
  rw [ofLE_mk_le_mk_of_comm (image.preComp h f)]
  · infer_instance
  · simp

end

section

variable [HasEqualizers C]

/-- Postcomposing by an isomorphism gives an isomorphism between image subobjects.
-/
def imageSubobjectCompIso (f : X ⟶ Y) [HasImage f] {Y' : C} (h : Y ⟶ Y') [IsIso h] :
    (imageSubobject (f ≫ h) : C) ≅ (imageSubobject f : C) :=
  -- Pass through the chosen image objects and use `image.compIso` in the middle.
  imageSubobjectIso _ ≪≫ (image.compIso _ _).symm ≪≫ (imageSubobjectIso _).symm

@[reassoc (attr := simp)]
theorem imageSubobjectCompIso_hom_arrow (f : X ⟶ Y) [HasImage f] {Y' : C} (h : Y ⟶ Y')
    [IsIso h] :
    (imageSubobjectCompIso f h).hom ≫ (imageSubobject f).arrow =
      (imageSubobject (f ≫ h)).arrow ≫ inv h := by
  simp [imageSubobjectCompIso]

@[reassoc (attr := simp)]
theorem imageSubobjectCompIso_inv_arrow (f : X ⟶ Y) [HasImage f] {Y' : C} (h : Y ⟶ Y')
    [IsIso h] :
    (imageSubobjectCompIso f h).inv ≫ (imageSubobject (f ≫ h)).arrow =
      (imageSubobject f).arrow ≫ h := by
  simp [imageSubobjectCompIso]

end

/-- The image subobject of a monomorphism is the subobject represented by that monomorphism. -/
theorem imageSubobject_mono (f : X ⟶ Y) [Mono f] : imageSubobject f = Subobject.mk f :=
  eq_of_comm (imageSubobjectIso f ≪≫ imageMonoIsoSource f ≪≫ (underlyingIso f).symm) (by simp)

/-- Precomposing by an isomorphism does not change the image subobject. -/
theorem imageSubobject_iso_comp [HasEqualizers C] {X' : C} (h : X' ⟶ X) [IsIso h] (f : X ⟶ Y)
    [HasImage f] : imageSubobject (h ≫ f) = imageSubobject f :=
  le_antisymm (imageSubobject_comp_le h f)
    (Subobject.mk_le_mk_of_comm (inv (image.preComp h f)) (by simp))

/-- A factorisation of `f` through a subobject `X` shows `imageSubobject f ≤ X`. -/
theorem imageSubobject_le {A B : C} {X : Subobject B} (f : A ⟶ B) [HasImage f] (h : A ⟶ X)
    (w : h ≫ X.arrow = f) : imageSubobject f ≤ X :=
  Subobject.le_of_comm
    ((imageSubobjectIso f).hom ≫
      image.lift
        { I := (X : C)
          e := h
          m := X.arrow })
    (by rw [assoc, image.lift_fac, imageSubobject_arrow])

theorem imageSubobject_le_mk {A B : C} {X : C} (g : X ⟶ B) [Mono g] (f : A ⟶ B) [HasImage f]
    (h : A ⟶ X) (w : h ≫ g = f) : imageSubobject f ≤ Subobject.mk g :=
  imageSubobject_le f (h ≫ (Subobject.underlyingIso g).inv) (by simp [w])

/-- Given a commutative square between morphisms `f` and `g`,
we have a morphism in the category from `imageSubobject f` to `imageSubobject g`.
-/
def imageSubobjectMap {W X Y Z : C} {f : W ⟶ X} [HasImage f] {g : Y ⟶ Z} [HasImage g]
    (sq : Arrow.mk f ⟶ Arrow.mk g) [HasImageMap sq] :
    (imageSubobject f : C) ⟶ (imageSubobject g : C) :=
  -- Transport `image.map sq` along the isomorphisms with the chosen image objects.
  (imageSubobjectIso f).hom ≫ image.map sq ≫ (imageSubobjectIso g).inv

@[reassoc (attr := simp)]
theorem imageSubobjectMap_arrow {W X Y Z : C} {f : W ⟶ X} [HasImage f] {g : Y ⟶ Z} [HasImage g]
    (sq : Arrow.mk f ⟶ Arrow.mk g) [HasImageMap sq] :
    imageSubobjectMap sq ≫ (imageSubobject g).arrow = (imageSubobject f).arrow ≫ sq.right := by
  simp only [imageSubobjectMap, Category.assoc, imageSubobject_arrow']
  erw [image.map_ι, ← Category.assoc, imageSubobject_arrow]

-- The next two lemmas record the compatibility of `imageSubobjectMap` with `image.map`.
theorem image_map_comp_imageSubobjectIso_inv {W X Y Z : C} {f : W ⟶ X} [HasImage f] {g : Y ⟶ Z}
    [HasImage g] (sq : Arrow.mk f ⟶ Arrow.mk g) [HasImageMap sq] :
    image.map sq ≫ (imageSubobjectIso _).inv =
      (imageSubobjectIso _).inv ≫ imageSubobjectMap sq := by
  ext
  simpa using image.map_ι sq

theorem imageSubobjectIso_comp_image_map {W X Y Z : C} {f : W ⟶ X} [HasImage f] {g : Y ⟶ Z}
    [HasImage g] (sq : Arrow.mk f ⟶ Arrow.mk g) [HasImageMap sq] :
    (imageSubobjectIso _).hom ≫ image.map sq =
      imageSubobjectMap sq ≫ (imageSubobjectIso _).hom := by
  erw [← Iso.comp_inv_eq, Category.assoc, ← (imageSubobjectIso f).eq_inv_comp,
    image_map_comp_imageSubobjectIso_inv sq]

end Image

end Limits

end CategoryTheory
CategoryTheory\Subobject\MonoOver.lean
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta, Scott Morrison
-/
import Mathlib.CategoryTheory.Adjunction.Over
import Mathlib.CategoryTheory.Adjunction.Reflective
import Mathlib.CategoryTheory.Adjunction.Restrict
import Mathlib.CategoryTheory.Limits.Shapes.Images

/-!
# Monomorphisms over a fixed object

As preparation for defining `Subobject X`, we set up the theory for
`MonoOver X := { f : Over X // Mono f.hom}`.

Here `MonoOver X` is a thin category (a pair of objects has at most one morphism between them),
so we can think of it as a preorder. However as it is not skeletal, it is not yet a partial order.

`Subobject X` will be defined as the skeletalization of `MonoOver X`.

We provide

* `def pullback [HasPullbacks C] (f : X ⟶ Y) : MonoOver Y ⥤ MonoOver X`
* `def map (f : X ⟶ Y) [Mono f] : MonoOver X ⥤ MonoOver Y`
* `def «exists» [HasImages C] (f : X ⟶ Y) : MonoOver X ⥤ MonoOver Y`

and prove their basic properties and relationships.

## Notes

This development originally appeared in Bhavik Mehta's "Topos theory for Lean" repository,
and was ported to mathlib by Scott Morrison.
-/


universe v₁ v₂ u₁ u₂

noncomputable section

namespace CategoryTheory

open CategoryTheory CategoryTheory.Category CategoryTheory.Limits

variable {C : Type u₁} [Category.{v₁} C] {X Y Z : C}
variable {D : Type u₂} [Category.{v₂} D]

/-- The category of monomorphisms into `X` as a full subcategory of the over category.
This isn't skeletal, so it's not a partial order.

Later we define `Subobject X` as the quotient of this by isomorphisms.
-/
def MonoOver (X : C) :=
  FullSubcategory fun f : Over X => Mono f.hom

-- The category structure is inherited from the full subcategory of `Over X`.
instance (X : C) : Category (MonoOver X) :=
  FullSubcategory.category _

namespace MonoOver

/-- Construct a `MonoOver X`. -/
@[simps]
def mk' {X A : C} (f : A ⟶ X) [hf : Mono f] : MonoOver X where
  obj := Over.mk f
  property := hf

/-- The inclusion from monomorphisms over X to morphisms over X.
-/
def forget (X : C) : MonoOver X ⥤ Over X :=
  fullSubcategoryInclusion _

-- Coerce a `MonoOver X` to its underlying object (the source of the monomorphism).
instance : CoeOut (MonoOver X) C where coe Y := Y.obj.left

@[simp]
theorem forget_obj_left {f} : ((forget X).obj f).left = (f : C) :=
  rfl

@[simp]
theorem mk'_coe' {X A : C} (f : A ⟶ X) [Mono f] : (mk' f : C) = A :=
  rfl

/-- Convenience notation for the underlying arrow of a monomorphism over X. -/
abbrev arrow (f : MonoOver X) : (f : C) ⟶ X :=
  ((forget X).obj f).hom

@[simp]
theorem mk'_arrow {X A : C} (f : A ⟶ X) [Mono f] : (mk' f).arrow = f :=
  rfl

@[simp]
theorem forget_obj_hom {f} : ((forget X).obj f).hom = f.arrow :=
  rfl

/-- The forget functor `MonoOver X ⥤ Over X` is fully faithful. -/
def fullyFaithfulForget (X : C) : (forget X).FullyFaithful :=
  fullyFaithfulFullSubcategoryInclusion _

instance : (forget X).Full :=
  FullSubcategory.full _

instance : (forget X).Faithful :=
  FullSubcategory.faithful _

-- Each object of `MonoOver X` carries the proof that its arrow is a monomorphism.
instance mono (f : MonoOver X) : Mono f.arrow :=
  f.property

/-- The category of monomorphisms over X is a thin category,
which makes defining its skeleton easy. -/
instance isThin {X : C} : Quiver.IsThin (MonoOver X) := fun f g =>
  ⟨by
    intro h₁ h₂
    apply Over.OverMorphism.ext
    erw [← cancel_mono g.arrow, Over.w h₁, Over.w h₂]⟩

@[reassoc]
theorem w {f g : MonoOver X} (k : f ⟶ g) : k.left ≫ g.arrow = f.arrow :=
  Over.w _

/-- Convenience constructor for a morphism in monomorphisms over `X`. -/
abbrev homMk {f g : MonoOver X} (h : f.obj.left ⟶ g.obj.left)
    (w : h ≫ g.arrow = f.arrow := by aesop_cat) : f ⟶ g :=
  Over.homMk h w

/-- Convenience constructor for an isomorphism in monomorphisms over `X`. -/
@[simps]
def isoMk {f g : MonoOver X} (h : f.obj.left ≅ g.obj.left)
    (w : h.hom ≫ g.arrow = f.arrow := by aesop_cat) : f ≅ g where
  hom := homMk h.hom w
  inv := homMk h.inv (by rw [h.inv_comp_eq, w])

/-- If `f : MonoOver X`, then `mk' f.arrow` is of course just `f`, but not definitionally, so we
package it as an isomorphism.
-/
@[simp]
def mk'ArrowIso {X : C} (f : MonoOver X) : mk' f.arrow ≅ f :=
  isoMk (Iso.refl _)

/-- Lift a functor between over categories to a functor between `MonoOver` categories,
given suitable evidence that morphisms are taken to monomorphisms.
-/
@[simps]
def lift {Y : D} (F : Over Y ⥤ Over X)
    (h : ∀ f : MonoOver Y, Mono (F.obj ((MonoOver.forget Y).obj f)).hom) :
    MonoOver Y ⥤ MonoOver X where
  obj f := ⟨_, h f⟩
  map k := (MonoOver.forget Y ⋙ F).map k

/-- Isomorphic functors `Over Y ⥤ Over X` lift to isomorphic functors `MonoOver Y ⥤ MonoOver X`.
-/
def liftIso {Y : D} {F₁ F₂ : Over Y ⥤ Over X} (h₁ h₂) (i : F₁ ≅ F₂) : lift F₁ h₁ ≅ lift F₂ h₂ :=
  Functor.fullyFaithfulCancelRight (MonoOver.forget X) (isoWhiskerLeft (MonoOver.forget Y) i)

/-- `MonoOver.lift` commutes with composition of functors. -/
def liftComp {X Z : C} {Y : D} (F : Over X ⥤ Over Y) (G : Over Y ⥤ Over Z) (h₁ h₂) :
    lift F h₁ ⋙ lift G h₂ ≅ lift (F ⋙ G) fun f => h₂ ⟨_, h₁ f⟩ :=
  Functor.fullyFaithfulCancelRight (MonoOver.forget _) (Iso.refl _)

/-- `MonoOver.lift` preserves the identity functor. -/
def liftId : (lift (𝟭 (Over X)) fun f => f.2) ≅ 𝟭 _ :=
  Functor.fullyFaithfulCancelRight (MonoOver.forget _) (Iso.refl _)

-- `lift` commutes definitionally with the forgetful functors.
@[simp]
theorem lift_comm (F : Over Y ⥤ Over X)
    (h : ∀ f : MonoOver Y, Mono (F.obj ((MonoOver.forget Y).obj f)).hom) :
    lift F h ⋙ MonoOver.forget X = MonoOver.forget Y ⋙ F :=
  rfl

@[simp]
theorem lift_obj_arrow {Y : D} (F : Over Y ⥤ Over X)
    (h : ∀ f : MonoOver Y, Mono (F.obj ((MonoOver.forget Y).obj f)).hom) (f : MonoOver Y) :
    ((lift F h).obj f).arrow = (F.obj ((forget Y).obj f)).hom :=
  rfl

/-- Monomorphisms over an object `f : Over A` in an over category
are equivalent to monomorphisms over the source of `f`.
-/
def slice {A : C} {f : Over A}
    (h₁ :
      ∀ (g : MonoOver f),
        Mono ((Over.iteratedSliceEquiv f).functor.obj ((forget f).obj g)).hom)
    (h₂ :
      ∀ (g : MonoOver f.left),
        Mono ((Over.iteratedSliceEquiv f).inverse.obj ((forget f.left).obj g)).hom) :
    MonoOver f ≌ MonoOver f.left where
  -- Lift both directions of the iterated-slice equivalence through `MonoOver.lift`.
  functor := MonoOver.lift f.iteratedSliceEquiv.functor h₁
  inverse := MonoOver.lift f.iteratedSliceEquiv.inverse h₂
  unitIso :=
    MonoOver.liftId.symm ≪≫
      MonoOver.liftIso _ _ f.iteratedSliceEquiv.unitIso ≪≫ (MonoOver.liftComp _ _ _ _).symm
  counitIso :=
    MonoOver.liftComp _ _ _ _ ≪≫
      MonoOver.liftIso _ _ f.iteratedSliceEquiv.counitIso ≪≫ MonoOver.liftId

section Pullback

variable [HasPullbacks C]

/-- When `C` has pullbacks, a morphism `f : X ⟶ Y` induces a functor `MonoOver Y ⥤ MonoOver X`,
by pulling back a monomorphism along `f`. -/
def pullback (f : X ⟶ Y) : MonoOver Y ⥤ MonoOver X :=
  MonoOver.lift (Over.pullback f)
    (fun g => by
      -- the pullback of a monomorphism along `f` is again a monomorphism
      haveI : Mono ((forget Y).obj g).hom := (inferInstance : Mono g.arrow)
      apply pullback.snd_of_mono)

/-- pullback commutes with composition (up to a natural isomorphism) -/
def pullbackComp (f : X ⟶ Y) (g : Y ⟶ Z) : pullback (f ≫ g) ≅ pullback g ⋙ pullback f :=
  liftIso _ _ (Over.pullbackComp _ _) ≪≫ (liftComp _ _ _ _).symm

/-- pullback preserves the identity (up to a natural isomorphism) -/
def pullbackId : pullback (𝟙 X) ≅ 𝟭 _ :=
  liftIso _ _ Over.pullbackId ≪≫ liftId

@[simp]
theorem pullback_obj_left (f : X ⟶ Y) (g : MonoOver Y) :
    ((pullback f).obj g : C) = Limits.pullback g.arrow f :=
  rfl

@[simp]
theorem pullback_obj_arrow (f : X ⟶ Y) (g : MonoOver Y) :
    ((pullback f).obj g).arrow = pullback.snd _ _ :=
  rfl

end Pullback

section Map

attribute [instance] mono_comp

/-- We can map monomorphisms over `X` to monomorphisms over `Y`
by post-composition with a monomorphism `f : X ⟶ Y`.
-/
def map (f : X ⟶ Y) [Mono f] : MonoOver X ⥤ MonoOver Y :=
  lift (Over.map f) fun g => by apply mono_comp g.arrow f

/-- `MonoOver.map` commutes with composition (up to a natural isomorphism).
-/
def mapComp (f : X ⟶ Y) (g : Y ⟶ Z) [Mono f] [Mono g] : map (f ≫ g) ≅ map f ⋙ map g :=
  liftIso _ _ (Over.mapComp _ _) ≪≫ (liftComp _ _ _ _).symm

variable (X)

/-- `MonoOver.map` preserves the identity (up to a natural isomorphism). -/
def mapId : map (𝟙 X) ≅ 𝟭 _ :=
  liftIso _ _ (Over.mapId X) ≪≫ liftId

variable {X}

@[simp]
theorem map_obj_left (f : X ⟶ Y) [Mono f] (g : MonoOver X) : ((map f).obj g : C) = g.obj.left :=
  rfl

@[simp]
theorem map_obj_arrow (f : X ⟶ Y) [Mono f] (g : MonoOver X) :
    ((map f).obj g).arrow = g.arrow ≫ f :=
  rfl

instance full_map (f : X ⟶ Y) [Mono f] : Functor.Full (map f) where
  map_surjective {g h} e := by
    refine ⟨homMk e.left ?_, rfl⟩
    -- the underlying triangle commutes after cancelling the monomorphism `f`
    · rw [← cancel_mono f, assoc]
      apply w e

instance faithful_map (f : X ⟶ Y) [Mono f] : Functor.Faithful (map f) where

/-- Isomorphic objects have equivalent `MonoOver` categories. -/
@[simps]
def mapIso {A B : C} (e : A ≅ B) : MonoOver A ≌ MonoOver B where
  functor := map e.hom
  inverse := map e.inv
  unitIso := ((mapComp _ _).symm ≪≫ eqToIso (by simp) ≪≫ (mapId _)).symm
  counitIso := (mapComp _ _).symm ≪≫ eqToIso (by simp) ≪≫ (mapId _)

section

variable (X)

/-- An equivalence of categories `e` between `C` and `D`
induces an equivalence between `MonoOver X` and `MonoOver (e.functor.obj X)`
whenever `X` is an object of `C`.
-/
@[simps]
def congr (e : C ≌ D) : MonoOver X ≌ MonoOver (e.functor.obj X) where
  functor :=
    lift (Over.post e.functor) fun f => by
      dsimp
      infer_instance
  inverse :=
    -- Transport back along `e.inverse`, then correct the base object using the unit isomorphism.
    (lift (Over.post e.inverse) fun f => by
        dsimp
        infer_instance) ⋙
      (mapIso (e.unitIso.symm.app X)).functor
  unitIso := NatIso.ofComponents fun Y => isoMk (e.unitIso.app Y)
  counitIso := NatIso.ofComponents fun Y => isoMk (e.counitIso.app Y)

end

section

variable [HasPullbacks C]

/-- `map f` is left adjoint to `pullback f` when `f` is a monomorphism -/
def mapPullbackAdj (f : X ⟶ Y) [Mono f] : map f ⊣ pullback f :=
  (Over.mapPullbackAdj f).restrictFullyFaithful (fullyFaithfulForget X) (fullyFaithfulForget Y)
    (Iso.refl _) (Iso.refl _)

/-- `MonoOver.map f` followed by `MonoOver.pullback f` is the identity. -/
def pullbackMapSelf (f : X ⟶ Y) [Mono f] : map f ⋙ pullback f ≅ 𝟭 _ :=
  (asIso (MonoOver.mapPullbackAdj f).unit).symm

end

end Map

section Image

variable (f : X ⟶ Y) [HasImage f]

/-- The `MonoOver Y` for the image inclusion for a morphism `f : X ⟶ Y`.
-/
def imageMonoOver (f : X ⟶ Y) [HasImage f] : MonoOver Y :=
  MonoOver.mk' (image.ι f)

@[simp]
theorem imageMonoOver_arrow (f : X ⟶ Y) [HasImage f] : (imageMonoOver f).arrow = image.ι f :=
  rfl

end Image

section Image

variable [HasImages C]

/-- Taking the image of a morphism gives a functor `Over X ⥤ MonoOver X`.
-/
@[simps]
def image : Over X ⥤ MonoOver X where
  obj f := imageMonoOver f.hom
  map {f g} k := by
    apply (forget X).preimage _
    apply Over.homMk _ _
    -- the map on images comes from the universal property of the image factorisation
    · exact
        image.lift
          { I := Limits.image _
            m := image.ι g.hom
            e := k.left ≫ factorThruImage g.hom }
    · apply image.lift_fac

/-- `MonoOver.image : Over X ⥤ MonoOver X` is left adjoint to
`MonoOver.forget : MonoOver X ⥤ Over X`
-/
def imageForgetAdj : image ⊣ forget X :=
  Adjunction.mkOfHomEquiv
    { homEquiv := fun f g =>
        { -- forwards: precompose with the factorisation of `f.hom` through its image
          toFun := fun k => by
            apply Over.homMk (factorThruImage f.hom ≫ k.left) _
            change (factorThruImage f.hom ≫ k.left) ≫ _ = f.hom
            rw [assoc, Over.w k]
            apply image.fac
          -- backwards: use the universal property of the image
          invFun := fun k => by
            refine Over.homMk ?_ ?_
            · exact
                image.lift
                  { I := g.obj.left
                    m := g.arrow
                    e := k.left
                    fac := Over.w k }
            · apply image.lift_fac
          left_inv := fun k => Subsingleton.elim _ _
          right_inv := fun k => by
            ext1
            change factorThruImage _ ≫ image.lift _ = _
            rw [← cancel_mono g.arrow, assoc, image.lift_fac, image.fac f.hom]
            exact (Over.w k).symm } }

instance : (forget X).IsRightAdjoint :=
  ⟨_, ⟨imageForgetAdj⟩⟩

instance reflective : Reflective (forget X) where
  adj := imageForgetAdj

/-- Forgetting that a monomorphism over `X` is a monomorphism, then taking its image,
is the identity functor.
-/
def forgetImage : forget X ⋙ image ≅ 𝟭 (MonoOver X) :=
  asIso (Adjunction.counit imageForgetAdj)

end Image

section Exists

variable [HasImages C]

/-- In the case where `f` is not a monomorphism but `C` has images,
we can still take the "forward map" under it, which agrees with `MonoOver.map f`.
-/
def «exists» (f : X ⟶ Y) : MonoOver X ⥤ MonoOver Y :=
  forget _ ⋙ Over.map f ⋙ image

instance faithful_exists (f : X ⟶ Y) : Functor.Faithful («exists» f) where

/-- When `f : X ⟶ Y` is a monomorphism, `exists f` agrees with `map f`.
-/
def existsIsoMap (f : X ⟶ Y) [Mono f] : «exists» f ≅ map f :=
  NatIso.ofComponents
    (by
      intro Z
      suffices (forget _).obj ((«exists» f).obj Z) ≅ (forget _).obj ((map f).obj Z) by
        apply (forget _).preimageIso this
      apply Over.isoMk _ _
      -- since `Z.arrow ≫ f` is a monomorphism, its image is isomorphic to its source
      · apply imageMonoIsoSource (Z.arrow ≫ f)
      · apply imageMonoIsoSource_hom_self)

/-- `exists` is adjoint to `pullback` when images exist -/
def existsPullbackAdj (f : X ⟶ Y) [HasPullbacks C] : «exists» f ⊣ pullback f :=
  ((Over.mapPullbackAdj f).comp imageForgetAdj).restrictFullyFaithful
    (fullyFaithfulForget X) (Functor.FullyFaithful.id _) (Iso.refl _) (Iso.refl _)

end Exists

end MonoOver

end CategoryTheory
CategoryTheory\Subobject\Types.lean
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.Subobject.WellPowered
import Mathlib.CategoryTheory.Types
import Mathlib.Data.Set.Subsingleton

/-!
# `Type u` is well-powered

By building a categorical equivalence `MonoOver α ≌ Set α` for any `α : Type u`,
we deduce that `Subobject α ≃o Set α` and that `Type u` is well-powered.

One would hope that for a particular concrete category `C` (`AddCommGroup`, etc)
it's viable to prove `[WellPowered C]` without explicitly aligning `Subobject X`
with the "hand-rolled" definition of subobjects.

This may be possible using Lawvere theories,
but it remains to be seen whether this just pushes lumps around in the carpet.
-/


universe u

open CategoryTheory

open CategoryTheory.Subobject

/-- The inclusion `Subtype.val : s → α` of a set is a monomorphism in `Type u`,
since it is injective. -/
theorem subtype_val_mono {α : Type u} (s : Set α) : Mono (↾(Subtype.val : s → α)) :=
  (mono_iff_injective _).mpr Subtype.val_injective

attribute [local instance] subtype_val_mono

/-- The category of `MonoOver α`, for `α : Type u`, is equivalent to the partial order `Set α`. -/
@[simps]
noncomputable def Types.monoOverEquivalenceSet (α : Type u) : MonoOver α ≌ Set α where
  functor :=
    { -- a monomorphism into `α` is sent to its range
      obj := fun f => Set.range f.1.hom
      map := fun {f g} t =>
        homOfLE (by
          rintro a ⟨x, rfl⟩
          exact ⟨t.1 x, congr_fun t.w x⟩) }
  inverse :=
    { -- a set is sent to the subtype inclusion, which is a (local-instance) monomorphism
      obj := fun s => MonoOver.mk' (Subtype.val : s → α)
      map := fun {s t} b => MonoOver.homMk (fun w => ⟨w.1, Set.mem_of_mem_of_subset w.2 b.le⟩) }
  unitIso :=
    NatIso.ofComponents fun f =>
      MonoOver.isoMk (Equiv.ofInjective f.1.hom ((mono_iff_injective _).mp f.2)).toIso
  counitIso := NatIso.ofComponents fun s => eqToIso Subtype.range_val

instance : WellPowered (Type u) :=
  wellPowered_of_essentiallySmall_monoOver fun α =>
    EssentiallySmall.mk' (Types.monoOverEquivalenceSet α)

/-- For `α : Type u`, `Subobject α` is order isomorphic to `Set α`.
-/
noncomputable def Types.subobjectEquivSet (α : Type u) : Subobject α ≃o Set α :=
  -- Pass the thin-category equivalence `MonoOver α ≌ Set α` to skeletons to obtain
  -- an order isomorphism.
  (Types.monoOverEquivalenceSet α).thinSkeletonOrderIso
CategoryTheory\Subobject\WellPowered.lean
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.Subobject.Basic
import Mathlib.CategoryTheory.EssentiallySmall

/-!
# Well-powered categories

A category `(C : Type u) [Category.{v} C]` is `[WellPowered C]` if
for every `X : C`, we have `Small.{v} (Subobject X)`.

(Note that in this situation `Subobject X : Type (max u v)`,
so this is a nontrivial condition for large categories,
but automatic for small categories.)

This is equivalent to the category `MonoOver X` being `EssentiallySmall.{v}` for all `X : C`.

When a category is well-powered, you can obtain nonconstructive witnesses as
`Shrink (Subobject X) : Type v`
and
`equivShrink (Subobject X) : Subobject X ≃ Shrink (subobject X)`.
-/


universe v u₁ u₂

namespace CategoryTheory

variable (C : Type u₁) [Category.{v} C]

/-- A category (with morphisms in `Type v`) is well-powered if `Subobject X` is `v`-small for
every `X`.

We show in `wellPowered_of_essentiallySmall_monoOver` and `essentiallySmall_monoOver`
that this is the case if and only if `MonoOver X` is `v`-essentially small for every `X`.
-/
class WellPowered : Prop where
  subobject_small : ∀ X : C, Small.{v} (Subobject X) := by infer_instance

instance small_subobject [WellPowered C] (X : C) : Small.{v} (Subobject X) :=
  WellPowered.subobject_small X

-- Small categories are automatically well-powered.
instance (priority := 100) wellPowered_of_smallCategory (C : Type u₁) [SmallCategory C] :
    WellPowered C where

variable {C}

/-- `MonoOver X` is essentially small iff `Subobject X` is small; this is immediate since
`MonoOver X` is a thin category. -/
theorem essentiallySmall_monoOver_iff_small_subobject (X : C) :
    EssentiallySmall.{v} (MonoOver X) ↔ Small.{v} (Subobject X) :=
  essentiallySmall_iff_of_thin

theorem wellPowered_of_essentiallySmall_monoOver (h : ∀ X : C, EssentiallySmall.{v} (MonoOver X)) :
    WellPowered C :=
  { subobject_small := fun X => (essentiallySmall_monoOver_iff_small_subobject X).mp (h X) }

section

variable [WellPowered C]

instance essentiallySmall_monoOver (X : C) : EssentiallySmall.{v} (MonoOver X) :=
  (essentiallySmall_monoOver_iff_small_subobject X).mpr (WellPowered.subobject_small X)

end

section Equivalence

variable {D : Type u₂} [Category.{v} D]

/-- Well-poweredness transfers along an equivalence (with morphisms in the same universe). -/
theorem wellPowered_of_equiv (e : C ≌ D) [WellPowered C] : WellPowered D :=
  wellPowered_of_essentiallySmall_monoOver fun X =>
    (essentiallySmall_congr (MonoOver.congr X e.symm)).2 <| by infer_instance

/-- Being well-powered is preserved by equivalences, as long as the two categories involved have
their morphisms in the same universe. -/
theorem wellPowered_congr (e : C ≌ D) : WellPowered C ↔ WellPowered D :=
  ⟨fun _ => wellPowered_of_equiv e, fun _ => wellPowered_of_equiv e.symm⟩

end Equivalence

end CategoryTheory
CategoryTheory\Sums\Associator.lean
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.Sums.Basic

/-!
# Associator for binary disjoint union of categories.

The associator functor `((C ⊕ D) ⊕ E) ⥤ (C ⊕ (D ⊕ E))` and its inverse form an equivalence.
-/


universe v u

open CategoryTheory

open Sum

namespace CategoryTheory.sum

variable (C : Type u) [Category.{v} C] (D : Type u) [Category.{v} D] (E : Type u)
  [Category.{v} E]

/-- The associator functor `(C ⊕ D) ⊕ E ⥤ C ⊕ (D ⊕ E)` for sums of categories.
-/
def associator : (C ⊕ D) ⊕ E ⥤ C ⊕ (D ⊕ E) where
  obj X :=
    match X with
    | inl (inl X) => inl X
    | inl (inr X) => inr (inl X)
    | inr X => inr (inr X)
  -- morphisms only exist within a summand, so they are reassociated unchanged
  map {X Y} f :=
    match X, Y, f with
    | inl (inl _), inl (inl _), f => f
    | inl (inr _), inl (inr _), f => f
    | inr _, inr _, f => f
  map_id := by rintro ((_|_)|_) <;> rfl
  map_comp := by
    rintro ((_|_)|_) ((_|_)|_) ((_|_)|_) f g <;> first | cases f | cases g | aesop_cat

@[simp]
theorem associator_obj_inl_inl (X) : (associator C D E).obj (inl (inl X)) = inl X :=
  rfl

@[simp]
theorem associator_obj_inl_inr (X) : (associator C D E).obj (inl (inr X)) = inr (inl X) :=
  rfl

@[simp]
theorem associator_obj_inr (X) : (associator C D E).obj (inr X) = inr (inr X) :=
  rfl

@[simp]
theorem associator_map_inl_inl {X Y : C} (f : inl (inl X) ⟶ inl (inl Y)) :
    (associator C D E).map f = f :=
  rfl

@[simp]
theorem associator_map_inl_inr {X Y : D} (f : inl (inr X) ⟶ inl (inr Y)) :
    (associator C D E).map f = f :=
  rfl

@[simp]
theorem associator_map_inr {X Y : E} (f : inr X ⟶ inr Y) : (associator C D E).map f = f :=
  rfl

/-- The inverse associator functor `C ⊕ (D ⊕ E) ⥤ (C ⊕ D) ⊕ E` for sums of categories.
-/
def inverseAssociator : C ⊕ (D ⊕ E) ⥤ (C ⊕ D) ⊕ E where
  obj X :=
    match X with
    | inl X => inl (inl X)
    | inr (inl X) => inl (inr X)
    | inr (inr X) => inr X
  -- morphisms only exist within a summand, so they are reassociated unchanged
  map {X Y} f :=
    match X, Y, f with
    | inl _, inl _, f => f
    | inr (inl _), inr (inl _), f => f
    | inr (inr _), inr (inr _), f => f
  map_id := by rintro (_|(_|_)) <;> rfl
  map_comp := by
    rintro (_|(_|_)) (_|(_|_)) (_|(_|_)) f g <;> first | cases f | cases g | aesop_cat

@[simp]
theorem inverseAssociator_obj_inl (X) : (inverseAssociator C D E).obj (inl X) = inl (inl X) :=
  rfl

@[simp]
theorem inverseAssociator_obj_inr_inl (X) :
    (inverseAssociator C D E).obj (inr (inl X)) = inl (inr X) :=
  rfl

@[simp]
theorem inverseAssociator_obj_inr_inr (X) :
    (inverseAssociator C D E).obj (inr (inr X)) = inr X :=
  rfl

@[simp]
theorem inverseAssociator_map_inl {X Y : C} (f : inl X ⟶ inl Y) :
    (inverseAssociator C D E).map f = f :=
  rfl

@[simp]
theorem inverseAssociator_map_inr_inl {X Y : D} (f : inr (inl X) ⟶ inr (inl Y)) :
    (inverseAssociator C D E).map f = f :=
  rfl

@[simp]
theorem inverseAssociator_map_inr_inr {X Y : E} (f : inr (inr X) ⟶ inr (inr Y)) :
    (inverseAssociator C D E).map f = f :=
  rfl

/-- The equivalence of categories expressing associativity of sums of categories. -/
def associativity : (C ⊕ D) ⊕ E ≌ C ⊕ (D ⊕ E) :=
  Equivalence.mk (associator C D E) (inverseAssociator C D E)
    (NatIso.ofComponents (fun X => eqToIso (by rcases X with ((_|_)|_) <;> rfl))
      -- Porting note: aesop_cat fails
      (by rintro ((_|_)|_) ((_|_)|_) f <;> first | cases f | aesop_cat))
    (NatIso.ofComponents (fun X => eqToIso (by rcases X with (_|(_|_)) <;> rfl))
      -- Porting note: aesop_cat fails
      (by rintro (_|(_|_)) (_|(_|_)) f <;> first | cases f | aesop_cat))

instance associatorIsEquivalence : (associator C D E).IsEquivalence :=
  (by infer_instance : (associativity C D E).functor.IsEquivalence)

instance inverseAssociatorIsEquivalence : (inverseAssociator C D E).IsEquivalence :=
  (by infer_instance : (associativity C D E).inverse.IsEquivalence)

-- TODO unitors?
-- TODO pentagon natural transformation? ...satisfying?
end CategoryTheory.sum
CategoryTheory\Sums\Basic.lean
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.CategoryTheory.EqToHom

/-!
# Binary disjoint unions of categories

We define the category instance on `C ⊕ D` when `C` and `D` are categories.

We define:

* `inl_` : the functor `C ⥤ C ⊕ D`
* `inr_` : the functor `D ⥤ C ⊕ D`
* `swap` : the functor `C ⊕ D ⥤ D ⊕ C` (and the fact this is an equivalence)

We further define sums of functors and natural transformations, written `F.sum G` and `α.sum β`.
-/


namespace CategoryTheory

universe v₁ u₁

-- morphism levels before object levels. See note [category_theory universes].
open Sum

section

variable (C : Type u₁) [Category.{v₁} C] (D : Type u₁) [Category.{v₁} D]

/- Porting note: `aesop_cat` not firing on `assoc` where autotac in Lean 3 did-/
/-- `sum C D` gives the direct sum of two categories. -/
instance sum : Category.{v₁} (C ⊕ D) where
  -- There are no morphisms between the two summands: cross morphisms are `PEmpty`.
  Hom X Y :=
    match X, Y with
    | inl X, inl Y => X ⟶ Y
    | inl _, inr _ => PEmpty
    | inr _, inl _ => PEmpty
    | inr X, inr Y => X ⟶ Y
  id X :=
    match X with
    | inl X => 𝟙 X
    | inr X => 𝟙 X
  comp {X Y Z} f g :=
    match X, Y, Z, f, g with
    | inl X, inl Y, inl Z, f, g => f ≫ g
    | inr X, inr Y, inr Z, f, g => f ≫ g
  assoc {W X Y Z} f g h :=
    match X, Y, Z, W with
    | inl X, inl Y, inl Z, inl W => Category.assoc f g h
    | inr X, inr Y, inr Z, inr W => Category.assoc f g h

@[aesop norm -10 destruct (rule_sets := [CategoryTheory])]
theorem hom_inl_inr_false {X : C} {Y : D} (f : Sum.inl X ⟶ Sum.inr Y) : False := by
  cases f

@[aesop norm -10 destruct (rule_sets := [CategoryTheory])]
theorem hom_inr_inl_false {X : C} {Y : D} (f : Sum.inr X ⟶ Sum.inl Y) : False := by
  cases f

-- Composition in a summand agrees definitionally with composition in `C ⊕ D`.
theorem sum_comp_inl {P Q R : C} (f : (inl P : C ⊕ D) ⟶ inl Q) (g : (inl Q : C ⊕ D) ⟶ inl R) :
    @CategoryStruct.comp _ _ P Q R (f : P ⟶ Q) (g : Q ⟶ R) =
      @CategoryStruct.comp _ _ (inl P) (inl Q) (inl R) (f : P ⟶ Q) (g : Q ⟶ R) :=
  rfl

theorem sum_comp_inr {P Q R : D} (f : (inr P : C ⊕ D) ⟶ inr Q) (g : (inr Q : C ⊕ D) ⟶ inr R) :
    @CategoryStruct.comp _ _ P Q R (f : P ⟶ Q) (g : Q ⟶ R) =
      @CategoryStruct.comp _ _ (inr P) (inr Q) (inr R) (f : P ⟶ Q) (g : Q ⟶ R) :=
  rfl

end

namespace Sum

variable (C : Type u₁) [Category.{v₁} C] (D : Type u₁) [Category.{v₁} D]

-- Unfortunate naming here, suggestions welcome.
/-- `inl_` is the functor `X ↦ inl X`. -/
@[simps]
def inl_ : C ⥤ C ⊕ D where
  obj X := inl X
  map {X Y} f := f

/-- `inr_` is the functor `X ↦ inr X`. -/
@[simps]
def inr_ : D ⥤ C ⊕ D where
  obj X := inr X
  map {X Y} f := f

/- Porting note: `aesop_cat` not firing on `map_comp` where autotac in Lean 3 did
but `map_id` was ok. -/
/-- The functor exchanging two direct summand categories. -/
def swap : C ⊕ D ⥤ D ⊕ C where
  obj X :=
    match X with
    | inl X => inr X
    | inr X => inl X
  map := @fun X Y f =>
    match X, Y, f with
    | inl _, inl _, f => f
    | inr _, inr _, f => f
  map_comp := fun {X} {Y} {Z} _ _ =>
    match X, Y, Z with
    | inl X, inl Y, inl Z => by rfl
    | inr X, inr Y, inr Z => by rfl

@[simp]
theorem swap_obj_inl (X : C) : (swap C D).obj (inl X) = inr X :=
  rfl

@[simp]
theorem swap_obj_inr (X : D) : (swap C D).obj (inr X) = inl X :=
  rfl

@[simp]
theorem swap_map_inl {X Y : C} {f : inl X ⟶ inl Y} : (swap C D).map f = f :=
  rfl

@[simp]
theorem swap_map_inr {X Y : D} {f : inr X ⟶ inr Y} : (swap C D).map f = f :=
  rfl

namespace Swap

/-- `swap` gives an equivalence between `C ⊕ D` and `D ⊕ C`. -/
def equivalence : C ⊕ D ≌ D ⊕ C :=
  Equivalence.mk (swap C D) (swap D C)
    (NatIso.ofComponents (fun X => eqToIso (by cases X <;> rfl)))
    (NatIso.ofComponents (fun X => eqToIso (by cases X <;> rfl)))

instance isEquivalence : (swap C D).IsEquivalence :=
  (by infer_instance : (equivalence C D).functor.IsEquivalence)

/-- The double swap on `C ⊕ D` is naturally isomorphic to the identity functor.
-/ def symmetry : swap C D ⋙ swap D C ≅ 𝟭 (C ⊕ D) := (equivalence C D).unitIso.symm end Swap end Sum variable {A : Type u₁} [Category.{v₁} A] {B : Type u₁} [Category.{v₁} B] {C : Type u₁} [Category.{v₁} C] {D : Type u₁} [Category.{v₁} D] namespace Functor /-- The sum of two functors. -/ def sum (F : A ⥤ B) (G : C ⥤ D) : A ⊕ C ⥤ B ⊕ D where obj X := match X with | inl X => inl (F.obj X) | inr X => inr (G.obj X) map {X Y} f := match X, Y, f with | inl X, inl Y, f => F.map f | inr X, inr Y, f => G.map f map_id {X} := by cases X <;> (erw [Functor.map_id]; rfl) map_comp {X Y Z} f g := match X, Y, Z, f, g with | inl X, inl Y, inl Z, f, g => by erw [F.map_comp]; rfl | inr X, inr Y, inr Z, f, g => by erw [G.map_comp]; rfl /-- Similar to `sum`, but both functors land in the same category `C` -/ def sum' (F : A ⥤ C) (G : B ⥤ C) : A ⊕ B ⥤ C where obj X := match X with | inl X => F.obj X | inr X => G.obj X map {X Y} f := match X, Y, f with | inl _, inl _, f => F.map f | inr _, inr _, f => G.map f map_id {X} := by cases X <;> erw [Functor.map_id] map_comp {X Y Z} f g := match X, Y, Z, f, g with | inl _, inl _, inl _, f, g => by erw [F.map_comp] | inr _, inr _, inr _, f, g => by erw [G.map_comp] /-- The sum `F.sum' G` precomposed with the left inclusion functor is isomorphic to `F` -/ @[simps!] def inlCompSum' (F : A ⥤ C) (G : B ⥤ C) : Sum.inl_ A B ⋙ F.sum' G ≅ F := NatIso.ofComponents fun X => Iso.refl _ /-- The sum `F.sum' G` precomposed with the right inclusion functor is isomorphic to `G` -/ @[simps!] 
def inrCompSum' (F : A ⥤ C) (G : B ⥤ C) : Sum.inr_ A B ⋙ F.sum' G ≅ G :=
  NatIso.ofComponents fun X => Iso.refl _

-- The action of `Functor.sum` on objects of each summand holds by definition.
@[simp]
theorem sum_obj_inl (F : A ⥤ B) (G : C ⥤ D) (a : A) : (F.sum G).obj (inl a) = inl (F.obj a) :=
  rfl

@[simp]
theorem sum_obj_inr (F : A ⥤ B) (G : C ⥤ D) (c : C) : (F.sum G).obj (inr c) = inr (G.obj c) :=
  rfl

-- The action of `Functor.sum` on morphisms of each summand holds by definition.
@[simp]
theorem sum_map_inl (F : A ⥤ B) (G : C ⥤ D) {a a' : A} (f : inl a ⟶ inl a') :
    (F.sum G).map f = F.map f :=
  rfl

@[simp]
theorem sum_map_inr (F : A ⥤ B) (G : C ⥤ D) {c c' : C} (f : inr c ⟶ inr c') :
    (F.sum G).map f = G.map f :=
  rfl

end Functor

namespace NatTrans

/-- The sum of two natural transformations. -/
def sum {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) : F.sum H ⟶ G.sum I where
  -- the component at an object of the sum category is the component of `α` or of `β`,
  -- depending on the summand the object lives in
  app X := match X with
    | inl X => α.app X
    | inr X => β.app X
  -- naturality only needs to be checked on morphisms within each summand, since there
  -- are no morphisms between `inl _` and `inr _`
  naturality X Y f := match X, Y, f with
    | inl X, inl Y, f => by erw [α.naturality]; rfl
    | inr X, inr Y, f => by erw [β.naturality]; rfl

-- The components of `NatTrans.sum` on each summand hold by definition.
@[simp]
theorem sum_app_inl {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) (a : A) :
    (sum α β).app (inl a) = α.app a :=
  rfl

@[simp]
theorem sum_app_inr {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) (c : C) :
    (sum α β).app (inr c) = β.app c :=
  rfl

end NatTrans

end CategoryTheory
CategoryTheory\Triangulated\Basic.lean
/- Copyright (c) 2021 Luke Kershaw. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Luke Kershaw -/ import Mathlib.CategoryTheory.Adjunction.Limits import Mathlib.CategoryTheory.Limits.Preserves.Shapes.Products import Mathlib.CategoryTheory.Limits.Shapes.Biproducts import Mathlib.CategoryTheory.Shift.Basic /-! # Triangles This file contains the definition of triangles in an additive category with an additive shift. It also defines morphisms between these triangles. TODO: generalise this to n-angles in n-angulated categories as in https://arxiv.org/abs/1006.4592 -/ noncomputable section open CategoryTheory Limits universe v v₀ v₁ v₂ u u₀ u₁ u₂ namespace CategoryTheory.Pretriangulated open CategoryTheory.Category /- We work in a category `C` equipped with a shift. -/ variable (C : Type u) [Category.{v} C] [HasShift C ℤ] /-- A triangle in `C` is a sextuple `(X,Y,Z,f,g,h)` where `X,Y,Z` are objects of `C`, and `f : X ⟶ Y`, `g : Y ⟶ Z`, `h : Z ⟶ X⟦1⟧` are morphisms in `C`. See <https://stacks.math.columbia.edu/tag/0144>. -/ structure Triangle where mk' :: /-- the first object of a triangle -/ obj₁ : C /-- the second object of a triangle -/ obj₂ : C /-- the third object of a triangle -/ obj₃ : C /-- the first morphism of a triangle -/ mor₁ : obj₁ ⟶ obj₂ /-- the second morphism of a triangle -/ mor₂ : obj₂ ⟶ obj₃ /-- the third morphism of a triangle -/ mor₃ : obj₃ ⟶ obj₁⟦(1 : ℤ)⟧ variable {C} /-- A triangle `(X,Y,Z,f,g,h)` in `C` is defined by the morphisms `f : X ⟶ Y`, `g : Y ⟶ Z` and `h : Z ⟶ X⟦1⟧`. -/ @[simps] def Triangle.mk {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1 : ℤ)⟧) : Triangle C where obj₁ := X obj₂ := Y obj₃ := Z mor₁ := f mor₂ := g mor₃ := h section variable [HasZeroObject C] [HasZeroMorphisms C] open ZeroObject instance : Inhabited (Triangle C) := ⟨⟨0, 0, 0, 0, 0, 0⟩⟩ /-- For each object in `C`, there is a triangle of the form `(X,X,0,𝟙 X,0,0)` -/ @[simps!] 
def contractibleTriangle (X : C) : Triangle C := Triangle.mk (𝟙 X) (0 : X ⟶ 0) 0 end /-- A morphism of triangles `(X,Y,Z,f,g,h) ⟶ (X',Y',Z',f',g',h')` in `C` is a triple of morphisms `a : X ⟶ X'`, `b : Y ⟶ Y'`, `c : Z ⟶ Z'` such that `a ≫ f' = f ≫ b`, `b ≫ g' = g ≫ c`, and `a⟦1⟧' ≫ h = h' ≫ c`. In other words, we have a commutative diagram: ``` f g h X ───> Y ───> Z ───> X⟦1⟧ │ │ │ │ │a │b │c │a⟦1⟧' V V V V X' ───> Y' ───> Z' ───> X'⟦1⟧ f' g' h' ``` See <https://stacks.math.columbia.edu/tag/0144>. -/ @[ext] structure TriangleMorphism (T₁ : Triangle C) (T₂ : Triangle C) where /-- the first morphism in a triangle morphism -/ hom₁ : T₁.obj₁ ⟶ T₂.obj₁ /-- the second morphism in a triangle morphism -/ hom₂ : T₁.obj₂ ⟶ T₂.obj₂ /-- the third morphism in a triangle morphism -/ hom₃ : T₁.obj₃ ⟶ T₂.obj₃ /-- the first commutative square of a triangle morphism -/ comm₁ : T₁.mor₁ ≫ hom₂ = hom₁ ≫ T₂.mor₁ := by aesop_cat /-- the second commutative square of a triangle morphism -/ comm₂ : T₁.mor₂ ≫ hom₃ = hom₂ ≫ T₂.mor₂ := by aesop_cat /-- the third commutative square of a triangle morphism -/ comm₃ : T₁.mor₃ ≫ hom₁⟦1⟧' = hom₃ ≫ T₂.mor₃ := by aesop_cat attribute [reassoc (attr := simp)] TriangleMorphism.comm₁ TriangleMorphism.comm₂ TriangleMorphism.comm₃ /-- The identity triangle morphism. -/ @[simps] def triangleMorphismId (T : Triangle C) : TriangleMorphism T T where hom₁ := 𝟙 T.obj₁ hom₂ := 𝟙 T.obj₂ hom₃ := 𝟙 T.obj₃ instance (T : Triangle C) : Inhabited (TriangleMorphism T T) := ⟨triangleMorphismId T⟩ variable {T₁ T₂ T₃ : Triangle C} /-- Composition of triangle morphisms gives a triangle morphism. -/ @[simps] def TriangleMorphism.comp (f : TriangleMorphism T₁ T₂) (g : TriangleMorphism T₂ T₃) : TriangleMorphism T₁ T₃ where hom₁ := f.hom₁ ≫ g.hom₁ hom₂ := f.hom₂ ≫ g.hom₂ hom₃ := f.hom₃ ≫ g.hom₃ /-- Triangles with triangle morphisms form a category. 
-/
@[simps]
instance triangleCategory : Category (Triangle C) where
  Hom A B := TriangleMorphism A B
  id A := triangleMorphismId A
  comp f g := f.comp g

@[ext]
lemma Triangle.hom_ext {A B : Triangle C} (f g : A ⟶ B) (h₁ : f.hom₁ = g.hom₁)
    (h₂ : f.hom₂ = g.hom₂) (h₃ : f.hom₃ = g.hom₃) : f = g :=
  TriangleMorphism.ext h₁ h₂ h₃

-- The components of the identity morphism of a triangle are identities, by definition.
@[simp]
lemma id_hom₁ (A : Triangle C) : TriangleMorphism.hom₁ (𝟙 A) = 𝟙 _ := rfl

@[simp]
lemma id_hom₂ (A : Triangle C) : TriangleMorphism.hom₂ (𝟙 A) = 𝟙 _ := rfl

@[simp]
lemma id_hom₃ (A : Triangle C) : TriangleMorphism.hom₃ (𝟙 A) = 𝟙 _ := rfl

-- Composition of morphisms of triangles is computed componentwise, by definition.
@[simp, reassoc]
lemma comp_hom₁ {X Y Z : Triangle C} (f : X ⟶ Y) (g : Y ⟶ Z) :
    (f ≫ g).hom₁ = f.hom₁ ≫ g.hom₁ := rfl

@[simp, reassoc]
lemma comp_hom₂ {X Y Z : Triangle C} (f : X ⟶ Y) (g : Y ⟶ Z) :
    (f ≫ g).hom₂ = f.hom₂ ≫ g.hom₂ := rfl

@[simp, reassoc]
lemma comp_hom₃ {X Y Z : Triangle C} (f : X ⟶ Y) (g : Y ⟶ Z) :
    (f ≫ g).hom₃ = f.hom₃ ≫ g.hom₃ := rfl

/-- Constructor for morphisms of triangles: a morphism `A ⟶ B` of triangles is given
by morphisms between the corresponding objects which are compatible with the morphisms
of the triangles (the compatibilities can often be obtained automatically). -/
@[simps]
def Triangle.homMk (A B : Triangle C)
    (hom₁ : A.obj₁ ⟶ B.obj₁) (hom₂ : A.obj₂ ⟶ B.obj₂) (hom₃ : A.obj₃ ⟶ B.obj₃)
    (comm₁ : A.mor₁ ≫ hom₂ = hom₁ ≫ B.mor₁ := by aesop_cat)
    (comm₂ : A.mor₂ ≫ hom₃ = hom₂ ≫ B.mor₂ := by aesop_cat)
    (comm₃ : A.mor₃ ≫ hom₁⟦1⟧' = hom₃ ≫ B.mor₃ := by aesop_cat) : A ⟶ B where
  hom₁ := hom₁
  hom₂ := hom₂
  hom₃ := hom₃
  comm₁ := comm₁
  comm₂ := comm₂
  comm₃ := comm₃

/-- Constructor for isomorphisms of triangles: it suffices to give isomorphisms between
the corresponding objects which are compatible with the morphisms of the triangles;
the compatibilities for the inverse morphism are deduced from those for `hom`. -/
@[simps]
def Triangle.isoMk (A B : Triangle C)
    (iso₁ : A.obj₁ ≅ B.obj₁) (iso₂ : A.obj₂ ≅ B.obj₂) (iso₃ : A.obj₃ ≅ B.obj₃)
    (comm₁ : A.mor₁ ≫ iso₂.hom = iso₁.hom ≫ B.mor₁ := by aesop_cat)
    (comm₂ : A.mor₂ ≫ iso₃.hom = iso₂.hom ≫ B.mor₂ := by aesop_cat)
    (comm₃ : A.mor₃ ≫ iso₁.hom⟦1⟧' = iso₃.hom ≫ B.mor₃ := by aesop_cat) : A ≅ B where
  hom := Triangle.homMk _ _ iso₁.hom iso₂.hom iso₃.hom comm₁ comm₂ comm₃
  inv := Triangle.homMk _ _ iso₁.inv iso₂.inv iso₃.inv
    -- each compatibility for the inverse follows from the corresponding `commᵢ`
    -- after cancelling the monomorphism given by the forward isomorphism
    (by simp only [← cancel_mono iso₂.hom, assoc, Iso.inv_hom_id, comp_id, comm₁,
      Iso.inv_hom_id_assoc])
    (by simp only [← cancel_mono iso₃.hom, assoc, Iso.inv_hom_id, comp_id, comm₂,
      Iso.inv_hom_id_assoc])
    (by simp only [←
cancel_mono (iso₁.hom⟦(1 : ℤ)⟧'), Category.assoc, comm₃, Iso.inv_hom_id_assoc, ← Functor.map_comp, Iso.inv_hom_id, Functor.map_id, Category.comp_id]) lemma Triangle.isIso_of_isIsos {A B : Triangle C} (f : A ⟶ B) (h₁ : IsIso f.hom₁) (h₂ : IsIso f.hom₂) (h₃ : IsIso f.hom₃) : IsIso f := by let e := Triangle.isoMk A B (asIso f.hom₁) (asIso f.hom₂) (asIso f.hom₃) (by simp) (by simp) (by simp) exact (inferInstance : IsIso e.hom) @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.hom_inv_id_triangle_hom₁ {A B : Triangle C} (e : A ≅ B) : e.hom.hom₁ ≫ e.inv.hom₁ = 𝟙 _ := by rw [← comp_hom₁, e.hom_inv_id, id_hom₁] @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.hom_inv_id_triangle_hom₂ {A B : Triangle C} (e : A ≅ B) : e.hom.hom₂ ≫ e.inv.hom₂ = 𝟙 _ := by rw [← comp_hom₂, e.hom_inv_id, id_hom₂] @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.hom_inv_id_triangle_hom₃ {A B : Triangle C} (e : A ≅ B) : e.hom.hom₃ ≫ e.inv.hom₃ = 𝟙 _ := by rw [← comp_hom₃, e.hom_inv_id, id_hom₃] @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.inv_hom_id_triangle_hom₁ {A B : Triangle C} (e : A ≅ B) : e.inv.hom₁ ≫ e.hom.hom₁ = 𝟙 _ := by rw [← comp_hom₁, e.inv_hom_id, id_hom₁] @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.inv_hom_id_triangle_hom₂ {A B : Triangle C} (e : A ≅ B) : e.inv.hom₂ ≫ e.hom.hom₂ = 𝟙 _ := by rw [← comp_hom₂, e.inv_hom_id, id_hom₂] @[reassoc (attr := simp)] lemma _root_.CategoryTheory.Iso.inv_hom_id_triangle_hom₃ {A B : Triangle C} (e : A ≅ B) : e.inv.hom₃ ≫ e.hom.hom₃ = 𝟙 _ := by rw [← comp_hom₃, e.inv_hom_id, id_hom₃] lemma Triangle.eqToHom_hom₁ {A B : Triangle C} (h : A = B) : (eqToHom h).hom₁ = eqToHom (by subst h; rfl) := by subst h; rfl lemma Triangle.eqToHom_hom₂ {A B : Triangle C} (h : A = B) : (eqToHom h).hom₂ = eqToHom (by subst h; rfl) := by subst h; rfl lemma Triangle.eqToHom_hom₃ {A B : Triangle C} (h : A = B) : (eqToHom h).hom₃ = eqToHom (by subst h; rfl) := by subst h; rfl /-- The obvious triangle `X₁ ⟶ X₁ ⊞ X₂ ⟶ X₂ ⟶ 
X₁⟦1⟧`. -/ @[simps!] def binaryBiproductTriangle (X₁ X₂ : C) [HasZeroMorphisms C] [HasBinaryBiproduct X₁ X₂] : Triangle C := Triangle.mk biprod.inl (Limits.biprod.snd : X₁ ⊞ X₂ ⟶ _) 0 /-- The obvious triangle `X₁ ⟶ X₁ ⨯ X₂ ⟶ X₂ ⟶ X₁⟦1⟧`. -/ @[simps!] def binaryProductTriangle (X₁ X₂ : C) [HasZeroMorphisms C] [HasBinaryProduct X₁ X₂] : Triangle C := Triangle.mk ((Limits.prod.lift (𝟙 X₁) 0)) (Limits.prod.snd : X₁ ⨯ X₂ ⟶ _) 0 /-- The canonical isomorphism of triangles `binaryProductTriangle X₁ X₂ ≅ binaryBiproductTriangle X₁ X₂`. -/ @[simps!] def binaryProductTriangleIsoBinaryBiproductTriangle (X₁ X₂ : C) [HasZeroMorphisms C] [HasBinaryBiproduct X₁ X₂] : binaryProductTriangle X₁ X₂ ≅ binaryBiproductTriangle X₁ X₂ := Triangle.isoMk _ _ (Iso.refl _) (biprod.isoProd X₁ X₂).symm (Iso.refl _) (by aesop_cat) (by aesop_cat) (by aesop_cat) section variable {J : Type*} (T : J → Triangle C) [HasProduct (fun j => (T j).obj₁)] [HasProduct (fun j => (T j).obj₂)] [HasProduct (fun j => (T j).obj₃)] [HasProduct (fun j => (T j).obj₁⟦(1 : ℤ)⟧)] /-- The product of a family of triangles. -/ @[simps!] def productTriangle : Triangle C := Triangle.mk (Pi.map (fun j => (T j).mor₁)) (Pi.map (fun j => (T j).mor₂)) (Pi.map (fun j => (T j).mor₃) ≫ inv (piComparison _ _)) /-- A projection from the product of a family of triangles. -/ @[simps] def productTriangle.π (j : J) : productTriangle T ⟶ T j where hom₁ := Pi.π _ j hom₂ := Pi.π _ j hom₃ := Pi.π _ j comm₃ := by dsimp rw [← piComparison_comp_π, assoc, IsIso.inv_hom_id_assoc] simp only [limMap_π, Discrete.natTrans_app] /-- The fan given by `productTriangle T`. -/ @[simp] def productTriangle.fan : Fan T := Fan.mk (productTriangle T) (productTriangle.π T) /-- A family of morphisms `T' ⟶ T j` lifts to a morphism `T' ⟶ productTriangle T`. 
-/ @[simps] def productTriangle.lift {T' : Triangle C} (φ : ∀ j, T' ⟶ T j) : T' ⟶ productTriangle T where hom₁ := Pi.lift (fun j => (φ j).hom₁) hom₂ := Pi.lift (fun j => (φ j).hom₂) hom₃ := Pi.lift (fun j => (φ j).hom₃) comm₃ := by dsimp rw [← cancel_mono (piComparison _ _), assoc, assoc, assoc, IsIso.inv_hom_id, comp_id] aesop_cat /-- The triangle `productTriangle T` satisfies the universal property of the categorical product of the triangles `T`. -/ def productTriangle.isLimitFan : IsLimit (productTriangle.fan T) := mkFanLimit _ (fun s => productTriangle.lift T s.proj) (fun s j => by aesop_cat) (by intro s m hm ext1 all_goals exact Pi.hom_ext _ _ (fun j => (by simp [← hm]))) lemma productTriangle.zero₃₁ [HasZeroMorphisms C] (h : ∀ j, (T j).mor₃ ≫ (T j).mor₁⟦(1 : ℤ)⟧' = 0) : (productTriangle T).mor₃ ≫ (productTriangle T).mor₁⟦1⟧' = 0 := by have : HasProduct (fun j => (T j).obj₂⟦(1 : ℤ)⟧) := ⟨_, isLimitFanMkObjOfIsLimit (shiftFunctor C (1 : ℤ)) _ _ (productIsProduct (fun j => (T j).obj₂))⟩ dsimp change _ ≫ (Pi.lift (fun j => Pi.π _ j ≫ (T j).mor₁))⟦(1 : ℤ)⟧' = 0 rw [assoc, ← cancel_mono (piComparison _ _), zero_comp, assoc, assoc] ext j simp only [map_lift_piComparison, assoc, limit.lift_π, Fan.mk_π_app, zero_comp, Functor.map_comp, ← piComparison_comp_π_assoc, IsIso.inv_hom_id_assoc, limMap_π_assoc, Discrete.natTrans_app, h j, comp_zero] end variable (C) in /-- The functor `C ⥤ Triangle C` which sends `X` to `contractibleTriangle X`. -/ @[simps] def contractibleTriangleFunctor [HasZeroObject C] [HasZeroMorphisms C] : C ⥤ Triangle C where obj X := contractibleTriangle X map f := { hom₁ := f hom₂ := f hom₃ := 0 } namespace Triangle /-- The first projection `Triangle C ⥤ C`. -/ @[simps] def π₁ : Triangle C ⥤ C where obj T := T.obj₁ map f := f.hom₁ /-- The second projection `Triangle C ⥤ C`. -/ @[simps] def π₂ : Triangle C ⥤ C where obj T := T.obj₂ map f := f.hom₂ /-- The third projection `Triangle C ⥤ C`. 
-/
@[simps]
def π₃ : Triangle C ⥤ C where
  obj T := T.obj₃
  map f := f.hom₃

section

variable {A B : Triangle C} (φ : A ⟶ B) [IsIso φ]

-- Each component of an isomorphism of triangles is an isomorphism: the component is
-- the image of `φ` under the corresponding projection functor `π₁`/`π₂`/`π₃`, and
-- functors preserve isomorphisms.
instance : IsIso φ.hom₁ := (inferInstance : IsIso (π₁.map φ))

instance : IsIso φ.hom₂ := (inferInstance : IsIso (π₂.map φ))

instance : IsIso φ.hom₃ := (inferInstance : IsIso (π₃.map φ))

end

end Triangle

end CategoryTheory.Pretriangulated
CategoryTheory\Triangulated\Functor.lean
/- Copyright (c) 2023 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import Mathlib.CategoryTheory.Triangulated.Triangulated import Mathlib.CategoryTheory.ComposableArrows import Mathlib.CategoryTheory.Shift.CommShift /-! # Triangulated functors In this file, when `C` and `D` are categories equipped with a shift by `ℤ` and `F : C ⥤ D` is a functor which commutes with the shift, we define the induced functor `F.mapTriangle : Triangle C ⥤ Triangle D` on the categories of triangles. When `C` and `D` are pretriangulated, a triangulated functor is such a functor `F` which also sends distinguished triangles to distinguished triangles: this defines the typeclass `Functor.IsTriangulated`. -/ namespace CategoryTheory open Category Limits Pretriangulated Preadditive namespace Functor variable {C D E : Type*} [Category C] [Category D] [Category E] [HasShift C ℤ] [HasShift D ℤ] [HasShift E ℤ] (F : C ⥤ D) [F.CommShift ℤ] (G : D ⥤ E) [G.CommShift ℤ] /-- The functor `Triangle C ⥤ Triangle D` that is induced by a functor `F : C ⥤ D` which commutes with shift by `ℤ`. 
-/ @[simps] def mapTriangle : Triangle C ⥤ Triangle D where obj T := Triangle.mk (F.map T.mor₁) (F.map T.mor₂) (F.map T.mor₃ ≫ (F.commShiftIso (1 : ℤ)).hom.app T.obj₁) map f := { hom₁ := F.map f.hom₁ hom₂ := F.map f.hom₂ hom₃ := F.map f.hom₃ comm₁ := by dsimp; simp only [← F.map_comp, f.comm₁] comm₂ := by dsimp; simp only [← F.map_comp, f.comm₂] comm₃ := by dsimp [Functor.comp] simp only [Category.assoc, ← NatTrans.naturality, ← F.map_comp_assoc, f.comm₃] } instance [Faithful F] : Faithful F.mapTriangle where map_injective {X Y} f g h := by ext <;> apply F.map_injective · exact congr_arg TriangleMorphism.hom₁ h · exact congr_arg TriangleMorphism.hom₂ h · exact congr_arg TriangleMorphism.hom₃ h instance [Full F] [Faithful F] : Full F.mapTriangle where map_surjective {X Y} f := ⟨{ hom₁ := F.preimage f.hom₁ hom₂ := F.preimage f.hom₂ hom₃ := F.preimage f.hom₃ comm₁ := F.map_injective (by simpa only [mapTriangle_obj, map_comp, map_preimage] using f.comm₁) comm₂ := F.map_injective (by simpa only [mapTriangle_obj, map_comp, map_preimage] using f.comm₂) comm₃ := F.map_injective (by rw [← cancel_mono ((F.commShiftIso (1 : ℤ)).hom.app Y.obj₁)] simpa only [mapTriangle_obj, map_comp, assoc, commShiftIso_hom_naturality, map_preimage, Triangle.mk_mor₃] using f.comm₃) }, by aesop_cat⟩ section Additive variable [Preadditive C] [Preadditive D] [F.Additive] /-- The functor `F.mapTriangle` commutes with the shift. -/ @[simps!] 
noncomputable def mapTriangleCommShiftIso (n : ℤ) : Triangle.shiftFunctor C n ⋙ F.mapTriangle ≅ F.mapTriangle ⋙ Triangle.shiftFunctor D n := NatIso.ofComponents (fun T => Triangle.isoMk _ _ ((F.commShiftIso n).app _) ((F.commShiftIso n).app _) ((F.commShiftIso n).app _) (by aesop_cat) (by aesop_cat) (by dsimp simp only [map_units_smul, map_comp, Linear.units_smul_comp, assoc, Linear.comp_units_smul, ← F.commShiftIso_hom_naturality_assoc] rw [F.map_shiftFunctorComm_hom_app T.obj₁ 1 n] simp only [comp_obj, assoc, Iso.inv_hom_id_app_assoc, ← Functor.map_comp, Iso.inv_hom_id_app, map_id, comp_id])) (by aesop_cat) attribute [local simp] map_zsmul comp_zsmul zsmul_comp commShiftIso_zero commShiftIso_add commShiftIso_comp_hom_app shiftFunctorAdd'_eq_shiftFunctorAdd set_option maxHeartbeats 400000 in noncomputable instance [∀ (n : ℤ), (shiftFunctor C n).Additive] [∀ (n : ℤ), (shiftFunctor D n).Additive] : (F.mapTriangle).CommShift ℤ where iso := F.mapTriangleCommShiftIso /-- `F.mapTriangle` commutes with the rotation of triangles. -/ @[simps!] def mapTriangleRotateIso : F.mapTriangle ⋙ Pretriangulated.rotate D ≅ Pretriangulated.rotate C ⋙ F.mapTriangle := NatIso.ofComponents (fun T => Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) ((F.commShiftIso (1 : ℤ)).symm.app _) (by aesop_cat) (by aesop_cat) (by aesop_cat)) (by aesop_cat) /-- `F.mapTriangle` commutes with the inverse of the rotation of triangles. -/ @[simps!] noncomputable def mapTriangleInvRotateIso [F.Additive] : F.mapTriangle ⋙ Pretriangulated.invRotate D ≅ Pretriangulated.invRotate C ⋙ F.mapTriangle := NatIso.ofComponents (fun T => Triangle.isoMk _ _ ((F.commShiftIso (-1 : ℤ)).symm.app _) (Iso.refl _) (Iso.refl _) (by aesop_cat) (by aesop_cat) (by aesop_cat)) (by aesop_cat) variable (C) in /-- The canonical isomorphism `(𝟭 C).mapTriangle ≅ 𝟭 (Triangle C)`. -/ @[simps!] 
def mapTriangleIdIso : (𝟭 C).mapTriangle ≅ 𝟭 _ := NatIso.ofComponents (fun T ↦ Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)) /-- The canonical isomorphism `(F ⋙ G).mapTriangle ≅ F.mapTriangle ⋙ G.mapTriangle`. -/ @[simps!] def mapTriangleCompIso : (F ⋙ G).mapTriangle ≅ F.mapTriangle ⋙ G.mapTriangle := NatIso.ofComponents (fun T => Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)) /-- Two isomorphic functors `F₁` and `F₂` induce isomorphic functors `F₁.mapTriangle` and `F₂.mapTriangle` if the isomorphism `F₁ ≅ F₂` is compatible with the shifts. -/ @[simps!] def mapTriangleIso {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) [F₁.CommShift ℤ] [F₂.CommShift ℤ] [NatTrans.CommShift e.hom ℤ] : F₁.mapTriangle ≅ F₂.mapTriangle := NatIso.ofComponents (fun T => Triangle.isoMk _ _ (e.app _) (e.app _) (e.app _) (by simp) (by simp) (by dsimp simp only [assoc, NatTrans.CommShift.comm_app e.hom (1 : ℤ) T.obj₁, NatTrans.naturality_assoc])) (by aesop_cat) end Additive variable [HasZeroObject C] [HasZeroObject D] [HasZeroObject E] [Preadditive C] [Preadditive D] [Preadditive E] [∀ (n : ℤ), (shiftFunctor C n).Additive] [∀ (n : ℤ), (shiftFunctor D n).Additive] [∀ (n : ℤ), (shiftFunctor E n).Additive] [Pretriangulated C] [Pretriangulated D] [Pretriangulated E] /-- A functor which commutes with the shift by `ℤ` is triangulated if it sends distinguished triangles to distinguished triangles. 
-/ class IsTriangulated : Prop where map_distinguished (T : Triangle C) : (T ∈ distTriang C) → F.mapTriangle.obj T ∈ distTriang D lemma map_distinguished [F.IsTriangulated] (T : Triangle C) (hT : T ∈ distTriang C) : F.mapTriangle.obj T ∈ distTriang D := IsTriangulated.map_distinguished _ hT namespace IsTriangulated open ZeroObject instance (priority := 100) [F.IsTriangulated] : PreservesZeroMorphisms F where map_zero X Y := by have h₁ : (0 : X ⟶ Y) = 0 ≫ 𝟙 0 ≫ 0 := by simp have h₂ : 𝟙 (F.obj 0) = 0 := by rw [← IsZero.iff_id_eq_zero] apply Triangle.isZero₃_of_isIso₁ _ (F.map_distinguished _ (contractible_distinguished (0 : C))) dsimp infer_instance rw [h₁, F.map_comp, F.map_comp, F.map_id, h₂, zero_comp, comp_zero] noncomputable instance [F.IsTriangulated] : PreservesLimitsOfShape (Discrete WalkingPair) F := by suffices ∀ (X₁ X₃ : C), IsIso (prodComparison F X₁ X₃) by have := fun (X₁ X₃ : C) ↦ PreservesLimitPair.ofIsoProdComparison F X₁ X₃ exact ⟨fun {K} ↦ preservesLimitOfIsoDiagram F (diagramIsoPair K).symm⟩ intro X₁ X₃ let φ : F.mapTriangle.obj (binaryProductTriangle X₁ X₃) ⟶ binaryProductTriangle (F.obj X₁) (F.obj X₃) := { hom₁ := 𝟙 _ hom₂ := prodComparison F X₁ X₃ hom₃ := 𝟙 _ comm₁ := by dsimp ext · simp only [assoc, prodComparison_fst, prod.comp_lift, comp_id, comp_zero, limit.lift_π, BinaryFan.mk_pt, BinaryFan.π_app_left, BinaryFan.mk_fst, ← F.map_comp, F.map_id] · simp only [assoc, prodComparison_snd, prod.comp_lift, comp_id, comp_zero, limit.lift_π, BinaryFan.mk_pt, BinaryFan.π_app_right, BinaryFan.mk_snd, ← F.map_comp, F.map_zero] comm₂ := by simp comm₃ := by simp } exact isIso₂_of_isIso₁₃ φ (F.map_distinguished _ (binaryProductTriangle_distinguished X₁ X₃)) (binaryProductTriangle_distinguished _ _) (by dsimp; infer_instance) (by dsimp; infer_instance) instance (priority := 100) [F.IsTriangulated] : F.Additive := F.additive_of_preserves_binary_products instance : (𝟭 C).IsTriangulated where map_distinguished T hT := isomorphic_distinguished _ hT _ 
((mapTriangleIdIso C).app T) instance [F.IsTriangulated] [G.IsTriangulated] : (F ⋙ G).IsTriangulated where map_distinguished T hT := isomorphic_distinguished _ (G.map_distinguished _ (F.map_distinguished T hT)) _ ((mapTriangleCompIso F G).app T) end IsTriangulated lemma isTriangulated_of_iso {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) [F₁.CommShift ℤ] [F₂.CommShift ℤ] [NatTrans.CommShift e.hom ℤ] [F₁.IsTriangulated] : F₂.IsTriangulated where map_distinguished T hT := isomorphic_distinguished _ (F₁.map_distinguished T hT) _ ((mapTriangleIso e).app T).symm lemma isTriangulated_iff_of_iso {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) [F₁.CommShift ℤ] [F₂.CommShift ℤ] [NatTrans.CommShift e.hom ℤ] : F₁.IsTriangulated ↔ F₂.IsTriangulated := by constructor · intro exact isTriangulated_of_iso e · intro have : NatTrans.CommShift e.symm.hom ℤ := inferInstanceAs (NatTrans.CommShift e.inv ℤ) exact isTriangulated_of_iso e.symm lemma mem_mapTriangle_essImage_of_distinguished [F.IsTriangulated] [F.mapArrow.EssSurj] (T : Triangle D) (hT : T ∈ distTriang D) : ∃ (T' : Triangle C) (_ : T' ∈ distTriang C), Nonempty (F.mapTriangle.obj T' ≅ T) := by obtain ⟨X, Y, f, e₁, e₂, w⟩ : ∃ (X Y : C) (f : X ⟶ Y) (e₁ : F.obj X ≅ T.obj₁) (e₂ : F.obj Y ≅ T.obj₂), F.map f ≫ e₂.hom = e₁.hom ≫ T.mor₁ := by let e := F.mapArrow.objObjPreimageIso (Arrow.mk T.mor₁) exact ⟨_, _, _, Arrow.leftFunc.mapIso e, Arrow.rightFunc.mapIso e, e.hom.w.symm⟩ obtain ⟨W, g, h, H⟩ := distinguished_cocone_triangle f exact ⟨_, H, ⟨isoTriangleOfIso₁₂ _ _ (F.map_distinguished _ H) hT e₁ e₂ w⟩⟩ lemma isTriangulated_of_precomp [(F ⋙ G).IsTriangulated] [F.IsTriangulated] [F.mapArrow.EssSurj] : G.IsTriangulated where map_distinguished T hT := by obtain ⟨T', hT', ⟨e⟩⟩ := F.mem_mapTriangle_essImage_of_distinguished T hT exact isomorphic_distinguished _ ((F ⋙ G).map_distinguished T' hT') _ (G.mapTriangle.mapIso e.symm ≪≫ (mapTriangleCompIso F G).symm.app _) variable {F G} in lemma isTriangulated_of_precomp_iso {H : C ⥤ E} (e : F ⋙ G ≅ H) [H.CommShift ℤ] 
[H.IsTriangulated] [F.IsTriangulated] [F.mapArrow.EssSurj] [NatTrans.CommShift e.hom ℤ] : G.IsTriangulated := by have := (isTriangulated_iff_of_iso e).2 inferInstance exact isTriangulated_of_precomp F G end Functor variable {C D : Type*} [Category C] [Category D] [HasShift C ℤ] [HasShift D ℤ] [HasZeroObject C] [HasZeroObject D] [Preadditive C] [Preadditive D] [∀ (n : ℤ), (shiftFunctor C n).Additive] [∀ (n : ℤ), (shiftFunctor D n).Additive] [Pretriangulated C] [Pretriangulated D] namespace Triangulated namespace Octahedron variable {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C} {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃} {comm : u₁₂ ≫ u₂₃ = u₁₃} {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧} {h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C} {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧} {h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C} {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧} {h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C} (h : Octahedron comm h₁₂ h₂₃ h₁₃) (F : C ⥤ D) [F.CommShift ℤ] [F.IsTriangulated] /-- The image of an octahedron by a triangulated functor. -/ @[simps] def map : Octahedron (by dsimp; rw [← F.map_comp, comm]) (F.map_distinguished _ h₁₂) (F.map_distinguished _ h₂₃) (F.map_distinguished _ h₁₃) where m₁ := F.map h.m₁ m₃ := F.map h.m₃ comm₁ := by simpa using F.congr_map h.comm₁ comm₂ := by simpa using F.congr_map h.comm₂ =≫ (F.commShiftIso 1).hom.app X₁ comm₃ := by simpa using F.congr_map h.comm₃ comm₄ := by simpa using F.congr_map h.comm₄ =≫ (F.commShiftIso 1).hom.app X₂ mem := isomorphic_distinguished _ (F.map_distinguished _ h.mem) _ (Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)) end Octahedron end Triangulated open Triangulated /-- If `F : C ⥤ D` is a triangulated functor from a triangulated category, then `D` is also triangulated if tuples of composables arrows in `D` can be lifted to `C`. 
-/
lemma isTriangulated_of_essSurj_mapComposableArrows_two
    (F : C ⥤ D) [F.CommShift ℤ] [F.IsTriangulated]
    [(F.mapComposableArrows 2).EssSurj] [IsTriangulated C] : IsTriangulated D := by
  -- It suffices to produce an octahedron for any pair of composable arrows in `D`.
  apply IsTriangulated.mk
  intro Y₁ Y₂ Y₃ Z₁₂ Z₂₃ Z₁₃ u₁₂ u₂₃ u₁₃ comm v₁₂ w₁₂ h₁₂ v₂₃ w₂₃ h₂₃ v₁₃ w₁₃ h₁₃
  -- Using the essential surjectivity assumption, lift the pair of composable arrows
  -- `u₁₂`, `u₂₃` in `D` to a pair of composable arrows in `C`, up to isomorphism `e`.
  obtain ⟨α, ⟨e⟩⟩ : ∃ (α : ComposableArrows C 2),
      Nonempty ((F.mapComposableArrows 2).obj α ≅ ComposableArrows.mk₂ u₁₂ u₂₃) :=
    ⟨_, ⟨Functor.objObjPreimageIso _ _⟩⟩
  obtain ⟨X₁, X₂, X₃, f, g, rfl⟩ := ComposableArrows.mk₂_surjective α
  -- Choose distinguished triangles in `C` on `f`, `g` and `f ≫ g`.
  obtain ⟨_, _, _, h₁₂'⟩ := distinguished_cocone_triangle f
  obtain ⟨_, _, _, h₂₃'⟩ := distinguished_cocone_triangle g
  obtain ⟨_, _, _, h₁₃'⟩ := distinguished_cocone_triangle (f ≫ g)
  -- `C` is triangulated, so these triangles fit into an octahedron; map it to `D`
  -- along the triangulated functor `F` and transport it along the isomorphism `e`.
  exact ⟨Octahedron.ofIso (e₁ := (e.app 0).symm) (e₂ := (e.app 1).symm) (e₃ := (e.app 2).symm)
    (comm₁₂ := ComposableArrows.naturality' e.inv 0 1)
    (comm₂₃ := ComposableArrows.naturality' e.inv 1 2)
    (H := (someOctahedron rfl h₁₂' h₂₃' h₁₃').map F) _ _ _ _ _⟩

end CategoryTheory
CategoryTheory\Triangulated\HomologicalFunctor.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import Mathlib.Algebra.Homology.ShortComplex.Exact import Mathlib.CategoryTheory.Shift.ShiftSequence import Mathlib.CategoryTheory.Triangulated.Functor import Mathlib.CategoryTheory.Triangulated.Subcategory import Mathlib.Algebra.Homology.ExactSequence /-! # Homological functors In this file, given a functor `F : C ⥤ A` from a pretriangulated category to an abelian category, we define the type class `F.IsHomological`, which is the property that `F` sends distinguished triangles in `C` to exact sequences in `A`. If `F` has been endowed with `[F.ShiftSequence ℤ]`, then we may think of the functor `F` as a `H^0`, and then the `H^n` functors are the functors `F.shift n : C ⥤ A`: we have isomorphisms `(F.shift n).obj X ≅ F.obj (X⟦n⟧)`, but through the choice of this "shift sequence", the user may provide functors with better definitional properties. Given a triangle `T` in `C`, we define a connecting homomorphism `F.homologySequenceδ T n₀ n₁ h : (F.shift n₀).obj T.obj₃ ⟶ (F.shift n₁).obj T.obj₁` under the assumption `h : n₀ + 1 = n₁`. When `T` is distinguished, this connecting homomorphism is part of a long exact sequence `... ⟶ (F.shift n₀).obj T.obj₁ ⟶ (F.shift n₀).obj T.obj₂ ⟶ (F.shift n₀).obj T.obj₃ ⟶ ...` The exactness of this long exact sequence is given by three lemmas `F.homologySequence_exact₁`, `F.homologySequence_exact₂` and `F.homologySequence_exact₃`. If `F` is a homological functor, we define the strictly full triangulated subcategory `F.homologicalKernel`: it consists of objects `X : C` such that for all `n : ℤ`, `(F.shift n).obj X` (or `F.obj (X⟦n⟧)`) is zero. We show that a morphism `f` in `C` belongs to `F.homologicalKernel.W` (i.e. the cone of `f` is in this kernel) iff `(F.shift n).map f` is an isomorphism for all `n : ℤ`. 
Note: depending on the sources, homological functors are sometimes called cohomological functors, while certain authors use "cohomological functors" for "contravariant" functors (i.e. functors `Cᵒᵖ ⥤ A`). ## TODO * The long exact sequence in homology attached to an homological functor. ## References * [Jean-Louis Verdier, *Des catégories dérivées des catégories abéliennes*][verdier1996] -/ namespace CategoryTheory open Category Limits Pretriangulated ZeroObject Preadditive variable {C D A : Type*} [Category C] [HasShift C ℤ] [Category D] [HasZeroObject D] [HasShift D ℤ] [Preadditive D] [∀ (n : ℤ), (CategoryTheory.shiftFunctor D n).Additive] [Pretriangulated D] [Category A] namespace Functor variable (F : C ⥤ A) section Pretriangulated variable [HasZeroObject C] [Preadditive C] [∀ (n : ℤ), (CategoryTheory.shiftFunctor C n).Additive] [Pretriangulated C] [Abelian A] /-- A functor from a pretriangulated category to an abelian category is an homological functor if it sends distinguished triangles to exact sequences. 
-/
class IsHomological extends F.PreservesZeroMorphisms : Prop where
  /-- the image under `F` of the short complex attached to any distinguished triangle is exact -/
  exact (T : Triangle C) (hT : T ∈ distTriang C) :
    ((shortComplexOfDistTriangle T hT).map F).Exact

/-- A homological functor sends the short complex attached to any distinguished
triangle to an exact short complex. -/
lemma map_distinguished_exact [F.IsHomological] (T : Triangle C) (hT : T ∈ distTriang C) :
    ((shortComplexOfDistTriangle T hT).map F).Exact :=
  IsHomological.exact _ hT

-- The composition of a triangulated functor with a homological functor is homological.
instance (L : C ⥤ D) (F : D ⥤ A) [L.CommShift ℤ] [L.IsTriangulated] [F.IsHomological] :
    (L ⋙ F).IsHomological where
  exact T hT := F.map_distinguished_exact _ (L.map_distinguished T hT)

/-- In order to show that `F` is homological, it suffices to check the exactness
condition on a triangle that is isomorphic to the given distinguished triangle. -/
lemma IsHomological.mk' [F.PreservesZeroMorphisms]
    (hF : ∀ (T : Pretriangulated.Triangle C) (hT : T ∈ distTriang C),
      ∃ (T' : Pretriangulated.Triangle C) (e : T ≅ T'),
        ((shortComplexOfDistTriangle T' (isomorphic_distinguished _ hT _ e.symm)).map F).Exact) :
    F.IsHomological where
  exact T hT := by
    obtain ⟨T', e, h'⟩ := hF T hT
    exact (ShortComplex.exact_iff_of_iso
      (F.mapShortComplex.mapIso ((shortComplexOfDistTriangleIsoOfIso e hT)))).2 h'

/-- Being homological is invariant under isomorphisms of functors. -/
lemma IsHomological.of_iso {F₁ F₂ : C ⥤ A} [F₁.IsHomological] (e : F₁ ≅ F₂) :
    F₂.IsHomological :=
  have := preservesZeroMorphisms_of_iso e
  ⟨fun T hT => ShortComplex.exact_of_iso (ShortComplex.mapNatIso _ e)
    (F₁.map_distinguished_exact T hT)⟩

/-- The kernel of a homological functor `F : C ⥤ A` is the strictly full
triangulated subcategory consisting of objects `X` such that
for all `n : ℤ`, `F.obj (X⟦n⟧)` is zero. -/
def homologicalKernel [F.IsHomological] :
    Triangulated.Subcategory C :=
  Triangulated.Subcategory.mk' (fun X => ∀ (n : ℤ), IsZero (F.obj (X⟦n⟧)))
    (fun n => by
      rw [IsZero.iff_id_eq_zero, ← F.map_id, ← Functor.map_id, id_zero,
        Functor.map_zero, Functor.map_zero])
    (fun X a hX b => IsZero.of_iso (hX (a + b))
      (F.mapIso ((shiftFunctorAdd C a b).app X).symm))
    (fun T hT h₁ h₃ n =>
      (F.map_distinguished_exact _
        (Triangle.shift_distinguished T hT n)).isZero_of_both_zeros
          (IsZero.eq_of_src (h₁ n) _ _) (IsZero.eq_of_tgt (h₃ n) _ _))

instance [F.IsHomological] : ClosedUnderIsomorphisms F.homologicalKernel.P := by
  dsimp only [homologicalKernel]
  infer_instance

/-- Membership in the homological kernel of `F`, reformulated in terms of the
shifted functors `F.shift n` given by a shift sequence. -/
lemma mem_homologicalKernel_iff [F.IsHomological] [F.ShiftSequence ℤ] (X : C) :
    F.homologicalKernel.P X ↔ ∀ (n : ℤ), IsZero ((F.shift n).obj X) := by
  simp only [← fun (n : ℤ) => Iso.isZero_iff ((F.isoShift n).app X)]
  rfl

-- A homological functor preserves binary products: this is obtained by showing that
-- the binary biproduct comparison map is a monomorphism, using the exactness of the
-- image of the biproduct triangle.
noncomputable instance (priority := 100) [F.IsHomological] :
    PreservesLimitsOfShape (Discrete WalkingPair) F := by
  suffices ∀ (X₁ X₂ : C), PreservesLimit (pair X₁ X₂) F from
    ⟨fun {X} => preservesLimitOfIsoDiagram F (diagramIsoPair X).symm⟩
  intro X₁ X₂
  have : HasBinaryBiproduct (F.obj X₁) (F.obj X₂) := HasBinaryBiproducts.has_binary_biproduct _ _
  have : Mono (F.biprodComparison X₁ X₂) := by
    rw [mono_iff_cancel_zero]
    intro Z f hf
    let S := (ShortComplex.mk _ _ (biprod.inl_snd (X := X₁) (Y := X₂))).map F
    have : Mono S.f := by dsimp [S]; infer_instance
    have ex : S.Exact := F.map_distinguished_exact _
      (binaryBiproductTriangle_distinguished X₁ X₂)
    obtain ⟨g, rfl⟩ := ex.lift' f (by simpa using hf =≫ biprod.snd)
    dsimp [S] at hf ⊢
    replace hf := hf =≫ biprod.fst
    simp only [assoc, biprodComparison_fst, zero_comp, ← F.map_comp,
      biprod.inl_fst, F.map_id, comp_id] at hf
    rw [hf, zero_comp]
  have : PreservesBinaryBiproduct X₁ X₂ F := preservesBinaryBiproductOfMonoBiprodComparison _
  apply Limits.preservesBinaryProductOfPreservesBinaryBiproduct

instance (priority := 100) [F.IsHomological] : F.Additive :=
  F.additive_of_preserves_binary_products

/-- If `G ≅ L ⋙ F` is homological, where `L` is triangulated and `L.mapArrow` is
essentially surjective, then `F` is homological. -/
lemma isHomological_of_localization (L : C ⥤ D) [L.CommShift ℤ] [L.IsTriangulated]
    [L.mapArrow.EssSurj] (F : D ⥤ A) (G : C ⥤ A) (e : L ⋙ F ≅ G) [G.IsHomological] :
    F.IsHomological := by
  have : F.PreservesZeroMorphisms := preservesZeroMorphisms_of_map_zero_object
    (F.mapIso L.mapZeroObject.symm ≪≫ e.app _ ≪≫ G.mapZeroObject)
  have : (L ⋙ F).IsHomological := IsHomological.of_iso e.symm
  refine IsHomological.mk' _ (fun T hT => ?_)
  rw [L.distTriang_iff] at hT
  obtain ⟨T₀, e, hT₀⟩ := hT
  exact ⟨L.mapTriangle.obj T₀, e, (L ⋙ F).map_distinguished_exact _ hT₀⟩

end Pretriangulated

section

/-- The connecting homomorphism in the long exact sequence attached to a homological
functor and a distinguished triangle. -/
noncomputable def homologySequenceδ [F.ShiftSequence ℤ] (T : Triangle C) (n₀ n₁ : ℤ)
    (h : n₀ + 1 = n₁) : (F.shift n₀).obj T.obj₃ ⟶ (F.shift n₁).obj T.obj₁ :=
  F.shiftMap T.mor₃ n₀ n₁ (by rw [add_comm 1, h])

variable {T T'}

/-- The connecting homomorphism `homologySequenceδ` is natural in the triangle. -/
@[reassoc]
lemma homologySequenceδ_naturality [F.ShiftSequence ℤ] (T T' : Triangle C) (φ : T ⟶ T')
    (n₀ n₁ : ℤ) (h : n₀ + 1 = n₁) :
    (F.shift n₀).map φ.hom₃ ≫ F.homologySequenceδ T' n₀ n₁ h =
      F.homologySequenceδ T n₀ n₁ h ≫ (F.shift n₁).map φ.hom₁ := by
  dsimp only [homologySequenceδ]
  rw [← shiftMap_comp', ← φ.comm₃, shiftMap_comp]

variable (T)
variable [HasZeroObject C] [Preadditive C] [∀ (n : ℤ), (CategoryTheory.shiftFunctor C n).Additive]
  [Pretriangulated C] [Abelian A] [F.IsHomological]
variable [F.ShiftSequence ℤ] (T T' : Triangle C) (hT : T ∈ distTriang C)
  (hT' : T' ∈ distTriang C) (φ : T ⟶ T') (n₀ n₁ : ℤ) (h : n₀ + 1 = n₁)

/-- Vanishing of the composition `(F.shift n₀).map T.mor₂ ≫ δ` in the homology sequence. -/
@[reassoc]
lemma comp_homologySequenceδ :
    (F.shift n₀).map T.mor₂ ≫ F.homologySequenceδ T n₀ n₁ h = 0 := by
  dsimp only [homologySequenceδ]
  rw [← F.shiftMap_comp', comp_distTriang_mor_zero₂₃ _ hT, shiftMap_zero]

/-- Vanishing of the composition `δ ≫ (F.shift n₁).map T.mor₁` in the homology sequence. -/
@[reassoc]
lemma homologySequenceδ_comp :
    F.homologySequenceδ T n₀ n₁ h ≫ (F.shift n₁).map T.mor₁ = 0 := by
  dsimp only [homologySequenceδ]
  rw [← F.shiftMap_comp, comp_distTriang_mor_zero₃₁ _ hT, shiftMap_zero]

/-- Vanishing of the composition of the images of `T.mor₁` and `T.mor₂` under `F.shift n₀`. -/
@[reassoc]
lemma homologySequence_comp :
    (F.shift n₀).map T.mor₁ ≫ (F.shift n₀).map T.mor₂ = 0 := by
  rw [← Functor.map_comp, comp_distTriang_mor_zero₁₂ _ hT, Functor.map_zero]

attribute [local simp] smul_smul

/-- Exactness of the homology sequence at `(F.shift n₀).obj T.obj₂`. -/
lemma homologySequence_exact₂ :
    (ShortComplex.mk _ _ (F.homologySequence_comp T hT n₀)).Exact := by
  refine ShortComplex.exact_of_iso ?_ (F.map_distinguished_exact _
    (Triangle.shift_distinguished _ hT n₀))
  exact ShortComplex.isoMk ((F.isoShift n₀).app _) (n₀.negOnePow • ((F.isoShift n₀).app _))
    ((F.isoShift n₀).app _) (by simp) (by simp)

/-- Exactness of the homology sequence at `(F.shift n₀).obj T.obj₃`. -/
lemma homologySequence_exact₃ :
    (ShortComplex.mk _ _ (F.comp_homologySequenceδ T hT _ _ h)).Exact := by
  refine ShortComplex.exact_of_iso ?_ (F.homologySequence_exact₂ _ (rot_of_distTriang _ hT) n₀)
  exact ShortComplex.isoMk (Iso.refl _) (Iso.refl _)
    ((F.shiftIso 1 n₀ n₁ (by linarith)).app _) (by simp)
    (by simp [homologySequenceδ, shiftMap])

/-- Exactness of the homology sequence at `(F.shift n₁).obj T.obj₁`. -/
lemma homologySequence_exact₁ :
    (ShortComplex.mk _ _ (F.homologySequenceδ_comp T hT _ _ h)).Exact := by
  refine ShortComplex.exact_of_iso ?_
    (F.homologySequence_exact₂ _ (inv_rot_of_distTriang _ hT) n₁)
  refine ShortComplex.isoMk (-((F.shiftIso (-1) n₁ n₀ (by linarith)).app _)) (Iso.refl _)
    (Iso.refl _) ?_ (by simp)
  dsimp
  simp only [homologySequenceδ, neg_comp, map_neg, comp_id,
    F.shiftIso_hom_app_comp_shiftMap_of_add_eq_zero T.mor₃ (-1) (neg_add_self 1) n₀ n₁ (by omega)]

lemma homologySequence_epi_shift_map_mor₁_iff :
    Epi ((F.shift n₀).map T.mor₁) ↔ (F.shift n₀).map T.mor₂ = 0 :=
  (F.homologySequence_exact₂ T hT n₀).epi_f_iff

lemma homologySequence_mono_shift_map_mor₁_iff :
    Mono ((F.shift n₁).map T.mor₁) ↔ F.homologySequenceδ T n₀ n₁ h = 0 :=
  (F.homologySequence_exact₁ T hT n₀ n₁ h).mono_g_iff

lemma homologySequence_epi_shift_map_mor₂_iff :
    Epi ((F.shift n₀).map T.mor₂) ↔ F.homologySequenceδ T n₀ n₁ h = 0 :=
  (F.homologySequence_exact₃ T hT n₀ n₁ h).epi_f_iff

lemma homologySequence_mono_shift_map_mor₂_iff :
    Mono ((F.shift n₀).map T.mor₂) ↔ (F.shift n₀).map T.mor₁ = 0 :=
  (F.homologySequence_exact₂ T hT n₀).mono_g_iff

/-- A morphism belongs to the class of morphisms attached to the homological kernel of `F`
iff its image under every `F.shift n` is an isomorphism. -/
lemma mem_homologicalKernel_W_iff {X Y : C} (f : X ⟶ Y) :
    F.homologicalKernel.W f ↔ ∀ (n : ℤ), IsIso ((F.shift n).map f) := by
  obtain ⟨Z, g, h, hT⟩ := distinguished_cocone_triangle f
  apply (F.homologicalKernel.mem_W_iff_of_distinguished _ hT).trans
  have h₁ := fun n => (F.homologySequence_exact₃ _ hT n _ rfl).isZero_X₂_iff
  have h₂ := fun n => F.homologySequence_mono_shift_map_mor₁_iff _ hT n _ rfl
  have h₃ := fun n => F.homologySequence_epi_shift_map_mor₁_iff _ hT n
  dsimp at h₁ h₂ h₃ ⊢
  simp only [mem_homologicalKernel_iff, h₁, ← h₂, ← h₃]
  constructor
  · intro h n
    obtain ⟨m, rfl⟩ : ∃ (m : ℤ), n = m + 1 := ⟨n - 1, by simp⟩
    have := (h (m + 1)).1
    have := (h m).2
    apply isIso_of_mono_of_epi
  · intros
    constructor <;> infer_instance

open ComposableArrows

/-- The exact sequence with six terms starting from `(F.shift n₀).obj T.obj₁` until
`(F.shift n₁).obj T.obj₃` when `T` is a distinguished triangle and `F` a homological functor. -/
@[simp]
noncomputable def homologySequenceComposableArrows₅ : ComposableArrows A 5 :=
  mk₅ ((F.shift n₀).map T.mor₁) ((F.shift n₀).map T.mor₂)
    (F.homologySequenceδ T n₀ n₁ h) ((F.shift n₁).map T.mor₁) ((F.shift n₁).map T.mor₂)

/-- Exactness of the six-term sequence `homologySequenceComposableArrows₅`. -/
lemma homologySequenceComposableArrows₅_exact :
    (F.homologySequenceComposableArrows₅ T n₀ n₁ h).Exact :=
  exact_of_δ₀ (F.homologySequence_exact₂ T hT n₀).exact_toComposableArrows
    (exact_of_δ₀ (F.homologySequence_exact₃ T hT n₀ n₁ h).exact_toComposableArrows
      (exact_of_δ₀ (F.homologySequence_exact₁ T hT n₀ n₁ h).exact_toComposableArrows
        (F.homologySequence_exact₂ T hT n₁).exact_toComposableArrows))

end

end Functor

end CategoryTheory
CategoryTheory\Triangulated\Opposite.lean
/-
Copyright (c) 2023 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Shift.Opposite
import Mathlib.CategoryTheory.Shift.Pullback
import Mathlib.CategoryTheory.Triangulated.HomologicalFunctor
import Mathlib.Tactic.Linarith

/-!
# The (pre)triangulated structure on the opposite category

In this file, we shall construct the (pre)triangulated structure on the opposite
category `Cᵒᵖ` of a (pre)triangulated category `C`.

The shift on `Cᵒᵖ` is obtained by combining the constructions in the files
`CategoryTheory.Shift.Opposite` and `CategoryTheory.Shift.Pullback`.
When the user opens `CategoryTheory.Pretriangulated.Opposite`, the
category `Cᵒᵖ` is equipped with the shift by `ℤ` such that
shifting by `n : ℤ` on `Cᵒᵖ` corresponds to the shift by `-n` on `C`.
This is actually a definitional equality, but the user should not rely on this,
and instead use the isomorphism
`shiftFunctorOpIso C n m hnm : shiftFunctor Cᵒᵖ n ≅ (shiftFunctor C m).op`
where `hnm : n + m = 0`.

Some compatibilities between the shifts on `C` and `Cᵒᵖ` are also expressed through
the equivalence of categories `opShiftFunctorEquivalence C n : Cᵒᵖ ≌ Cᵒᵖ` whose
functor is `shiftFunctor Cᵒᵖ n` and whose inverse functor is `(shiftFunctor C n).op`.

If `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` is a distinguished triangle in `C`, then the triangle
`op Z ⟶ op Y ⟶ op X ⟶ (op Z)⟦1⟧` that is deduced *without introducing signs*
shall be a distinguished triangle in `Cᵒᵖ`. This is equivalent to the definition
in [Verdier's thesis, p. 96][verdier1996] which would require that the triangle
`(op X)⟦-1⟧ ⟶ op Z ⟶ op Y ⟶ op X` (without signs) is *antidistinguished*.

## References
* [Jean-Louis Verdier, *Des catégories dérivées des catégories abéliennes*][verdier1996]

-/

namespace CategoryTheory

open Category Limits Preadditive ZeroObject

variable (C : Type*) [Category C]

namespace Pretriangulated

variable [HasShift C ℤ]

namespace Opposite

/-- As it is unclear whether the opposite category `Cᵒᵖ` should always be equipped
with the shift by `ℤ` such that shifting by `n` on `Cᵒᵖ` corresponds to shifting by
`-n` on `C`, the user shall have to do `open CategoryTheory.Pretriangulated.Opposite`
in order to get this shift and the (pre)triangulated structure on `Cᵒᵖ`. -/
private abbrev OppositeShiftAux :=
  PullbackShift (OppositeShift C ℤ)
    (AddMonoidHom.mk' (fun (n : ℤ) => -n) (by intros; dsimp; omega))

/-- The category `Cᵒᵖ` is equipped with the shift such that the shift by `n` on `Cᵒᵖ`
corresponds to the shift by `-n` on `C`. -/
noncomputable scoped instance : HasShift Cᵒᵖ ℤ :=
  (inferInstance : HasShift (OppositeShiftAux C) ℤ)

-- The shift functors on `Cᵒᵖ` are additive when those on `C` are.
instance [Preadditive C] [∀ (n : ℤ), (shiftFunctor C n).Additive] (n : ℤ) :
    (shiftFunctor Cᵒᵖ n).Additive :=
  (inferInstance : (shiftFunctor (OppositeShiftAux C) n).Additive)

end Opposite

open Opposite

/-- The shift functor on the opposite category identifies to the opposite functor
of a shift functor on the original category. -/
noncomputable def shiftFunctorOpIso (n m : ℤ) (hnm : n + m = 0) :
    shiftFunctor Cᵒᵖ n ≅ (shiftFunctor C m).op := eqToIso (by
  obtain rfl : m = -n := by omega
  rfl)

variable {C}

/-- Expression of the zero-shift unit isomorphism on `Cᵒᵖ` in terms of the one on `C`. -/
lemma shiftFunctorZero_op_hom_app (X : Cᵒᵖ) :
    (shiftFunctorZero Cᵒᵖ ℤ).hom.app X =
      (shiftFunctorOpIso C 0 0 (zero_add 0)).hom.app X ≫
        ((shiftFunctorZero C ℤ).inv.app X.unop).op := by
  erw [@pullbackShiftFunctorZero_hom_app (OppositeShift C ℤ),
    oppositeShiftFunctorZero_hom_app]
  rfl

/-- Inverse version of `shiftFunctorZero_op_hom_app`. -/
lemma shiftFunctorZero_op_inv_app (X : Cᵒᵖ) :
    (shiftFunctorZero Cᵒᵖ ℤ).inv.app X =
      ((shiftFunctorZero C ℤ).hom.app X.unop).op ≫
        (shiftFunctorOpIso C 0 0 (zero_add 0)).inv.app X := by
  rw [← cancel_epi ((shiftFunctorZero Cᵒᵖ ℤ).hom.app X), Iso.hom_inv_id_app,
    shiftFunctorZero_op_hom_app, assoc, ← op_comp_assoc, Iso.hom_inv_id_app, op_id,
    id_comp, Iso.hom_inv_id_app]

/-- Expression of the additivity isomorphism of the shift on `Cᵒᵖ` in terms of the
corresponding isomorphism on `C` (for opposite integers `b₁`, `b₂`, `b₃`). -/
lemma shiftFunctorAdd'_op_hom_app (X : Cᵒᵖ) (a₁ a₂ a₃ : ℤ) (h : a₁ + a₂ = a₃)
    (b₁ b₂ b₃ : ℤ) (h₁ : a₁ + b₁ = 0) (h₂ : a₂ + b₂ = 0) (h₃ : a₃ + b₃ = 0) :
    (shiftFunctorAdd' Cᵒᵖ a₁ a₂ a₃ h).hom.app X =
      (shiftFunctorOpIso C _ _ h₃).hom.app X ≫
        ((shiftFunctorAdd' C b₁ b₂ b₃ (by omega)).inv.app X.unop).op ≫
        (shiftFunctorOpIso C _ _ h₂).inv.app _ ≫
        (shiftFunctor Cᵒᵖ a₂).map ((shiftFunctorOpIso C _ _ h₁).inv.app X) := by
  erw [@pullbackShiftFunctorAdd'_hom_app (OppositeShift C ℤ) _ _ _ _ _ _ _ X
    a₁ a₂ a₃ h b₁ b₂ b₃ (by dsimp; omega) (by dsimp; omega) (by dsimp; omega)]
  erw [oppositeShiftFunctorAdd'_hom_app]
  obtain rfl : b₁ = -a₁ := by omega
  obtain rfl : b₂ = -a₂ := by omega
  obtain rfl : b₃ = -a₃ := by omega
  rfl

/-- Inverse version of `shiftFunctorAdd'_op_hom_app`. -/
lemma shiftFunctorAdd'_op_inv_app (X : Cᵒᵖ) (a₁ a₂ a₃ : ℤ) (h : a₁ + a₂ = a₃)
    (b₁ b₂ b₃ : ℤ) (h₁ : a₁ + b₁ = 0) (h₂ : a₂ + b₂ = 0) (h₃ : a₃ + b₃ = 0) :
    (shiftFunctorAdd' Cᵒᵖ a₁ a₂ a₃ h).inv.app X =
      (shiftFunctor Cᵒᵖ a₂).map ((shiftFunctorOpIso C _ _ h₁).hom.app X) ≫
        (shiftFunctorOpIso C _ _ h₂).hom.app _ ≫
        ((shiftFunctorAdd' C b₁ b₂ b₃ (by omega)).hom.app X.unop).op ≫
        (shiftFunctorOpIso C _ _ h₃).inv.app X := by
  rw [← cancel_epi ((shiftFunctorAdd' Cᵒᵖ a₁ a₂ a₃ h).hom.app X), Iso.hom_inv_id_app,
    shiftFunctorAdd'_op_hom_app X a₁ a₂ a₃ h b₁ b₂ b₃ h₁ h₂ h₃, assoc, assoc, assoc,
    ← Functor.map_comp_assoc, Iso.inv_hom_id_app]
  erw [Functor.map_id, id_comp, Iso.inv_hom_id_app_assoc]
  rw [← op_comp_assoc, Iso.hom_inv_id_app, op_id, id_comp, Iso.hom_inv_id_app]

/-- The action of `shiftFunctor Cᵒᵖ n` on morphisms, expressed through `shiftFunctorOpIso`. -/
lemma shiftFunctor_op_map (n m : ℤ) (hnm : n + m = 0) {K L : Cᵒᵖ} (φ : K ⟶ L) :
    (shiftFunctor Cᵒᵖ n).map φ =
      (shiftFunctorOpIso C n m hnm).hom.app K ≫ ((shiftFunctor C m).map φ.unop).op ≫
        (shiftFunctorOpIso C n m hnm).inv.app L :=
  (NatIso.naturality_2 (shiftFunctorOpIso C n m hnm) φ).symm

variable (C)

/-- The autoequivalence `Cᵒᵖ ≌ Cᵒᵖ` whose functor is `shiftFunctor Cᵒᵖ n` and whose inverse
functor is `(shiftFunctor C n).op`. Do not unfold the definitions of the unit and counit
isomorphisms: the compatibilities they satisfy are stated as separate lemmas. -/
@[simps functor inverse]
noncomputable def opShiftFunctorEquivalence (n : ℤ) : Cᵒᵖ ≌ Cᵒᵖ where
  functor := shiftFunctor Cᵒᵖ n
  inverse := (shiftFunctor C n).op
  unitIso := NatIso.op (shiftFunctorCompIsoId C (-n) n n.add_left_neg) ≪≫
    isoWhiskerRight (shiftFunctorOpIso C n (-n) n.add_right_neg).symm (shiftFunctor C n).op
  counitIso := isoWhiskerLeft _ (shiftFunctorOpIso C n (-n) n.add_right_neg) ≪≫
    NatIso.op (shiftFunctorCompIsoId C n (-n) n.add_right_neg).symm
  functor_unitIso_comp X := Quiver.Hom.unop_inj (by
    dsimp [shiftFunctorOpIso]
    erw [comp_id, Functor.map_id, comp_id]
    change (shiftFunctorCompIsoId C n (-n) (add_neg_self n)).inv.app (X.unop⟦-n⟧) ≫
      ((shiftFunctorCompIsoId C (-n) n (neg_add_self n)).hom.app X.unop)⟦-n⟧' = 𝟙 _
    rw [shift_shiftFunctorCompIsoId_neg_add_self_hom_app n X.unop, Iso.inv_hom_id_app])

/-! The naturality of the unit and counit isomorphisms is restated in the following
lemmas so as to mitigate the need for `erw`. -/

@[reassoc (attr := simp)]
lemma opShiftFunctorEquivalence_unitIso_hom_naturality (n : ℤ) {X Y : Cᵒᵖ} (f : X ⟶ Y) :
    f ≫ (opShiftFunctorEquivalence C n).unitIso.hom.app Y =
      (opShiftFunctorEquivalence C n).unitIso.hom.app X ≫ (f⟦n⟧').unop⟦n⟧'.op :=
  (opShiftFunctorEquivalence C n).unitIso.hom.naturality f

@[reassoc (attr := simp)]
lemma opShiftFunctorEquivalence_unitIso_inv_naturality (n : ℤ) {X Y : Cᵒᵖ} (f : X ⟶ Y) :
    (f⟦n⟧').unop⟦n⟧'.op ≫ (opShiftFunctorEquivalence C n).unitIso.inv.app Y =
      (opShiftFunctorEquivalence C n).unitIso.inv.app X ≫ f :=
  (opShiftFunctorEquivalence C n).unitIso.inv.naturality f

@[reassoc (attr := simp)]
lemma opShiftFunctorEquivalence_counitIso_hom_naturality (n : ℤ) {X Y : Cᵒᵖ} (f : X ⟶ Y) :
    f.unop⟦n⟧'.op⟦n⟧' ≫ (opShiftFunctorEquivalence C n).counitIso.hom.app Y =
      (opShiftFunctorEquivalence C n).counitIso.hom.app X ≫ f :=
  (opShiftFunctorEquivalence C n).counitIso.hom.naturality f

@[reassoc (attr := simp)]
lemma opShiftFunctorEquivalence_counitIso_inv_naturality (n : ℤ) {X Y : Cᵒᵖ} (f : X ⟶ Y) :
    f ≫ (opShiftFunctorEquivalence C n).counitIso.inv.app Y =
      (opShiftFunctorEquivalence C n).counitIso.inv.app X ≫ f.unop⟦n⟧'.op⟦n⟧' :=
  (opShiftFunctorEquivalence C n).counitIso.inv.naturality f

variable {C}

lemma shift_unop_opShiftFunctorEquivalence_counitIso_inv_app (X : Cᵒᵖ) (n : ℤ) :
    ((opShiftFunctorEquivalence C n).counitIso.inv.app X).unop⟦n⟧' =
      ((opShiftFunctorEquivalence C n).unitIso.hom.app ((Opposite.op ((X.unop)⟦n⟧)))).unop :=
  Quiver.Hom.op_inj ((opShiftFunctorEquivalence C n).unit_app_inverse X).symm

lemma shift_unop_opShiftFunctorEquivalence_counitIso_hom_app (X : Cᵒᵖ) (n : ℤ) :
    ((opShiftFunctorEquivalence C n).counitIso.hom.app X).unop⟦n⟧' =
      ((opShiftFunctorEquivalence C n).unitIso.inv.app ((Opposite.op (X.unop⟦n⟧)))).unop :=
  Quiver.Hom.op_inj ((opShiftFunctorEquivalence C n).unitInv_app_inverse X).symm

lemma opShiftFunctorEquivalence_counitIso_inv_app_shift (X : Cᵒᵖ) (n : ℤ) :
    (opShiftFunctorEquivalence C n).counitIso.inv.app (X⟦n⟧) =
      ((opShiftFunctorEquivalence C n).unitIso.hom.app X)⟦n⟧' :=
  (opShiftFunctorEquivalence C n).counitInv_app_functor X

lemma opShiftFunctorEquivalence_counitIso_hom_app_shift (X : Cᵒᵖ) (n : ℤ) :
    (opShiftFunctorEquivalence C n).counitIso.hom.app (X⟦n⟧) =
      ((opShiftFunctorEquivalence C n).unitIso.inv.app X)⟦n⟧' :=
  (opShiftFunctorEquivalence C n).counit_app_functor X

variable (C)

namespace TriangleOpEquivalence

/-- The functor which sends a triangle `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` in `C` to the triangle
`op Z ⟶ op Y ⟶ op X ⟶ (op Z)⟦1⟧` in `Cᵒᵖ` (without introducing signs). -/
@[simps]
noncomputable def functor : (Triangle C)ᵒᵖ ⥤ Triangle Cᵒᵖ where
  obj T := Triangle.mk T.unop.mor₂.op T.unop.mor₁.op
    ((opShiftFunctorEquivalence C 1).counitIso.inv.app (Opposite.op T.unop.obj₁) ≫
      T.unop.mor₃.op⟦(1 : ℤ)⟧')
  map {T₁ T₂} φ :=
    { hom₁ := φ.unop.hom₃.op
      hom₂ := φ.unop.hom₂.op
      hom₃ := φ.unop.hom₁.op
      comm₁ := Quiver.Hom.unop_inj φ.unop.comm₂.symm
      comm₂ := Quiver.Hom.unop_inj φ.unop.comm₁.symm
      comm₃ := by
        dsimp
        rw [assoc, ← Functor.map_comp, ← op_comp, ← φ.unop.comm₃, op_comp,
          Functor.map_comp, opShiftFunctorEquivalence_counitIso_inv_naturality_assoc]
        rfl }

/-- The functor which sends a triangle `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` in `Cᵒᵖ` to the triangle
`Z.unop ⟶ Y.unop ⟶ X.unop ⟶ Z.unop⟦1⟧` in `C` (without introducing signs). -/
@[simps]
noncomputable def inverse : Triangle Cᵒᵖ ⥤ (Triangle C)ᵒᵖ where
  obj T := Opposite.op (Triangle.mk T.mor₂.unop T.mor₁.unop
    (((opShiftFunctorEquivalence C 1).unitIso.inv.app T.obj₁).unop ≫ T.mor₃.unop⟦(1 : ℤ)⟧'))
  map {T₁ T₂} φ := Quiver.Hom.op
    { hom₁ := φ.hom₃.unop
      hom₂ := φ.hom₂.unop
      hom₃ := φ.hom₁.unop
      comm₁ := Quiver.Hom.op_inj φ.comm₂.symm
      comm₂ := Quiver.Hom.op_inj φ.comm₁.symm
      comm₃ := Quiver.Hom.op_inj (by
        dsimp
        rw [assoc, ← opShiftFunctorEquivalence_unitIso_inv_naturality,
          ← op_comp_assoc, ← Functor.map_comp, ← unop_comp, ← φ.comm₃,
          unop_comp, Functor.map_comp, op_comp, assoc]) }

/-- The unit isomorphism of the equivalence
`triangleOpEquivalence C : (Triangle C)ᵒᵖ ≌ Triangle Cᵒᵖ`. -/
@[simps!]
noncomputable def unitIso : 𝟭 _ ≅ functor C ⋙ inverse C :=
  NatIso.ofComponents (fun T => Iso.op
    (Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)
      (by aesop_cat) (by aesop_cat)
      (Quiver.Hom.op_inj
        (by simp [shift_unop_opShiftFunctorEquivalence_counitIso_inv_app]))))
    (fun {T₁ T₂} f => Quiver.Hom.unop_inj (by aesop_cat))

/-- The counit isomorphism of the equivalence
`triangleOpEquivalence C : (Triangle C)ᵒᵖ ≌ Triangle Cᵒᵖ`. -/
@[simps!]
noncomputable def counitIso : inverse C ⋙ functor C ≅ 𝟭 _ :=
  NatIso.ofComponents (fun T => by
    refine Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _) ?_ ?_ ?_
    · aesop_cat
    · aesop_cat
    · dsimp
      rw [Functor.map_id, comp_id, id_comp, Functor.map_comp,
        ← opShiftFunctorEquivalence_counitIso_inv_naturality_assoc,
        opShiftFunctorEquivalence_counitIso_inv_app_shift, ← Functor.map_comp,
        Iso.hom_inv_id_app, Functor.map_id]
      simp only [Functor.id_obj, comp_id])
    (by aesop_cat)

end TriangleOpEquivalence

/-- An anti-equivalence between the categories of triangles in `C` and in `Cᵒᵖ`.
A triangle in `Cᵒᵖ` shall be distinguished iff it corresponds to a distinguished
triangle in `C` via this equivalence. -/
@[simps]
noncomputable def triangleOpEquivalence : (Triangle C)ᵒᵖ ≌ Triangle Cᵒᵖ where
  functor := TriangleOpEquivalence.functor C
  inverse := TriangleOpEquivalence.inverse C
  unitIso := TriangleOpEquivalence.unitIso C
  counitIso := TriangleOpEquivalence.counitIso C

variable [HasZeroObject C] [Preadditive C] [∀ (n : ℤ), (shiftFunctor C n).Additive]
  [Pretriangulated C]

namespace Opposite

/-- A triangle in `Cᵒᵖ` shall be distinguished iff it corresponds to a distinguished
triangle in `C` via the equivalence
`triangleOpEquivalence C : (Triangle C)ᵒᵖ ≌ Triangle Cᵒᵖ`. -/
def distinguishedTriangles : Set (Triangle Cᵒᵖ) := fun T =>
  ((triangleOpEquivalence C).inverse.obj T).unop ∈ distTriang C

variable {C}

lemma mem_distinguishedTriangles_iff (T : Triangle Cᵒᵖ) :
    T ∈ distinguishedTriangles C ↔
      ((triangleOpEquivalence C).inverse.obj T).unop ∈ distTriang C := by
  rfl

/-- A triangle in `Cᵒᵖ` is distinguished iff it is isomorphic to the image of a
distinguished triangle of `C` under `(triangleOpEquivalence C).functor`. -/
lemma mem_distinguishedTriangles_iff' (T : Triangle Cᵒᵖ) :
    T ∈ distinguishedTriangles C ↔
      ∃ (T' : Triangle C) (_ : T' ∈ distTriang C),
        Nonempty (T ≅ (triangleOpEquivalence C).functor.obj (Opposite.op T')) := by
  rw [mem_distinguishedTriangles_iff]
  constructor
  · intro hT
    exact ⟨_, hT, ⟨(triangleOpEquivalence C).counitIso.symm.app T⟩⟩
  · rintro ⟨T', hT', ⟨e⟩⟩
    refine isomorphic_distinguished _ hT' _ ?_
    exact Iso.unop ((triangleOpEquivalence C).unitIso.app (Opposite.op T') ≪≫
      (triangleOpEquivalence C).inverse.mapIso e.symm)

lemma isomorphic_distinguished (T₁ : Triangle Cᵒᵖ) (hT₁ : T₁ ∈ distinguishedTriangles C)
    (T₂ : Triangle Cᵒᵖ) (e : T₂ ≅ T₁) : T₂ ∈ distinguishedTriangles C := by
  simp only [mem_distinguishedTriangles_iff] at hT₁ ⊢
  exact Pretriangulated.isomorphic_distinguished _ hT₁ _
    ((triangleOpEquivalence C).inverse.mapIso e).unop.symm

/-- Up to rotation, the contractible triangle `X ⟶ X ⟶ 0 ⟶ X⟦1⟧` for `X : Cᵒᵖ` corresponds
to the contractible triangle for `X.unop` in `C`. -/
@[simps!]
noncomputable def contractibleTriangleIso (X : Cᵒᵖ) :
    contractibleTriangle X ≅ (triangleOpEquivalence C).functor.obj
      (Opposite.op (contractibleTriangle X.unop).invRotate) :=
  Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _)
    (IsZero.iso (isZero_zero _) (by
      dsimp
      rw [IsZero.iff_id_eq_zero]
      change (𝟙 ((0 : C)⟦(-1 : ℤ)⟧)).op = 0
      rw [← Functor.map_id, id_zero, Functor.map_zero, op_zero]))
    (by aesop_cat) (by aesop_cat) (by aesop_cat)

lemma contractible_distinguished (X : Cᵒᵖ) :
    contractibleTriangle X ∈ distinguishedTriangles C := by
  rw [mem_distinguishedTriangles_iff']
  exact ⟨_, inv_rot_of_distTriang _ (Pretriangulated.contractible_distinguished X.unop),
    ⟨contractibleTriangleIso X⟩⟩

/-- Isomorphism expressing a compatibility of the equivalence `triangleOpEquivalence C`
with the rotation of triangles. -/
noncomputable def rotateTriangleOpEquivalenceInverseObjRotateUnopIso (T : Triangle Cᵒᵖ) :
    ((triangleOpEquivalence C).inverse.obj T.rotate).unop.rotate ≅
      ((triangleOpEquivalence C).inverse.obj T).unop :=
  Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _)
    (-((opShiftFunctorEquivalence C 1).unitIso.app T.obj₁).unop)
    (by simp) (Quiver.Hom.op_inj (by aesop_cat)) (by aesop_cat)

lemma rotate_distinguished_triangle (T : Triangle Cᵒᵖ) :
    T ∈ distinguishedTriangles C ↔ T.rotate ∈ distinguishedTriangles C := by
  simp only [mem_distinguishedTriangles_iff, Pretriangulated.rotate_distinguished_triangle
    ((triangleOpEquivalence C).inverse.obj (T.rotate)).unop]
  exact distinguished_iff_of_iso (rotateTriangleOpEquivalenceInverseObjRotateUnopIso T).symm

lemma distinguished_cocone_triangle {X Y : Cᵒᵖ} (f : X ⟶ Y) :
    ∃ (Z : Cᵒᵖ) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1 : ℤ)⟧),
      Triangle.mk f g h ∈ distinguishedTriangles C := by
  obtain ⟨Z, g, h, H⟩ := Pretriangulated.distinguished_cocone_triangle₁ f.unop
  refine ⟨_, g.op, (opShiftFunctorEquivalence C 1).counitIso.inv.app (Opposite.op Z) ≫
    (shiftFunctor Cᵒᵖ (1 : ℤ)).map h.op, ?_⟩
  simp only [mem_distinguishedTriangles_iff]
  refine Pretriangulated.isomorphic_distinguished _ H _ ?_
  exact Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _) (by aesop_cat) (by aesop_cat)
    (Quiver.Hom.op_inj (by simp [shift_unop_opShiftFunctorEquivalence_counitIso_inv_app]))

lemma complete_distinguished_triangle_morphism (T₁ T₂ : Triangle Cᵒᵖ)
    (hT₁ : T₁ ∈ distinguishedTriangles C) (hT₂ : T₂ ∈ distinguishedTriangles C)
    (a : T₁.obj₁ ⟶ T₂.obj₁) (b : T₁.obj₂ ⟶ T₂.obj₂) (comm : T₁.mor₁ ≫ b = a ≫ T₂.mor₁) :
    ∃ (c : T₁.obj₃ ⟶ T₂.obj₃), T₁.mor₂ ≫ c = b ≫ T₂.mor₂ ∧
      T₁.mor₃ ≫ a⟦1⟧' = c ≫ T₂.mor₃ := by
  rw [mem_distinguishedTriangles_iff] at hT₁ hT₂
  obtain ⟨c, hc₁, hc₂⟩ := Pretriangulated.complete_distinguished_triangle_morphism₁ _ _
    hT₂ hT₁ b.unop a.unop (Quiver.Hom.op_inj comm.symm)
  dsimp at c hc₁ hc₂
  replace hc₂ := ((opShiftFunctorEquivalence C 1).unitIso.hom.app T₂.obj₁).unop ≫= hc₂
  dsimp at hc₂
  simp only [assoc, Iso.unop_hom_inv_id_app_assoc] at hc₂
  refine ⟨c.op, Quiver.Hom.unop_inj hc₁.symm, Quiver.Hom.unop_inj ?_⟩
  apply (shiftFunctor C (1 : ℤ)).map_injective
  rw [unop_comp, unop_comp, Functor.map_comp, Functor.map_comp, Quiver.Hom.unop_op, hc₂,
    ← unop_comp_assoc, ← unop_comp_assoc, ← opShiftFunctorEquivalence_unitIso_inv_naturality]
  simp

/-- The pretriangulated structure on the opposite category of
a pretriangulated category. It is a scoped instance, so that we need to
`open CategoryTheory.Pretriangulated.Opposite` in order to be able
to use it: the reason is that it relies on the definition of the shift
on the opposite category `Cᵒᵖ`, for which it is unclear whether it should
be a global instance or not. -/
scoped instance : Pretriangulated Cᵒᵖ where
  distinguishedTriangles := distinguishedTriangles C
  isomorphic_distinguished := isomorphic_distinguished
  contractible_distinguished := contractible_distinguished
  distinguished_cocone_triangle := distinguished_cocone_triangle
  rotate_distinguished_triangle := rotate_distinguished_triangle
  complete_distinguished_triangle_morphism := complete_distinguished_triangle_morphism

end Opposite

variable {C}

lemma mem_distTriang_op_iff (T : Triangle Cᵒᵖ) :
    (T ∈ distTriang Cᵒᵖ) ↔ ((triangleOpEquivalence C).inverse.obj T).unop ∈ distTriang C := by
  rfl

lemma mem_distTriang_op_iff' (T : Triangle Cᵒᵖ) :
    (T ∈ distTriang Cᵒᵖ) ↔ ∃ (T' : Triangle C) (_ : T' ∈ distTriang C),
      Nonempty (T ≅ (triangleOpEquivalence C).functor.obj (Opposite.op T')) :=
  Opposite.mem_distinguishedTriangles_iff' T

lemma op_distinguished (T : Triangle C) (hT : T ∈ distTriang C) :
    ((triangleOpEquivalence C).functor.obj (Opposite.op T)) ∈ distTriang Cᵒᵖ := by
  rw [mem_distTriang_op_iff']
  exact ⟨T, hT, ⟨Iso.refl _⟩⟩

lemma unop_distinguished (T : Triangle Cᵒᵖ) (hT : T ∈ distTriang Cᵒᵖ) :
    ((triangleOpEquivalence C).inverse.obj T).unop ∈ distTriang C := hT

end Pretriangulated

namespace Functor

open Pretriangulated.Opposite Pretriangulated

variable {C}

/-- A homological functor on `Cᵒᵖ` sends the opposite of the short complex attached
to a distinguished triangle of `C` to an exact short complex. -/
lemma map_distinguished_op_exact [HasShift C ℤ] [HasZeroObject C] [Preadditive C]
    [∀ (n : ℤ), (shiftFunctor C n).Additive] [Pretriangulated C] {A : Type*} [Category A]
    [Abelian A] (F : Cᵒᵖ ⥤ A) [F.IsHomological] (T : Triangle C) (hT : T ∈ distTriang C) :
    ((shortComplexOfDistTriangle T hT).op.map F).Exact :=
  F.map_distinguished_exact _ (op_distinguished T hT)

end Functor

end CategoryTheory
CategoryTheory\Triangulated\Pretriangulated.lean
/- Copyright (c) 2021 Luke Kershaw. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Luke Kershaw, Joël Riou -/ import Mathlib.Algebra.Homology.ShortComplex.Basic import Mathlib.CategoryTheory.Limits.Constructions.FiniteProductsOfBinaryProducts import Mathlib.CategoryTheory.Triangulated.TriangleShift /-! # Pretriangulated Categories This file contains the definition of pretriangulated categories and triangulated functors between them. ## Implementation Notes We work under the assumption that pretriangulated categories are preadditive categories, but not necessarily additive categories, as is assumed in some sources. TODO: generalise this to n-angulated categories as in https://arxiv.org/abs/1006.4592 -/ noncomputable section open CategoryTheory Preadditive Limits universe v v₀ v₁ v₂ u u₀ u₁ u₂ namespace CategoryTheory open Category Pretriangulated ZeroObject /- We work in a preadditive category `C` equipped with an additive shift. -/ variable (C : Type u) [Category.{v} C] [HasZeroObject C] [HasShift C ℤ] [Preadditive C] /-- A preadditive category `C` with an additive shift, and a class of "distinguished triangles" relative to that shift is called pretriangulated if the following hold: * Any triangle that is isomorphic to a distinguished triangle is also distinguished. * Any triangle of the form `(X,X,0,id,0,0)` is distinguished. * For any morphism `f : X ⟶ Y` there exists a distinguished triangle of the form `(X,Y,Z,f,g,h)`. * The triangle `(X,Y,Z,f,g,h)` is distinguished if and only if `(Y,Z,X⟦1⟧,g,h,-f⟦1⟧)` is. * Given a diagram: ``` f g h X ───> Y ───> Z ───> X⟦1⟧ │ │ │ │a │b │a⟦1⟧' V V V X' ───> Y' ───> Z' ───> X'⟦1⟧ f' g' h' ``` where the left square commutes, and whose rows are distinguished triangles, there exists a morphism `c : Z ⟶ Z'` such that `(a,b,c)` is a triangle morphism. 
See <https://stacks.math.columbia.edu/tag/0145> -/ class Pretriangulated [∀ n : ℤ, Functor.Additive (shiftFunctor C n)] where /-- a class of triangle which are called `distinguished` -/ distinguishedTriangles : Set (Triangle C) /-- a triangle that is isomorphic to a distinguished triangle is distinguished -/ isomorphic_distinguished : ∀ T₁ ∈ distinguishedTriangles, ∀ (T₂) (_ : T₂ ≅ T₁), T₂ ∈ distinguishedTriangles /-- obvious triangles `X ⟶ X ⟶ 0 ⟶ X⟦1⟧` are distinguished -/ contractible_distinguished : ∀ X : C, contractibleTriangle X ∈ distinguishedTriangles /-- any morphism `X ⟶ Y` is part of a distinguished triangle `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` -/ distinguished_cocone_triangle : ∀ {X Y : C} (f : X ⟶ Y), ∃ (Z : C) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1 : ℤ)⟧), Triangle.mk f g h ∈ distinguishedTriangles /-- a triangle is distinguished iff it is so after rotating it -/ rotate_distinguished_triangle : ∀ T : Triangle C, T ∈ distinguishedTriangles ↔ T.rotate ∈ distinguishedTriangles /-- given two distinguished triangle, a commutative square can be extended as morphism of triangles -/ complete_distinguished_triangle_morphism : ∀ (T₁ T₂ : Triangle C) (_ : T₁ ∈ distinguishedTriangles) (_ : T₂ ∈ distinguishedTriangles) (a : T₁.obj₁ ⟶ T₂.obj₁) (b : T₁.obj₂ ⟶ T₂.obj₂) (_ : T₁.mor₁ ≫ b = a ≫ T₂.mor₁), ∃ c : T₁.obj₃ ⟶ T₂.obj₃, T₁.mor₂ ≫ c = b ≫ T₂.mor₂ ∧ T₁.mor₃ ≫ a⟦1⟧' = c ≫ T₂.mor₃ namespace Pretriangulated variable [∀ n : ℤ, Functor.Additive (CategoryTheory.shiftFunctor C n)] [hC : Pretriangulated C] -- Porting note: increased the priority so that we can write `T ∈ distTriang C`, and -- not just `T ∈ (distTriang C)` /-- distinguished triangles in a pretriangulated category -/ notation:60 "distTriang " C => @distinguishedTriangles C _ _ _ _ _ _ variable {C} lemma distinguished_iff_of_iso {T₁ T₂ : Triangle C} (e : T₁ ≅ T₂) : (T₁ ∈ distTriang C) ↔ T₂ ∈ distTriang C := ⟨fun hT₁ => isomorphic_distinguished _ hT₁ _ e.symm, fun hT₂ => isomorphic_distinguished _ hT₂ _ e⟩ /-- Given any distinguished 
triangle `T`, then we know `T.rotate` is also distinguished. -/
theorem rot_of_distTriang (T : Triangle C) (H : T ∈ distTriang C) : T.rotate ∈ distTriang C :=
  (rotate_distinguished_triangle T).mp H

/-- Given any distinguished triangle `T`, then we know `T.inv_rotate` is also distinguished. -/
theorem inv_rot_of_distTriang (T : Triangle C) (H : T ∈ distTriang C) :
    T.invRotate ∈ distTriang C :=
  -- `T.invRotate` is distinguished iff its rotation is, and the rotation of the
  -- inverse rotation is isomorphic to `T` via `invRotCompRot`.
  (rotate_distinguished_triangle T.invRotate).mpr
    (isomorphic_distinguished T H T.invRotate.rotate (invRotCompRot.app T))

/-- Given any distinguished triangle
```
      f       g       h
  X  ───> Y  ───> Z  ───> X⟦1⟧
```
the composition `f ≫ g = 0`.
See <https://stacks.math.columbia.edu/tag/0146>
-/
@[reassoc]
theorem comp_distTriang_mor_zero₁₂ (T) (H : T ∈ (distTriang C)) : T.mor₁ ≫ T.mor₂ = 0 := by
  -- Compare `T` with the contractible triangle on `T.obj₁` using the axiom
  -- `complete_distinguished_triangle_morphism`: the middle square forces `mor₁ ≫ mor₂ = 0`.
  obtain ⟨c, hc⟩ := complete_distinguished_triangle_morphism _ _
    (contractible_distinguished T.obj₁) H (𝟙 T.obj₁) T.mor₁ rfl
  simpa only [contractibleTriangle_mor₂, zero_comp] using hc.left.symm

/-- Given any distinguished triangle
```
      f       g       h
  X  ───> Y  ───> Z  ───> X⟦1⟧
```
the composition `g ≫ h = 0`.
See <https://stacks.math.columbia.edu/tag/0146>
-/
@[reassoc]
theorem comp_distTriang_mor_zero₂₃ (T : Triangle C) (H : T ∈ distTriang C) :
    T.mor₂ ≫ T.mor₃ = 0 :=
  -- reduce to `comp_distTriang_mor_zero₁₂` by rotating the triangle once
  comp_distTriang_mor_zero₁₂ T.rotate (rot_of_distTriang T H)

/-- Given any distinguished triangle
```
      f       g       h
  X  ───> Y  ───> Z  ───> X⟦1⟧
```
the composition `h ≫ f⟦1⟧ = 0`.
See <https://stacks.math.columbia.edu/tag/0146>
-/
@[reassoc]
theorem comp_distTriang_mor_zero₃₁ (T : Triangle C) (H : T ∈ distTriang C) :
    T.mor₃ ≫ T.mor₁⟦1⟧' = 0 := by
  -- rotate twice so that the pair `(mor₃, mor₁⟦1⟧')` becomes `(mor₁, mor₂)` of the rotation
  have H₂ := rot_of_distTriang T.rotate (rot_of_distTriang T H)
  simpa using comp_distTriang_mor_zero₁₂ T.rotate.rotate H₂

/-- The short complex `T.obj₁ ⟶ T.obj₂ ⟶ T.obj₃` attached to a distinguished triangle. -/
@[simps]
def shortComplexOfDistTriangle (T : Triangle C) (hT : T ∈ distTriang C) : ShortComplex C :=
  ShortComplex.mk T.mor₁ T.mor₂ (comp_distTriang_mor_zero₁₂ _ hT)

/-- The isomorphism between the short complex attached to two isomorphic distinguished
triangles. -/
@[simps!]
def shortComplexOfDistTriangleIsoOfIso {T T' : Triangle C} (e : T ≅ T') (hT : T ∈ distTriang C) :
    shortComplexOfDistTriangle T hT ≅
      shortComplexOfDistTriangle T' (isomorphic_distinguished _ hT _ e.symm) :=
  ShortComplex.isoMk (Triangle.π₁.mapIso e) (Triangle.π₂.mapIso e) (Triangle.π₃.mapIso e)

/-- Any morphism `Y ⟶ Z` is part of a distinguished triangle `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` -/
lemma distinguished_cocone_triangle₁ {Y Z : C} (g : Y ⟶ Z) :
    ∃ (X : C) (f : X ⟶ Y) (h : Z ⟶ X⟦(1 : ℤ)⟧), Triangle.mk f g h ∈ distTriang C := by
  -- complete `g` to a distinguished triangle on the right, then rotate backwards
  obtain ⟨X', f', g', mem⟩ := distinguished_cocone_triangle g
  exact ⟨_, _, _, inv_rot_of_distTriang _ mem⟩

/-- Any morphism `Z ⟶ X⟦1⟧` is part of a distinguished triangle `X ⟶ Y ⟶ Z ⟶ X⟦1⟧` -/
lemma distinguished_cocone_triangle₂ {Z X : C} (h : Z ⟶ X⟦(1 : ℤ)⟧) :
    ∃ (Y : C) (f : X ⟶ Y) (g : Y ⟶ Z), Triangle.mk f g h ∈ distTriang C := by
  -- complete `h` on the right, then rotate backwards twice; the unit of `shiftEquiv`
  -- corrects the object `X⟦1⟧⟦-1⟧` into `X`
  obtain ⟨Y', f', g', mem⟩ := distinguished_cocone_triangle h
  let T' := (Triangle.mk h f' g').invRotate.invRotate
  refine ⟨T'.obj₂, ((shiftEquiv C (1 : ℤ)).unitIso.app X).hom ≫ T'.mor₁, T'.mor₂,
    isomorphic_distinguished _ (inv_rot_of_distTriang _ (inv_rot_of_distTriang _ mem)) _ ?_⟩
  exact Triangle.isoMk _ _ ((shiftEquiv C (1 : ℤ)).unitIso.app X) (Iso.refl _) (Iso.refl _)
    (by aesop_cat) (by aesop_cat)
    (by dsimp; simp only [shift_shiftFunctorCompIsoId_inv_app, id_comp])

/-- A commutative square involving the morphisms `mor₂` of two distinguished triangles
can be extended as morphism of triangles -/
lemma complete_distinguished_triangle_morphism₁ (T₁ T₂ : Triangle C)
    (hT₁ : T₁ ∈ distTriang C) (hT₂ : T₂ ∈ distTriang C) (b : T₁.obj₂ ⟶ T₂.obj₂)
    (c : T₁.obj₃ ⟶ T₂.obj₃) (comm : T₁.mor₂ ≫ c = b ≫ T₂.mor₂) :
    ∃ (a : T₁.obj₁ ⟶ T₂.obj₁), T₁.mor₁ ≫ b = a ≫ T₂.mor₁ ∧
      T₁.mor₃ ≫ a⟦(1 : ℤ)⟧' = c ≫ T₂.mor₃ := by
  -- apply the completion axiom to the rotated triangles; the morphism obtained there
  -- lives after a shift, so pull it back with `preimage`
  obtain ⟨a, ⟨ha₁, ha₂⟩⟩ := complete_distinguished_triangle_morphism _ _
    (rot_of_distTriang _ hT₁) (rot_of_distTriang _ hT₂) b c comm
  refine ⟨(shiftFunctor C (1 : ℤ)).preimage a, ⟨?_, ?_⟩⟩
  · apply (shiftFunctor C (1 : ℤ)).map_injective
    dsimp at ha₂
    rw [neg_comp, comp_neg, neg_inj] at ha₂
    simpa only [Functor.map_comp, Functor.map_preimage] using ha₂
  · simpa only [Functor.map_preimage] using ha₁

/-- A commutative square involving the morphisms `mor₃` of two distinguished triangles
can be extended as morphism of triangles -/
lemma complete_distinguished_triangle_morphism₂ (T₁ T₂ : Triangle C)
    (hT₁ : T₁ ∈ distTriang C) (hT₂ : T₂ ∈ distTriang C) (a : T₁.obj₁ ⟶ T₂.obj₁)
    (c : T₁.obj₃ ⟶ T₂.obj₃) (comm : T₁.mor₃ ≫ a⟦(1 : ℤ)⟧' = c ≫ T₂.mor₃) :
    ∃ (b : T₁.obj₂ ⟶ T₂.obj₂), T₁.mor₁ ≫ b = a ≫ T₂.mor₁ ∧ T₁.mor₂ ≫ c = b ≫ T₂.mor₂ := by
  -- apply the completion axiom to the inverse rotations; `c` must be shifted by `-1`
  -- to become the first component there
  obtain ⟨a, ⟨ha₁, ha₂⟩⟩ := complete_distinguished_triangle_morphism _ _
    (inv_rot_of_distTriang _ hT₁) (inv_rot_of_distTriang _ hT₂) (c⟦(-1 : ℤ)⟧') a (by
      dsimp
      simp only [neg_comp, comp_neg, ← Functor.map_comp_assoc, ← comm, Functor.map_comp,
        shift_shift_neg', Functor.id_obj, assoc, Iso.inv_hom_id_app, comp_id])
  refine ⟨a, ⟨ha₁, ?_⟩⟩
  dsimp only [Triangle.invRotate, Triangle.mk] at ha₂
  rw [← cancel_mono ((shiftEquiv C (1 : ℤ)).counitIso.inv.app T₂.obj₃), assoc, assoc, ← ha₂]
  simp only [shiftEquiv'_counitIso, shift_neg_shift', assoc, Iso.inv_hom_id_app_assoc]

/-- Obvious triangles `0 ⟶ X ⟶ X ⟶ 0⟦1⟧` are distinguished -/
lemma contractible_distinguished₁ (X : C) :
    Triangle.mk (0 : 0 ⟶ X) (𝟙 X) 0 ∈ distTriang C := by
  refine isomorphic_distinguished _ (inv_rot_of_distTriang _ (contractible_distinguished X)) _ ?_
  exact Triangle.isoMk _ _ (Functor.mapZeroObject _).symm (Iso.refl _) (Iso.refl _)
    (by aesop_cat) (by aesop_cat) (by aesop_cat)

/-- Obvious triangles `X ⟶ 0 ⟶ X⟦1⟧ ⟶ X⟦1⟧` are distinguished -/
lemma contractible_distinguished₂ (X : C) :
    Triangle.mk (0 : X ⟶ 0) 0 (𝟙 (X⟦1⟧)) ∈ distTriang C := by
  refine
    isomorphic_distinguished _
      (inv_rot_of_distTriang _ (contractible_distinguished₁ (X⟦(1 : ℤ)⟧))) _ ?_
  exact Triangle.isoMk _ _ ((shiftEquiv C (1 : ℤ)).unitIso.app X) (Iso.refl _) (Iso.refl _)
    (by aesop_cat) (by aesop_cat)
    (by dsimp; simp only [shift_shiftFunctorCompIsoId_inv_app, id_comp])

namespace Triangle

variable (T : Triangle C) (hT : T ∈ distTriang C)

-- Exactness of the functors `Hom(-, X)` and `Hom(X, -)` on a distinguished triangle:
-- a morphism vanishing after composition with one side factors through the next.

lemma yoneda_exact₂ {X : C} (f : T.obj₂ ⟶ X) (hf : T.mor₁ ≫ f = 0) :
    ∃ (g : T.obj₃ ⟶ X), f = T.mor₂ ≫ g := by
  obtain ⟨g, ⟨hg₁, _⟩⟩ := complete_distinguished_triangle_morphism T _ hT
    (contractible_distinguished₁ X) 0 f (by aesop_cat)
  exact ⟨g, by simpa using hg₁.symm⟩

lemma yoneda_exact₃ {X : C} (f : T.obj₃ ⟶ X) (hf : T.mor₂ ≫ f = 0) :
    ∃ (g : T.obj₁⟦(1 : ℤ)⟧ ⟶ X), f = T.mor₃ ≫ g :=
  yoneda_exact₂ _ (rot_of_distTriang _ hT) f hf

lemma coyoneda_exact₂ {X : C} (f : X ⟶ T.obj₂) (hf : f ≫ T.mor₂ = 0) :
    ∃ (g : X ⟶ T.obj₁), f = g ≫ T.mor₁ := by
  obtain ⟨a, ⟨ha₁, _⟩⟩ := complete_distinguished_triangle_morphism₁ _ T
    (contractible_distinguished X) hT f 0 (by aesop_cat)
  exact ⟨a, by simpa using ha₁⟩

lemma coyoneda_exact₁ {X : C} (f : X ⟶ T.obj₁⟦(1 : ℤ)⟧) (hf : f ≫ T.mor₁⟦1⟧' = 0) :
    ∃ (g : X ⟶ T.obj₃), f = g ≫ T.mor₃ :=
  coyoneda_exact₂ _ (rot_of_distTriang _ (rot_of_distTriang _ hT)) f (by aesop_cat)

lemma coyoneda_exact₃ {X : C} (f : X ⟶ T.obj₃) (hf : f ≫ T.mor₃ = 0) :
    ∃ (g : X ⟶ T.obj₂), f = g ≫ T.mor₂ :=
  coyoneda_exact₂ _ (rot_of_distTriang _ hT) f hf

-- Vanishing of one of the three morphisms of a distinguished triangle is equivalent
-- to the next morphism being an epimorphism (resp. the previous one a monomorphism).

lemma mor₃_eq_zero_iff_epi₂ : T.mor₃ = 0 ↔ Epi T.mor₂ := by
  constructor
  · intro h
    rw [epi_iff_cancel_zero]
    intro X g hg
    obtain ⟨f, rfl⟩ := yoneda_exact₃ T hT g hg
    rw [h, zero_comp]
  · intro
    rw [← cancel_epi T.mor₂, comp_distTriang_mor_zero₂₃ _ hT, comp_zero]

lemma mor₂_eq_zero_iff_epi₁ : T.mor₂ = 0 ↔ Epi T.mor₁ := by
  have h := mor₃_eq_zero_iff_epi₂ _ (inv_rot_of_distTriang _ hT)
  dsimp at h
  rw [← h, IsIso.comp_right_eq_zero]

lemma mor₁_eq_zero_iff_epi₃ : T.mor₁ = 0 ↔ Epi T.mor₃ := by
  have h := mor₃_eq_zero_iff_epi₂ _ (rot_of_distTriang _ hT)
  dsimp at h
  -- the rotated `mor₃` is `-T.mor₁⟦1⟧'`, so cancel the sign and the shift functor
  rw [← h, neg_eq_zero]
  constructor
  · intro h
    simp only [h, Functor.map_zero]
  · intro h
    rw [← (CategoryTheory.shiftFunctor C (1 : ℤ)).map_eq_zero_iff, h]

lemma mor₃_eq_zero_of_epi₂ (h : Epi T.mor₂) : T.mor₃ = 0 := (T.mor₃_eq_zero_iff_epi₂ hT).2 h

lemma mor₂_eq_zero_of_epi₁ (h : Epi T.mor₁) : T.mor₂ = 0 := (T.mor₂_eq_zero_iff_epi₁ hT).2 h

lemma mor₁_eq_zero_of_epi₃ (h : Epi T.mor₃) : T.mor₁ = 0 := (T.mor₁_eq_zero_iff_epi₃ hT).2 h

lemma epi₂ (h : T.mor₃ = 0) : Epi T.mor₂ := (T.mor₃_eq_zero_iff_epi₂ hT).1 h

lemma epi₁ (h : T.mor₂ = 0) : Epi T.mor₁ := (T.mor₂_eq_zero_iff_epi₁ hT).1 h

lemma epi₃ (h : T.mor₁ = 0) : Epi T.mor₃ := (T.mor₁_eq_zero_iff_epi₃ hT).1 h

lemma mor₁_eq_zero_iff_mono₂ : T.mor₁ = 0 ↔ Mono T.mor₂ := by
  constructor
  · intro h
    rw [mono_iff_cancel_zero]
    intro X g hg
    obtain ⟨f, rfl⟩ := coyoneda_exact₂ T hT g hg
    rw [h, comp_zero]
  · intro
    rw [← cancel_mono T.mor₂, comp_distTriang_mor_zero₁₂ _ hT, zero_comp]

lemma mor₂_eq_zero_iff_mono₃ : T.mor₂ = 0 ↔ Mono T.mor₃ :=
  mor₁_eq_zero_iff_mono₂ _ (rot_of_distTriang _ hT)

lemma mor₃_eq_zero_iff_mono₁ : T.mor₃ = 0 ↔ Mono T.mor₁ := by
  have h := mor₁_eq_zero_iff_mono₂ _ (inv_rot_of_distTriang _ hT)
  dsimp at h
  rw [← h, neg_eq_zero, IsIso.comp_right_eq_zero]
  constructor
  · intro h
    simp only [h, Functor.map_zero]
  · intro h
    rw [← (CategoryTheory.shiftFunctor C (-1 : ℤ)).map_eq_zero_iff, h]

lemma mor₁_eq_zero_of_mono₂ (h : Mono T.mor₂) : T.mor₁ = 0 := (T.mor₁_eq_zero_iff_mono₂ hT).2 h

lemma mor₂_eq_zero_of_mono₃ (h : Mono T.mor₃) : T.mor₂ = 0 := (T.mor₂_eq_zero_iff_mono₃ hT).2 h

lemma mor₃_eq_zero_of_mono₁ (h : Mono T.mor₁) : T.mor₃ = 0 := (T.mor₃_eq_zero_iff_mono₁ hT).2 h

lemma mono₂ (h : T.mor₁ = 0) : Mono T.mor₂ := (T.mor₁_eq_zero_iff_mono₂ hT).1 h

lemma mono₃ (h : T.mor₂ = 0) : Mono T.mor₃ := (T.mor₂_eq_zero_iff_mono₃ hT).1 h

lemma mono₁ (h : T.mor₃ = 0) : Mono T.mor₁ := (T.mor₃_eq_zero_iff_mono₁ hT).1 h

-- Characterisations of the vanishing of each object of a distinguished triangle
-- in terms of the vanishing of the two adjacent morphisms.

lemma isZero₂_iff : IsZero T.obj₂ ↔ (T.mor₁ = 0 ∧ T.mor₂ = 0) := by
  constructor
  · intro h
    exact ⟨h.eq_of_tgt _ _, h.eq_of_src _ _⟩
  · intro ⟨h₁, h₂⟩
    obtain ⟨f, hf⟩ := coyoneda_exact₂ T hT (𝟙 _) (by rw [h₂, comp_zero])
    rw [IsZero.iff_id_eq_zero, hf, h₁, comp_zero]

lemma isZero₁_iff : IsZero T.obj₁ ↔ (T.mor₁ = 0 ∧ T.mor₃ = 0) := by
  refine (isZero₂_iff _ (inv_rot_of_distTriang _ hT)).trans ?_
  dsimp
  simp only [neg_eq_zero, IsIso.comp_right_eq_zero, Functor.map_eq_zero_iff]
  tauto

lemma isZero₃_iff : IsZero T.obj₃ ↔ (T.mor₂ = 0 ∧ T.mor₃ = 0) := by
  refine (isZero₂_iff _ (rot_of_distTriang _ hT)).trans ?_
  dsimp
  tauto

lemma isZero₁_of_isZero₂₃ (h₂ : IsZero T.obj₂) (h₃ : IsZero T.obj₃) : IsZero T.obj₁ := by
  rw [T.isZero₁_iff hT]
  exact ⟨h₂.eq_of_tgt _ _, h₃.eq_of_src _ _⟩

lemma isZero₂_of_isZero₁₃ (h₁ : IsZero T.obj₁) (h₃ : IsZero T.obj₃) : IsZero T.obj₂ := by
  rw [T.isZero₂_iff hT]
  exact ⟨h₁.eq_of_src _ _, h₃.eq_of_tgt _ _⟩

lemma isZero₃_of_isZero₁₂ (h₁ : IsZero T.obj₁) (h₂ : IsZero T.obj₂) : IsZero T.obj₃ :=
  isZero₂_of_isZero₁₃ _ (rot_of_distTriang _ hT) h₂ (by
    dsimp
    simp only [IsZero.iff_id_eq_zero] at h₁ ⊢
    rw [← Functor.map_id, h₁, Functor.map_zero])

lemma isZero₁_iff_isIso₂ : IsZero T.obj₁ ↔ IsIso T.mor₂ := by
  rw [T.isZero₁_iff hT]
  constructor
  · intro ⟨h₁, h₃⟩
    -- `mor₂` is epi (since `mor₃ = 0`) and an explicit inverse is produced
    -- by factoring the identity through `mor₂`
    have := T.epi₂ hT h₃
    obtain ⟨f, hf⟩ := yoneda_exact₂ T hT (𝟙 _) (by rw [h₁, zero_comp])
    exact ⟨f, hf.symm, by rw [← cancel_epi T.mor₂, comp_id, ← reassoc_of% hf]⟩
  · intro
    rw [T.mor₁_eq_zero_iff_mono₂ hT, T.mor₃_eq_zero_iff_epi₂ hT]
    constructor <;> infer_instance

lemma isZero₂_iff_isIso₃ : IsZero T.obj₂ ↔ IsIso T.mor₃ :=
  isZero₁_iff_isIso₂ _ (rot_of_distTriang _ hT)

lemma isZero₃_iff_isIso₁ : IsZero T.obj₃ ↔ IsIso T.mor₁ := by
  refine Iff.trans ?_ (Triangle.isZero₁_iff_isIso₂ _ (inv_rot_of_distTriang _ hT))
  dsimp
  simp only [IsZero.iff_id_eq_zero, ← Functor.map_id, Functor.map_eq_zero_iff]

lemma isZero₁_of_isIso₂ (h : IsIso T.mor₂) : IsZero T.obj₁ := (T.isZero₁_iff_isIso₂ hT).2 h

lemma isZero₂_of_isIso₃ (h : IsIso T.mor₃) : IsZero T.obj₂ := (T.isZero₂_iff_isIso₃ hT).2 h

lemma isZero₃_of_isIso₁ (h : IsIso T.mor₁) : IsZero T.obj₃ := (T.isZero₃_iff_isIso₁
    hT).2 h

/-- Any shift of a distinguished triangle is distinguished. -/
lemma shift_distinguished (n : ℤ) :
    (CategoryTheory.shiftFunctor (Triangle C) n).obj T ∈ distTriang C := by
  revert T hT
  -- Induct on `n` via the predicate `H`, using that the shift by `1` (resp. `-1`)
  -- of a triangle is isomorphic to its triple rotation (resp. triple inverse rotation).
  let H : ℤ → Prop := fun n => ∀ (T : Triangle C) (_ : T ∈ distTriang C),
    (Triangle.shiftFunctor C n).obj T ∈ distTriang C
  change H n
  have H_zero : H 0 := fun T hT =>
    isomorphic_distinguished _ hT _ ((Triangle.shiftFunctorZero C).app T)
  have H_one : H 1 := fun T hT =>
    isomorphic_distinguished _
      (rot_of_distTriang _ (rot_of_distTriang _ (rot_of_distTriang _ hT))) _
      ((rotateRotateRotateIso C).symm.app T)
  have H_neg_one : H (-1) := fun T hT =>
    isomorphic_distinguished _
      (inv_rot_of_distTriang _ (inv_rot_of_distTriang _ (inv_rot_of_distTriang _ hT))) _
      ((invRotateInvRotateInvRotateIso C).symm.app T)
  have H_add : ∀ {a b c : ℤ}, H a → H b → a + b = c → H c := fun {a b c} ha hb hc T hT =>
    isomorphic_distinguished _ (hb _ (ha _ hT)) _
      ((Triangle.shiftFunctorAdd' C _ _ _ hc).app T)
  obtain (n|n) := n
  · induction' n with n hn
    · exact H_zero
    · exact H_add hn H_one rfl
  · induction' n with n hn
    · exact H_neg_one
    · exact H_add hn H_neg_one rfl

end Triangle

-- In a pretriangulated category, every epimorphism splits: complete `f` to a
-- distinguished triangle and factor the identity through `f`.
instance : SplitEpiCategory C where
  isSplitEpi_of_epi f hf := by
    obtain ⟨Z, g, h, hT⟩ := distinguished_cocone_triangle f
    obtain ⟨r, hr⟩ := Triangle.coyoneda_exact₂ _ hT (𝟙 _)
      (by rw [Triangle.mor₂_eq_zero_of_epi₁ _ hT hf, comp_zero])
    exact ⟨r, hr.symm⟩

-- Dually, every monomorphism splits.
instance : SplitMonoCategory C where
  isSplitMono_of_mono f hf := by
    obtain ⟨X, g, h, hT⟩ := distinguished_cocone_triangle₁ f
    obtain ⟨r, hr⟩ := Triangle.yoneda_exact₂ _ hT (𝟙 _)
      (by rw [Triangle.mor₁_eq_zero_of_mono₂ _ hT hf, zero_comp])
    exact ⟨r, hr.symm⟩

/-- The "two out of three" property for morphisms of distinguished triangles: if `hom₁`
and `hom₃` are isomorphisms, then so is `hom₂`. -/
lemma isIso₂_of_isIso₁₃ {T T' : Triangle C} (φ : T ⟶ T') (hT : T ∈ distTriang C)
    (hT' : T' ∈ distTriang C) (h₁ : IsIso φ.hom₁) (h₃ : IsIso φ.hom₃) : IsIso φ.hom₂ := by
  -- First show `φ.hom₂` is a monomorphism by a diagram chase through the two triangles.
  have : Mono φ.hom₂ := by
    rw [mono_iff_cancel_zero]
    intro A f hf
    obtain ⟨g, rfl⟩ := Triangle.coyoneda_exact₂ _ hT f (by
      rw [← cancel_mono φ.hom₃, assoc, φ.comm₂, reassoc_of% hf, zero_comp, zero_comp])
    rw [assoc] at hf
    obtain ⟨h, hh⟩ :=
      Triangle.coyoneda_exact₂ T'.invRotate (inv_rot_of_distTriang _ hT')
        (g ≫ φ.hom₁) (by dsimp; rw [assoc, ← φ.comm₁, hf])
    obtain ⟨k, rfl⟩ : ∃ (k : A ⟶ T.invRotate.obj₁), k ≫ T.invRotate.mor₁ = g := by
      refine ⟨h ≫ inv (φ.hom₃⟦(-1 : ℤ)⟧'), ?_⟩
      have eq := ((invRotate C).map φ).comm₁
      dsimp only [invRotate] at eq
      rw [← cancel_mono φ.hom₁, assoc, assoc, eq, IsIso.inv_hom_id_assoc, hh]
    erw [assoc, comp_distTriang_mor_zero₁₂ _ (inv_rot_of_distTriang _ hT), comp_zero]
  -- Then conclude by showing `Hom(A, φ.hom₂)` is bijective for every `A`.
  refine isIso_of_yoneda_map_bijective _ (fun A => ⟨?_, ?_⟩)
  · intro f₁ f₂ h
    simpa only [← cancel_mono φ.hom₂] using h
  · intro y₂
    obtain ⟨x₃, hx₃⟩ : ∃ (x₃ : A ⟶ T.obj₃), x₃ ≫ φ.hom₃ = y₂ ≫ T'.mor₂ :=
      ⟨y₂ ≫ T'.mor₂ ≫ inv φ.hom₃, by simp⟩
    obtain ⟨x₂, hx₂⟩ := Triangle.coyoneda_exact₃ _ hT x₃ (by
      rw [← cancel_mono (φ.hom₁⟦(1 : ℤ)⟧'), assoc, zero_comp, φ.comm₃, reassoc_of% hx₃,
        comp_distTriang_mor_zero₂₃ _ hT', comp_zero])
    obtain ⟨y₁, hy₁⟩ := Triangle.coyoneda_exact₂ _ hT' (y₂ - x₂ ≫ φ.hom₂) (by
      rw [sub_comp, assoc, ← φ.comm₂, ← reassoc_of% hx₂, hx₃, sub_self])
    obtain ⟨x₁, hx₁⟩ : ∃ (x₁ : A ⟶ T.obj₁), x₁ ≫ φ.hom₁ = y₁ := ⟨y₁ ≫ inv φ.hom₁, by simp⟩
    refine ⟨x₂ + x₁ ≫ T.mor₁, ?_⟩
    dsimp
    rw [add_comp, assoc, φ.comm₁, reassoc_of% hx₁, ← hy₁, add_sub_cancel]

lemma isIso₃_of_isIso₁₂ {T T' : Triangle C} (φ : T ⟶ T') (hT : T ∈ distTriang C)
    (hT' : T' ∈ distTriang C) (h₁ : IsIso φ.hom₁) (h₂ : IsIso φ.hom₂) : IsIso φ.hom₃ :=
  isIso₂_of_isIso₁₃ ((rotate C).map φ) (rot_of_distTriang _ hT) (rot_of_distTriang _ hT') h₂
    (by dsimp; infer_instance)

lemma isIso₁_of_isIso₂₃ {T T' : Triangle C} (φ : T ⟶ T') (hT : T ∈ distTriang C)
    (hT' : T' ∈ distTriang C) (h₂ : IsIso φ.hom₂) (h₃ : IsIso φ.hom₃) : IsIso φ.hom₁ :=
  isIso₂_of_isIso₁₃ ((invRotate C).map φ) (inv_rot_of_distTriang _ hT)
    (inv_rot_of_distTriang _ hT') (by dsimp; infer_instance) (by dsimp; infer_instance)

/-- Given a distinguished triangle `T` such that `T.mor₃ = 0` and the datum of morphisms
`inr : T.obj₃ ⟶ T.obj₂` and `fst : T.obj₂ ⟶ T.obj₁` satisfying suitable relations, this is the
binary biproduct data expressing that `T.obj₂` identifies to the binary biproduct
of `T.obj₁` and `T.obj₃`. See also `exists_iso_binaryBiproduct_of_distTriang`. -/
@[simps]
def binaryBiproductData (T : Triangle C) (hT : T ∈ distTriang C) (hT₀ : T.mor₃ = 0)
    (inr : T.obj₃ ⟶ T.obj₂) (inr_snd : inr ≫ T.mor₂ = 𝟙 _) (fst : T.obj₂ ⟶ T.obj₁)
    (total : fst ≫ T.mor₁ + T.mor₂ ≫ inr = 𝟙 T.obj₂) :
    BinaryBiproductData T.obj₁ T.obj₃ := by
  -- `mor₁` is mono because `mor₃ = 0`; the bicone identities are then checked
  -- by cancelling `mor₁` and using the `total` relation.
  have : Mono T.mor₁ := T.mono₁ hT hT₀
  have eq : fst ≫ T.mor₁ = 𝟙 T.obj₂ - T.mor₂ ≫ inr := by rw [← total, add_sub_cancel_right]
  exact
    { bicone :=
        { pt := T.obj₂
          fst := fst
          snd := T.mor₂
          inl := T.mor₁
          inr := inr
          inl_fst := by
            simp only [← cancel_mono T.mor₁, assoc, id_comp, eq, comp_sub, comp_id,
              comp_distTriang_mor_zero₁₂_assoc _ hT, zero_comp, sub_zero]
          inl_snd := comp_distTriang_mor_zero₁₂ _ hT
          inr_fst := by
            simp only [← cancel_mono T.mor₁, assoc, eq, comp_sub, reassoc_of% inr_snd,
              comp_id, sub_self, zero_comp]
          inr_snd := inr_snd }
      isBilimit := isBinaryBilimitOfTotal _ total }

-- A pretriangulated category has binary biproducts: complete the zero morphism
-- `X₃ ⟶ X₁⟦1⟧` to a distinguished triangle and apply `binaryBiproductData`.
instance : HasBinaryBiproducts C := ⟨fun X₁ X₃ => by
  obtain ⟨X₂, inl, snd, mem⟩ := distinguished_cocone_triangle₂ (0 : X₃ ⟶ X₁⟦(1 : ℤ)⟧)
  obtain ⟨inr : X₃ ⟶ X₂, inr_snd : 𝟙 _ = inr ≫ snd⟩ :=
    Triangle.coyoneda_exact₃ _ mem (𝟙 X₃) (by simp)
  obtain ⟨fst : X₂ ⟶ X₁, hfst : 𝟙 X₂ - snd ≫ inr = fst ≫ inl⟩ :=
    Triangle.coyoneda_exact₂ _ mem (𝟙 X₂ - snd ≫ inr) (by
      dsimp
      simp only [sub_comp, assoc, id_comp, ← inr_snd, comp_id, sub_self])
  refine ⟨⟨binaryBiproductData _ mem rfl inr inr_snd.symm fst ?_⟩⟩
  dsimp
  simp only [← hfst, sub_add_cancel]⟩

instance : HasFiniteProducts C := hasFiniteProducts_of_has_binary_and_terminal

instance : HasFiniteCoproducts C := hasFiniteCoproducts_of_has_binary_and_initial

instance : HasFiniteBiproducts C := HasFiniteBiproducts.of_hasFiniteProducts

lemma exists_iso_binaryBiproduct_of_distTriang (T : Triangle C) (hT : T ∈ distTriang C)
    (zero : T.mor₃ = 0) :
    ∃ (e : T.obj₂ ≅ T.obj₁ ⊞ T.obj₃), T.mor₁ ≫ e.hom = biprod.inl ∧
      T.mor₂ = e.hom ≫ biprod.snd := by
  -- `mor₂` is a (split) epimorphism, which provides the section needed to build
  -- the biproduct data for `T.obj₂`.
  have := T.epi₂ hT zero
  have := isSplitEpi_of_epi T.mor₂
  obtain ⟨fst, hfst⟩ := T.coyoneda_exact₂ hT (𝟙 T.obj₂ - T.mor₂ ≫ section_ T.mor₂) (by simp)
  let d := binaryBiproductData _ hT zero (section_ T.mor₂) (by simp) fst
    (by simp only [← hfst, sub_add_cancel])
  refine ⟨biprod.uniqueUpToIso _ _ d.isBilimit, ⟨?_, by simp [d]⟩⟩
  ext
  · simpa [d] using d.bicone.inl_fst
  · simpa [d] using d.bicone.inl_snd

lemma binaryBiproductTriangle_distinguished (X₁ X₂ : C) :
    binaryBiproductTriangle X₁ X₂ ∈ distTriang C := by
  obtain ⟨Y, g, h, mem⟩ := distinguished_cocone_triangle₂ (0 : X₂ ⟶ X₁⟦(1 : ℤ)⟧)
  obtain ⟨e, ⟨he₁, he₂⟩⟩ := exists_iso_binaryBiproduct_of_distTriang _ mem rfl
  dsimp at he₁ he₂
  refine isomorphic_distinguished _ mem _ (Iso.symm ?_)
  refine Triangle.isoMk _ _ (Iso.refl _) e (Iso.refl _) (by aesop_cat) (by aesop_cat)
    (by aesop_cat)

lemma binaryProductTriangle_distinguished (X₁ X₂ : C) :
    binaryProductTriangle X₁ X₂ ∈ distTriang C :=
  isomorphic_distinguished _ (binaryBiproductTriangle_distinguished X₁ X₂) _
    (binaryProductTriangleIsoBinaryBiproductTriangle X₁ X₂)

/-- A chosen extension of a commutative square into a morphism of distinguished triangles.
-/
@[simps hom₁ hom₂]
def completeDistinguishedTriangleMorphism (T₁ T₂ : Triangle C)
    (hT₁ : T₁ ∈ distTriang C) (hT₂ : T₂ ∈ distTriang C) (a : T₁.obj₁ ⟶ T₂.obj₁)
    (b : T₁.obj₂ ⟶ T₂.obj₂) (comm : T₁.mor₁ ≫ b = a ≫ T₂.mor₁) : T₁ ⟶ T₂ :=
  -- the third component is extracted (noncomputably) from the completion axiom
  have h := complete_distinguished_triangle_morphism _ _ hT₁ hT₂ a b comm
  { hom₁ := a
    hom₂ := b
    hom₃ := h.choose
    comm₁ := comm
    comm₂ := h.choose_spec.1
    comm₃ := h.choose_spec.2 }

/-- A product of distinguished triangles is distinguished -/
lemma productTriangle_distinguished {J : Type*} (T : J → Triangle C)
    (hT : ∀ j, T j ∈ distTriang C) [HasProduct (fun j => (T j).obj₁)]
    [HasProduct (fun j => (T j).obj₂)] [HasProduct (fun j => (T j).obj₃)]
    [HasProduct (fun j => (T j).obj₁⟦(1 : ℤ)⟧)] :
    productTriangle T ∈ distTriang C := by
  /- The proof proceeds by constructing a morphism of triangles `φ' : T' ⟶ productTriangle T`
    with `T'` distinguished, and such that `φ'.hom₁` and `φ'.hom₂` are identities. Then, it
    suffices to show that `φ'.hom₃` is an isomorphism, which is achieved by using Yoneda's
    lemma and diagram chases. -/
  let f₁ := Pi.map (fun j => (T j).mor₁)
  obtain ⟨Z, f₂, f₃, hT'⟩ := distinguished_cocone_triangle f₁
  let T' := Triangle.mk f₁ f₂ f₃
  change T' ∈ distTriang C at hT'
  let φ : ∀ j, T' ⟶ T j := fun j => completeDistinguishedTriangleMorphism _ _ hT' (hT j)
    (Pi.π _ j) (Pi.π _ j) (by simp [f₁, T'])
  let φ' := productTriangle.lift _ φ
  have h₁ : φ'.hom₁ = 𝟙 _ := by aesop_cat
  have h₂ : φ'.hom₂ = 𝟙 _ := by aesop_cat
  have : IsIso φ'.hom₁ := by rw [h₁]; infer_instance
  have : IsIso φ'.hom₂ := by rw [h₂]; infer_instance
  suffices IsIso φ'.hom₃ by
    have : IsIso φ' := by
      apply Triangle.isIso_of_isIsos
      all_goals infer_instance
    exact isomorphic_distinguished _ hT' _ (asIso φ').symm
  refine isIso_of_yoneda_map_bijective _ (fun A => ⟨?_, ?_⟩)
  /- the proofs by diagram chase start here -/
  · -- injectivity: it suffices that `φ'.hom₃` is a monomorphism
    suffices Mono φ'.hom₃ by
      intro a₁ a₂ ha
      simpa only [← cancel_mono φ'.hom₃] using ha
    rw [mono_iff_cancel_zero]
    intro A f hf
    have hf' : f ≫ T'.mor₃ = 0 := by
      rw [← cancel_mono (φ'.hom₁⟦1⟧'), zero_comp, assoc, φ'.comm₃, reassoc_of% hf, zero_comp]
    obtain ⟨g, hg⟩ := T'.coyoneda_exact₃ hT' f hf'
    have hg' : ∀ j, (g ≫ Pi.π _ j) ≫ (T j).mor₂ = 0 := fun j => by
      have : g ≫ T'.mor₂ ≫ φ'.hom₃ ≫ Pi.π _ j = 0 := by
        rw [← reassoc_of% hg, reassoc_of% hf, zero_comp]
      rw [φ'.comm₂_assoc, h₂, id_comp] at this
      simpa using this
    have hg'' := fun j => (T j).coyoneda_exact₂ (hT j) _ (hg' j)
    let α := fun j => (hg'' j).choose
    have hα : ∀ j, _ = α j ≫ _ := fun j => (hg'' j).choose_spec
    have hg''' : g = Pi.lift α ≫ T'.mor₁ := by
      dsimp [f₁, T']
      ext j
      rw [hα]
      simp
    rw [hg, hg''', assoc, comp_distTriang_mor_zero₁₂ _ hT', comp_zero]
  · -- surjectivity: lift `a` along `φ'.hom₃` by a chase through both triangles
    intro a
    obtain ⟨a', ha'⟩ : ∃ (a' : A ⟶ Z), a' ≫ T'.mor₃ = a ≫ (productTriangle T).mor₃ := by
      have zero : ((productTriangle T).mor₃) ≫ (shiftFunctor C 1).map T'.mor₁ = 0 := by
        rw [← cancel_mono (φ'.hom₂⟦1⟧'), zero_comp, assoc, ← Functor.map_comp, φ'.comm₁,
          h₁, id_comp, productTriangle.zero₃₁]
        intro j
        exact comp_distTriang_mor_zero₃₁ _ (hT j)
      have ⟨g, hg⟩ := T'.coyoneda_exact₁ hT' (a ≫ (productTriangle T).mor₃)
        (by rw [assoc, zero, comp_zero])
      exact ⟨g, hg.symm⟩
    have ha'' := fun (j : J) => (T j).coyoneda_exact₃ (hT j)
      ((a - a' ≫ φ'.hom₃) ≫ Pi.π _ j) (by
        simp only [sub_comp, assoc]
        erw [← (productTriangle.π T j).comm₃]
        rw [← φ'.comm₃_assoc]
        rw [reassoc_of% ha', sub_eq_zero, h₁, Functor.map_id, id_comp])
    let b := fun j => (ha'' j).choose
    have hb : ∀ j, _ = b j ≫ _ := fun j => (ha'' j).choose_spec
    have hb' : a - a' ≫ φ'.hom₃ = Pi.lift b ≫ (productTriangle T).mor₂ :=
      Limits.Pi.hom_ext _ _ (fun j => by rw [hb]; simp)
    have : (a' + (by exact Pi.lift b) ≫ T'.mor₂) ≫ φ'.hom₃ = a := by
      rw [add_comp, assoc, φ'.comm₂, h₂, id_comp, ← hb', add_sub_cancel]
    exact ⟨_, this⟩

/-- An isomorphism of the first arrows of two distinguished triangles extends to an
isomorphism of triangles (with prescribed first two components). -/
lemma exists_iso_of_arrow_iso (T₁ T₂ : Triangle C) (hT₁ : T₁ ∈ distTriang C)
    (hT₂ : T₂ ∈ distTriang C) (e : Arrow.mk T₁.mor₁ ≅ Arrow.mk T₂.mor₁) :
    ∃ (e' : T₁ ≅ T₂), e'.hom.hom₁ = e.hom.left ∧ e'.hom.hom₂ = e.hom.right := by
  let φ := completeDistinguishedTriangleMorphism T₁ T₂ hT₁ hT₂ e.hom.left e.hom.right
    e.hom.w.symm
  have : IsIso φ.hom₁ := by dsimp [φ]; infer_instance
  have : IsIso φ.hom₂ := by dsimp [φ]; infer_instance
  have : IsIso φ.hom₃ := isIso₃_of_isIso₁₂ φ hT₁ hT₂ inferInstance inferInstance
  have : IsIso φ := by
    apply Triangle.isIso_of_isIsos
    all_goals infer_instance
  exact ⟨asIso φ, by simp [φ], by simp [φ]⟩

/-- A choice of isomorphism `T₁ ≅ T₂` between two distinguished triangles
when we are given two isomorphisms `e₁ : T₁.obj₁ ≅ T₂.obj₁` and `e₂ : T₁.obj₂ ≅ T₂.obj₂`. -/
@[simps! hom_hom₁ hom_hom₂ inv_hom₁ inv_hom₂]
def isoTriangleOfIso₁₂ (T₁ T₂ : Triangle C) (hT₁ : T₁ ∈ distTriang C)
    (hT₂ : T₂ ∈ distTriang C) (e₁ : T₁.obj₁ ≅ T₂.obj₁) (e₂ : T₁.obj₂ ≅ T₂.obj₂)
    (comm : T₁.mor₁ ≫ e₂.hom = e₁.hom ≫ T₂.mor₁) : T₁ ≅ T₂ := by
  have h := exists_iso_of_arrow_iso T₁ T₂ hT₁ hT₂ (Arrow.isoMk e₁ e₂ comm.symm)
  exact Triangle.isoMk _ _ e₁ e₂ (Triangle.π₃.mapIso h.choose) comm
    (by
      have eq := h.choose_spec.2
      dsimp at eq ⊢
      conv_rhs => rw [← eq, ← TriangleMorphism.comm₂])
    (by
      have eq := h.choose_spec.1
      dsimp at eq ⊢
      conv_lhs => rw [← eq, TriangleMorphism.comm₃])

end Pretriangulated

end CategoryTheory
CategoryTheory\Triangulated\Rotate.lean
/- Copyright (c) 2021 Luke Kershaw. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Luke Kershaw
-/
import Mathlib.CategoryTheory.Preadditive.AdditiveFunctor
import Mathlib.CategoryTheory.Triangulated.Basic

/-!
# Rotate

This file adds the ability to rotate triangles and triangle morphisms.
It also shows that rotation gives an equivalence on the category of triangles.

-/

noncomputable section

open CategoryTheory

open CategoryTheory.Preadditive

open CategoryTheory.Limits

universe v v₀ v₁ v₂ u u₀ u₁ u₂

namespace CategoryTheory.Pretriangulated

open CategoryTheory.Category

variable {C : Type u} [Category.{v} C] [Preadditive C]
variable [HasShift C ℤ]
variable (X : C)

/-- If you rotate a triangle, you get another triangle.
Given a triangle of the form:
```
      f       g       h
  X  ───> Y  ───> Z  ───> X⟦1⟧
```
applying `rotate` gives a triangle of the form:
```
      g        h       -f⟦1⟧'
  Y  ───> Z  ───> X⟦1⟧ ───> Y⟦1⟧
```
-/
@[simps!]
def Triangle.rotate (T : Triangle C) : Triangle C :=
  -- note the sign: the third morphism of the rotation is the negative of `f⟦1⟧'`
  Triangle.mk T.mor₂ T.mor₃ (-T.mor₁⟦1⟧')

section

/-- Given a triangle of the form:
```
      f       g       h
  X  ───> Y  ───> Z  ───> X⟦1⟧
```
applying `invRotate` gives a triangle that can be thought of as:
```
        -h⟦-1⟧'     f       g
  Z⟦-1⟧  ───>  X  ───> Y  ───> Z
```
(note that this diagram doesn't technically fit the definition of triangle, as `Z⟦-1⟧⟦1⟧`
is not necessarily equal to `Z`, but it is isomorphic, by the `counitIso` of
`shiftEquiv C 1`) -/
@[simps!]
def Triangle.invRotate (T : Triangle C) : Triangle C :=
  Triangle.mk (-T.mor₃⟦(-1 : ℤ)⟧' ≫ (shiftEquiv C (1 : ℤ)).unitIso.inv.app _) (T.mor₁)
    (T.mor₂ ≫ (shiftEquiv C (1 : ℤ)).counitIso.inv.app _)

end

-- Make these shift-compatibility lemmas available to `simp`/`aesop_cat` within this file.
attribute [local simp] shift_shift_neg' shift_neg_shift'
  shift_shiftFunctorCompIsoId_add_neg_self_inv_app
  shift_shiftFunctorCompIsoId_add_neg_self_hom_app

variable (C)

/-- Rotating triangles gives an endofunctor on the category of triangles in `C`. -/
@[simps]
def rotate : Triangle C ⥤ Triangle C where
  obj := Triangle.rotate
  map f :=
    { hom₁ := f.hom₂
      hom₂ := f.hom₃
      hom₃ := f.hom₁⟦1⟧'
      comm₃ := by
        dsimp
        simp only [comp_neg, neg_comp, ← Functor.map_comp, f.comm₁] }

/-- The inverse rotation of triangles gives an endofunctor on the category of triangles
in `C`. -/
@[simps]
def invRotate : Triangle C ⥤ Triangle C where
  obj := Triangle.invRotate
  map f :=
    { hom₁ := f.hom₃⟦-1⟧'
      hom₂ := f.hom₁
      hom₃ := f.hom₂
      comm₁ := by
        dsimp
        simp only [neg_comp, assoc, comp_neg, neg_inj, ← Functor.map_comp_assoc, ← f.comm₃]
        rw [Functor.map_comp, assoc]
        erw [← NatTrans.naturality]
        rfl
      comm₃ := by
        erw [← reassoc_of% f.comm₂, Category.assoc, ← NatTrans.naturality]
        rfl }

variable {C}
variable [∀ n : ℤ, Functor.Additive (shiftFunctor C n)]

/-- The unit isomorphism of the auto-equivalence of categories `triangleRotation C` of
`Triangle C` given by the rotation of triangles. -/
@[simps!]
def rotCompInvRot : 𝟭 (Triangle C) ≅ rotate C ⋙ invRotate C :=
  NatIso.ofComponents fun T =>
    Triangle.isoMk _ _ ((shiftEquiv C (1 : ℤ)).unitIso.app T.obj₁) (Iso.refl _) (Iso.refl _)

/-- The counit isomorphism of the auto-equivalence of categories `triangleRotation C` of
`Triangle C` given by the rotation of triangles. -/
@[simps!]
def invRotCompRot : invRotate C ⋙ rotate C ≅ 𝟭 (Triangle C) :=
  NatIso.ofComponents fun T =>
    Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) ((shiftEquiv C (1 : ℤ)).counitIso.app T.obj₃)

variable (C)

/-- Rotating triangles gives an auto-equivalence on the category of triangles in `C`. -/
@[simps]
def triangleRotation : Equivalence (Triangle C) (Triangle C) where
  functor := rotate C
  inverse := invRotate C
  unitIso := rotCompInvRot
  counitIso := invRotCompRot

variable {C}

instance : (rotate C).IsEquivalence := by
  change (triangleRotation C).functor.IsEquivalence
  infer_instance

instance : (invRotate C).IsEquivalence := by
  change (triangleRotation C).inverse.IsEquivalence
  infer_instance

end CategoryTheory.Pretriangulated
CategoryTheory\Triangulated\Subcategory.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.ClosedUnderIsomorphisms
import Mathlib.CategoryTheory.Localization.CalculusOfFractions
import Mathlib.CategoryTheory.Localization.Triangulated
import Mathlib.CategoryTheory.Shift.Localization

/-!
# Triangulated subcategories

In this file, we introduce the notion of triangulated subcategory of
a pretriangulated category `C`. If `S : Subcategory W`, we define the
class of morphisms `S.W : MorphismProperty C` consisting of morphisms
whose "cone" belongs to `S` (up to isomorphisms). We show that `S.W`
has both calculus of left and right fractions.

## TODO

* obtain (pre)triangulated instances on the localized category with respect to `S.W`
* define the type `S.category` as `Fullsubcategory S.set` and show that it
is a pretriangulated category.

## Implementation notes

In the definition of `Triangulated.Subcategory`, we do not assume that the predicate
on objects is closed under isomorphisms (i.e. that the subcategory is "strictly full").
Part of the theory would be more convenient under this stronger assumption
(e.g. `Subcategory C` would be a lattice), but some applications require the extra
generality: for example, the subcategory of bounded below complexes in the homotopy
category of an additive category is not closed under isomorphisms.

## References
* [Jean-Louis Verdier, *Des catégories dérivées des catégories abéliennes*][verdier1996]

-/

namespace CategoryTheory

open Category Limits Preadditive ZeroObject

namespace Triangulated

open Pretriangulated

variable (C : Type*) [Category C] [HasZeroObject C] [HasShift C ℤ]
  [Preadditive C] [∀ (n : ℤ), (shiftFunctor C n).Additive] [Pretriangulated C]

/-- A triangulated subcategory of a pretriangulated category `C` consists of
a predicate `P : C → Prop` which contains a zero object, is stable by shifts, and such
that if `X₁ ⟶ X₂ ⟶ X₃ ⟶ X₁⟦1⟧` is a distinguished triangle with `X₁` and `X₃` satisfying
`P`, then `X₂` is isomorphic to an object satisfying `P`. -/
structure Subcategory where
  /-- the underlying predicate on objects of a triangulated subcategory -/
  P : C → Prop
  /-- some zero object satisfies the predicate -/
  zero' : ∃ (Z : C) (_ : IsZero Z), P Z
  /-- the predicate is stable under all shifts -/
  shift (X : C) (n : ℤ) : P X → P (X⟦n⟧)
  /-- two-out-of-three (up to isomorphism): the middle object of a distinguished
  triangle whose outer objects satisfy `P` lies in the isomorphism closure of `P` -/
  ext₂' (T : Triangle C) (_ : T ∈ distTriang C) : P T.obj₁ → P T.obj₃ → isoClosure P T.obj₂

namespace Subcategory

variable {C}
variable (S : Subcategory C)

lemma zero [ClosedUnderIsomorphisms S.P] : S.P 0 := by
  obtain ⟨X, hX, mem⟩ := S.zero'
  exact mem_of_iso _ hX.isoZero mem

/-- The closure under isomorphisms of a triangulated subcategory. -/
def isoClosure : Subcategory C where
  P := CategoryTheory.isoClosure S.P
  zero' := by
    obtain ⟨Z, hZ, hZ'⟩ := S.zero'
    exact ⟨Z, hZ, Z, hZ', ⟨Iso.refl _⟩⟩
  shift X n := by
    rintro ⟨Y, hY, ⟨e⟩⟩
    exact ⟨Y⟦n⟧, S.shift Y n hY, ⟨(shiftFunctor C n).mapIso e⟩⟩
  ext₂' := by
    -- transport the distinguished triangle along the chosen isomorphisms `e₁`, `e₃`
    -- so that its outer objects satisfy `S.P` on the nose
    rintro T hT ⟨X₁, h₁, ⟨e₁⟩⟩ ⟨X₃, h₃, ⟨e₃⟩⟩
    exact le_isoClosure _ _
      (S.ext₂' (Triangle.mk (e₁.inv ≫ T.mor₁) (T.mor₂ ≫ e₃.hom)
        (e₃.inv ≫ T.mor₃ ≫ e₁.hom⟦1⟧')) (isomorphic_distinguished _ hT _
        (Triangle.isoMk _ _ e₁.symm (Iso.refl _) e₃.symm (by aesop_cat) (by aesop_cat)
          (by
            dsimp
            simp only [assoc, Iso.cancel_iso_inv_left, ← Functor.map_comp,
              e₁.hom_inv_id, Functor.map_id, comp_id]))) h₁ h₃)

instance : ClosedUnderIsomorphisms S.isoClosure.P := by
  dsimp only [isoClosure]
  infer_instance

section

variable (P : C → Prop) (zero : P 0)
  (shift : ∀ (X : C) (n : ℤ), P X → P (X⟦n⟧))
  (ext₂ : ∀ (T : Triangle C) (_ : T ∈ distTriang C), P T.obj₁ → P T.obj₃ → P T.obj₂)

/-- An alternative constructor for "strictly full" triangulated subcategory. -/
def mk' : Subcategory C where
  P := P
  zero' := ⟨0, isZero_zero _, zero⟩
  shift := shift
  ext₂' T hT h₁ h₃ := le_isoClosure P _ (ext₂ T hT h₁ h₃)

instance : ClosedUnderIsomorphisms (mk' P zero shift ext₂).P where
  of_iso {X Y} e hX := by
    -- `Y` is the middle object of the distinguished triangle `X ⟶ Y ⟶ 0`
    refine ext₂ (Triangle.mk e.hom (0 : Y ⟶ 0) 0) ?_ hX zero
    refine isomorphic_distinguished _ (contractible_distinguished X) _ ?_
    exact Triangle.isoMk _ _ (Iso.refl _) e.symm (Iso.refl _)

end

lemma ext₂ [ClosedUnderIsomorphisms S.P] (T : Triangle C) (hT : T ∈ distTriang C)
    (h₁ : S.P T.obj₁) (h₃ : S.P T.obj₃) : S.P T.obj₂ := by
  simpa only [isoClosure_eq_self] using S.ext₂' T hT h₁ h₃

/-- Given `S : Triangulated.Subcategory C`, this is the class of morphisms on `C` which
consists of morphisms whose cone satisfies `S.P`.
-/
def W : MorphismProperty C := fun X Y f =>
  ∃ (Z : C) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1 : ℤ)⟧) (_ : Triangle.mk f g h ∈ distTriang C), S.P Z

lemma W_iff {X Y : C} (f : X ⟶ Y) :
    S.W f ↔ ∃ (Z : C) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1 : ℤ)⟧)
      (_ : Triangle.mk f g h ∈ distTriang C), S.P Z := by rfl

/-- `S.W g` can equivalently be tested using a distinguished triangle in which `g` is
the second morphism and the first object satisfies `S.P`. -/
lemma W_iff' {Y Z : C} (g : Y ⟶ Z) :
    S.W g ↔ ∃ (X : C) (f : X ⟶ Y) (h : Z ⟶ X⟦(1 : ℤ)⟧)
      (_ : Triangle.mk f g h ∈ distTriang C), S.P X := by
  rw [S.W_iff]
  constructor
  · rintro ⟨Z, g, h, H, mem⟩
    exact ⟨_, _, _, inv_rot_of_distTriang _ H, S.shift _ (-1) mem⟩
  · rintro ⟨Z, g, h, H, mem⟩
    exact ⟨_, _, _, rot_of_distTriang _ H, S.shift _ 1 mem⟩

lemma W.mk {T : Triangle C} (hT : T ∈ distTriang C) (h : S.P T.obj₃) : S.W T.mor₁ :=
  ⟨_, _, _, hT, h⟩

lemma W.mk' {T : Triangle C} (hT : T ∈ distTriang C) (h : S.P T.obj₁) : S.W T.mor₂ := by
  rw [W_iff']
  exact ⟨_, _, _, hT, h⟩

lemma isoClosure_W : S.isoClosure.W = S.W := by
  ext X Y f
  constructor
  · rintro ⟨Z, g, h, mem, ⟨Z', hZ', ⟨e⟩⟩⟩
    refine ⟨Z', g ≫ e.hom, e.inv ≫ h, isomorphic_distinguished _ mem _ ?_, hZ'⟩
    exact Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) e.symm
  · rintro ⟨Z, g, h, mem, hZ⟩
    exact ⟨Z, g, h, mem, le_isoClosure _ _ hZ⟩

instance respectsIso_W : S.W.RespectsIso where
  precomp := by
    rintro X' X Y e f ⟨Z, g, h, mem, mem'⟩
    refine ⟨Z, g, h ≫ e.inv⟦(1 : ℤ)⟧', isomorphic_distinguished _ mem _ ?_, mem'⟩
    refine Triangle.isoMk _ _ e (Iso.refl _) (Iso.refl _) (by aesop_cat) (by aesop_cat) ?_
    dsimp
    simp only [assoc, ← Functor.map_comp, e.inv_hom_id, Functor.map_id, comp_id, id_comp]
  postcomp := by
    rintro X Y Y' e f ⟨Z, g, h, mem, mem'⟩
    refine ⟨Z, e.inv ≫ g, h, isomorphic_distinguished _ mem _ ?_, mem'⟩
    exact Triangle.isoMk _ _ (Iso.refl _) e.symm (Iso.refl _)

instance : S.W.ContainsIdentities := by
  rw [← isoClosure_W]
  exact ⟨fun X => ⟨_, _, _, contractible_distinguished X, zero _⟩⟩

lemma W_of_isIso {X Y : C} (f : X ⟶ Y) [IsIso f] : S.W f := by
  refine (S.W.arrow_mk_iso_iff ?_).1 (MorphismProperty.id_mem _ X)
  exact Arrow.isoMk (Iso.refl _) (asIso f)

lemma smul_mem_W_iff {X Y : C} (f : X ⟶ Y) (n : ℤˣ) : S.W (n • f) ↔ S.W f :=
  S.W.arrow_mk_iso_iff (Arrow.isoMk (n • (Iso.refl _)) (Iso.refl _))

variable {S}

lemma W.shift {X₁ X₂ : C} {f : X₁ ⟶ X₂} (hf : S.W f) (n : ℤ) : S.W (f⟦n⟧') := by
  -- shifting a distinguished triangle introduces a sign `n.negOnePow`,
  -- which is harmless by `smul_mem_W_iff`
  rw [← smul_mem_W_iff _ _ (n.negOnePow)]
  obtain ⟨X₃, g, h, hT, mem⟩ := hf
  exact ⟨_, _, _, Pretriangulated.Triangle.shift_distinguished _ hT n, S.shift _ _ mem⟩

lemma W.unshift {X₁ X₂ : C} {f : X₁ ⟶ X₂} {n : ℤ} (hf : S.W (f⟦n⟧')) : S.W f :=
  (S.W.arrow_mk_iso_iff
    (Arrow.isoOfNatIso (shiftEquiv C n).unitIso (Arrow.mk f))).2 (hf.shift (-n))

instance : S.W.IsCompatibleWithShift ℤ where
  condition n := by
    ext K L f
    exact ⟨fun hf => hf.unshift, fun hf => hf.shift n⟩

instance [IsTriangulated C] : S.W.IsMultiplicative where
  comp_mem := by
    -- closure under composition follows from the octahedron axiom
    rw [← isoClosure_W]
    rintro X₁ X₂ X₃ u₁₂ u₂₃ ⟨Z₁₂, v₁₂, w₁₂, H₁₂, mem₁₂⟩ ⟨Z₂₃, v₂₃, w₂₃, H₂₃, mem₂₃⟩
    obtain ⟨Z₁₃, v₁₃, w₁₂, H₁₃⟩ := distinguished_cocone_triangle (u₁₂ ≫ u₂₃)
    exact ⟨_, _, _, H₁₃, S.isoClosure.ext₂ _ (someOctahedron rfl H₁₂ H₂₃ H₁₃).mem
      mem₁₂ mem₂₃⟩

variable (S)

lemma mem_W_iff_of_distinguished
    [ClosedUnderIsomorphisms S.P] (T : Triangle C) (hT : T ∈ distTriang C) :
    S.W T.mor₁ ↔ S.P T.obj₃ := by
  constructor
  · rintro ⟨Z, g, h, hT', mem⟩
    obtain ⟨e, _⟩ := exists_iso_of_arrow_iso _ _ hT' hT (Iso.refl _)
    exact mem_of_iso S.P (Triangle.π₃.mapIso e) mem
  · intro h
    exact ⟨_, _, _, hT, h⟩

instance [IsTriangulated C] : S.W.HasLeftCalculusOfFractions where
  exists_leftFraction X Y φ := by
    obtain ⟨Z, f, g, H, mem⟩ := φ.hs
    obtain ⟨Y', s', f', mem'⟩ := distinguished_cocone_triangle₂ (g ≫ φ.f⟦1⟧')
    obtain ⟨b, ⟨hb₁, _⟩⟩ := complete_distinguished_triangle_morphism₂ _ _ H mem' φ.f
      (𝟙 Z) (by simp)
    exact ⟨MorphismProperty.LeftFraction.mk b s' ⟨_, _, _, mem', mem⟩, hb₁.symm⟩
  ext := by
    rintro X' X Y f₁ f₂ s ⟨Z, g, h, H, mem⟩ hf₁
    have hf₂ : s ≫ (f₁ - f₂) = 0 := by rw [comp_sub, hf₁, sub_self]
    obtain ⟨q, hq⟩ := Triangle.yoneda_exact₂ _ H _ hf₂
    obtain ⟨Y', r, t, mem'⟩ := distinguished_cocone_triangle q
    refine ⟨Y', r, ?_, ?_⟩
    · exact ⟨_, _, _, rot_of_distTriang _ mem', S.shift _ _ mem⟩
    · have eq := comp_distTriang_mor_zero₁₂ _ mem'
      dsimp at eq
      rw [← sub_eq_zero, ← sub_comp, hq, assoc, eq, comp_zero]

instance [IsTriangulated C] : S.W.HasRightCalculusOfFractions where
  exists_rightFraction X Y φ := by
    obtain ⟨Z, f, g, H, mem⟩ := φ.hs
    obtain ⟨X', f', h', mem'⟩ := distinguished_cocone_triangle₁ (φ.f ≫ f)
    obtain ⟨a, ⟨ha₁, _⟩⟩ := complete_distinguished_triangle_morphism₁ _ _ mem' H φ.f
      (𝟙 Z) (by simp)
    exact ⟨MorphismProperty.RightFraction.mk f' ⟨_, _, _, mem', mem⟩ a, ha₁⟩
  ext Y Z Z' f₁ f₂ s hs hf₁ := by
    rw [S.W_iff'] at hs
    obtain ⟨Z, g, h, H, mem⟩ := hs
    have hf₂ : (f₁ - f₂) ≫ s = 0 := by rw [sub_comp, hf₁, sub_self]
    obtain ⟨q, hq⟩ := Triangle.coyoneda_exact₂ _ H _ hf₂
    obtain ⟨Y', r, t, mem'⟩ := distinguished_cocone_triangle₁ q
    refine ⟨Y', r, ?_, ?_⟩
    · exact ⟨_, _, _, mem', mem⟩
    · have eq := comp_distTriang_mor_zero₁₂ _ mem'
      dsimp at eq
      rw [← sub_eq_zero, ← comp_sub, hq, reassoc_of% eq, zero_comp]

instance [IsTriangulated C] : S.W.IsCompatibleWithTriangulation := ⟨by
  rintro T₁ T₃ mem₁ mem₃ a b ⟨Z₅, g₅, h₅, mem₅, mem₅'⟩ ⟨Z₄, g₄, h₄, mem₄, mem₄'⟩ comm
  obtain ⟨Z₂, g₂, h₂, mem₂⟩ := distinguished_cocone_triangle (T₁.mor₁ ≫ b)
  -- two octahedra provide a morphism of triangles `T₁ ⟶ T₃` whose third component
  -- is in `S.W` because `S.W` is multiplicative
  have H := someOctahedron rfl mem₁ mem₄ mem₂
  have H' := someOctahedron comm.symm mem₅ mem₃ mem₂
  let φ : T₁ ⟶ T₃ := H.triangleMorphism₁ ≫ H'.triangleMorphism₂
  exact ⟨φ.hom₃, S.W.comp_mem _ _ (W.mk S H.mem mem₄') (W.mk' S H'.mem mem₅'),
    by simpa [φ] using φ.comm₂, by simpa [φ] using φ.comm₃⟩⟩

section

variable (T : Triangle C) (hT : T ∈ distTriang C)

lemma ext₁ [ClosedUnderIsomorphisms S.P] (h₂ : S.P T.obj₂) (h₃ : S.P T.obj₃) :
    S.P T.obj₁ :=
  S.ext₂ _ (inv_rot_of_distTriang _ hT) (S.shift _ _ h₃) h₂

lemma ext₃ [ClosedUnderIsomorphisms S.P] (h₁ : S.P T.obj₁) (h₂ : S.P T.obj₂) :
    S.P T.obj₃ :=
  S.ext₂ _ (rot_of_distTriang _ hT) h₂ (S.shift _ _ h₁)

lemma ext₁' (h₂ : S.P T.obj₂) (h₃ : S.P T.obj₃) : CategoryTheory.isoClosure S.P T.obj₁ :=
  S.ext₂' _ (inv_rot_of_distTriang _ hT) (S.shift _ _
h₃) h₂ lemma ext₃' (h₁ : S.P T.obj₁) (h₂ : S.P T.obj₂) : CategoryTheory.isoClosure S.P T.obj₃ := S.ext₂' _ (rot_of_distTriang _ hT) h₂ (S.shift _ _ h₁) end end Subcategory end Triangulated end CategoryTheory
CategoryTheory\Triangulated\TriangleShift.lean
/- Copyright (c) 2023 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Linear.LinearFunctor
import Mathlib.CategoryTheory.Triangulated.Rotate
import Mathlib.Algebra.Ring.NegOnePow

/-! # The shift on the category of triangles

In this file, it is shown that if `C` is a preadditive category with
a shift by `ℤ`, then the category of triangles `Triangle C` is also
endowed with a shift. We also show that rotating triangles three times
identifies with the shift by `1`.

The shift on the category of triangles was also obtained by Adam Topaz,
Johan Commelin and Andrew Yang during the Liquid Tensor Experiment.

-/

universe v u

namespace CategoryTheory

open Category Preadditive

variable (C : Type u) [Category.{v} C] [Preadditive C] [HasShift C ℤ]
  [∀ (n : ℤ), (CategoryTheory.shiftFunctor C n).Additive]

namespace Pretriangulated

attribute [local simp] Triangle.eqToHom_hom₁ Triangle.eqToHom_hom₂ Triangle.eqToHom_hom₃
  shiftFunctorAdd_zero_add_hom_app shiftFunctorAdd_add_zero_hom_app
  shiftFunctorAdd'_eq_shiftFunctorAdd shift_shiftFunctorCompIsoId_inv_app

/-- The shift functor `Triangle C ⥤ Triangle C` by `n : ℤ` sends a triangle
to the triangle obtained by shifting the objects by `n` in `C` and by
multiplying the three morphisms by `(-1)^n`. -/
@[simps]
noncomputable def Triangle.shiftFunctor (n : ℤ) : Triangle C ⥤ Triangle C where
  -- the sign `n.negOnePow` on all three morphisms is the standard sign convention
  obj T := Triangle.mk (n.negOnePow • T.mor₁⟦n⟧') (n.negOnePow • T.mor₂⟦n⟧')
    (n.negOnePow • T.mor₃⟦n⟧' ≫ (shiftFunctorComm C 1 n).hom.app T.obj₁)
  map f :=
    { hom₁ := f.hom₁⟦n⟧'
      hom₂ := f.hom₂⟦n⟧'
      hom₃ := f.hom₃⟦n⟧'
      comm₁ := by
        dsimp
        simp only [Linear.units_smul_comp, Linear.comp_units_smul, ← Functor.map_comp, f.comm₁]
      comm₂ := by
        dsimp
        simp only [Linear.units_smul_comp, Linear.comp_units_smul, ← Functor.map_comp, f.comm₂]
      comm₃ := by
        dsimp
        rw [Linear.units_smul_comp, Linear.comp_units_smul, ← Functor.map_comp_assoc,
          ← f.comm₃, Functor.map_comp, assoc, assoc]
        erw [(shiftFunctorComm C 1 n).hom.naturality]
        rfl }

/-- The canonical isomorphism `Triangle.shiftFunctor C 0 ≅ 𝟭 (Triangle C)`. -/
@[simps!]
noncomputable def Triangle.shiftFunctorZero : Triangle.shiftFunctor C 0 ≅ 𝟭 _ :=
  NatIso.ofComponents (fun T => Triangle.isoMk _ _
    ((CategoryTheory.shiftFunctorZero C ℤ).app _)
    ((CategoryTheory.shiftFunctorZero C ℤ).app _)
    ((CategoryTheory.shiftFunctorZero C ℤ).app _)
    (by aesop_cat) (by aesop_cat) (by
      dsimp
      simp only [one_smul, assoc, shiftFunctorComm_zero_hom_app, ← Functor.map_comp,
        Iso.inv_hom_id_app, Functor.id_obj, Functor.map_id, comp_id,
        NatTrans.naturality, Functor.id_map])) (by aesop_cat)

/-- The canonical isomorphism
`Triangle.shiftFunctor C n ≅ Triangle.shiftFunctor C a ⋙ Triangle.shiftFunctor C b`
when `a + b = n`. -/
@[simps!]
noncomputable def Triangle.shiftFunctorAdd' (a b n : ℤ) (h : a + b = n) :
    Triangle.shiftFunctor C n ≅ Triangle.shiftFunctor C a ⋙ Triangle.shiftFunctor C b :=
  NatIso.ofComponents (fun T => Triangle.isoMk _ _
    ((CategoryTheory.shiftFunctorAdd' C a b n h).app _)
    ((CategoryTheory.shiftFunctorAdd' C a b n h).app _)
    ((CategoryTheory.shiftFunctorAdd' C a b n h).app _)
    (by
      subst h
      dsimp
      -- the signs multiply via `Int.negOnePow_add`
      rw [Linear.units_smul_comp, NatTrans.naturality, Linear.comp_units_smul,
        Functor.comp_map, Functor.map_units_smul, Linear.comp_units_smul, smul_smul,
        Int.negOnePow_add, mul_comm])
    (by
      subst h
      dsimp
      rw [Linear.units_smul_comp, NatTrans.naturality, Linear.comp_units_smul,
        Functor.comp_map, Functor.map_units_smul, Linear.comp_units_smul, smul_smul,
        Int.negOnePow_add, mul_comm])
    (by
      subst h
      dsimp
      rw [Linear.units_smul_comp, Linear.comp_units_smul, Functor.map_units_smul,
        Linear.units_smul_comp, Linear.comp_units_smul, smul_smul, assoc,
        Functor.map_comp, assoc]
      erw [← NatTrans.naturality_assoc]
      simp only [shiftFunctorAdd'_eq_shiftFunctorAdd, Int.negOnePow_add,
        shiftFunctorComm_hom_app_comp_shift_shiftFunctorAdd_hom_app, add_comm a]))
    (by aesop_cat)

/-- Rotating triangles three times identifies with the shift by `1`. -/
noncomputable def rotateRotateRotateIso :
    rotate C ⋙ rotate C ⋙ rotate C ≅ Triangle.shiftFunctor C 1 :=
  NatIso.ofComponents (fun T => Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)
    (by aesop_cat) (by aesop_cat) (by aesop_cat)) (by aesop_cat)

/-- Rotating triangles three times backwards identifies with the shift by `-1`. -/
noncomputable def invRotateInvRotateInvRotateIso :
    invRotate C ⋙ invRotate C ⋙ invRotate C ≅ Triangle.shiftFunctor C (-1) :=
  NatIso.ofComponents (fun T => Triangle.isoMk _ _ (Iso.refl _) (Iso.refl _) (Iso.refl _)
    (by aesop_cat) (by aesop_cat) (by
      dsimp [shiftFunctorCompIsoId]
      simp [shiftFunctorComm_eq C _ _ _ (add_neg_self (1 : ℤ))])) (by aesop_cat)

/-- The inverse of the rotation of triangles can be expressed using a double
rotation and the shift by `-1`. -/
noncomputable def invRotateIsoRotateRotateShiftFunctorNegOne :
    invRotate C ≅ rotate C ⋙ rotate C ⋙ Triangle.shiftFunctor C (-1) :=
  calc
    invRotate C ≅ invRotate C ⋙ 𝟭 _ := (Functor.rightUnitor _).symm
    _ ≅ invRotate C ⋙ Triangle.shiftFunctor C 0 :=
          isoWhiskerLeft _ (Triangle.shiftFunctorZero C).symm
    _ ≅ invRotate C ⋙ Triangle.shiftFunctor C 1 ⋙ Triangle.shiftFunctor C (-1) :=
          isoWhiskerLeft _ (Triangle.shiftFunctorAdd' C 1 (-1) 0 (add_neg_self 1))
    _ ≅ invRotate C ⋙ (rotate C ⋙ rotate C ⋙ rotate C) ⋙ Triangle.shiftFunctor C (-1) :=
          isoWhiskerLeft _ (isoWhiskerRight (rotateRotateRotateIso C).symm _)
    _ ≅ (invRotate C ⋙ rotate C) ⋙ rotate C ⋙ rotate C ⋙ Triangle.shiftFunctor C (-1) :=
          isoWhiskerLeft _ (Functor.associator _ _ _ ≪≫
            isoWhiskerLeft _ (Functor.associator _ _ _)) ≪≫ (Functor.associator _ _ _).symm
    _ ≅ 𝟭 _ ⋙ rotate C ⋙ rotate C ⋙ Triangle.shiftFunctor C (-1) :=
          isoWhiskerRight (triangleRotation C).counitIso _
    _ ≅ _ := Functor.leftUnitor _

namespace Triangle

noncomputable instance : HasShift (Triangle C) ℤ :=
  hasShiftMk (Triangle C) ℤ
    { F := Triangle.shiftFunctor C
      zero := Triangle.shiftFunctorZero C
      add := fun a b => Triangle.shiftFunctorAdd' C a b _ rfl
      assoc_hom_app := fun a b c T => by
        ext
        all_goals
          dsimp
          rw [← shiftFunctorAdd'_assoc_hom_app a b c _ _ _ rfl rfl (add_assoc a b c)]
          dsimp only [CategoryTheory.shiftFunctorAdd']
          simp }

@[simp]
lemma shiftFunctor_eq (n : ℤ) :
    CategoryTheory.shiftFunctor (Triangle C) n = Triangle.shiftFunctor C n := rfl

@[simp]
lemma shiftFunctorZero_eq :
    CategoryTheory.shiftFunctorZero (Triangle C) ℤ = Triangle.shiftFunctorZero C :=
  ShiftMkCore.shiftFunctorZero_eq _

@[simp]
lemma shiftFunctorAdd_eq (a b : ℤ) :
    CategoryTheory.shiftFunctorAdd (Triangle C) a b = Triangle.shiftFunctorAdd' C a b _ rfl :=
  ShiftMkCore.shiftFunctorAdd_eq _ _ _

@[simp]
lemma shiftFunctorAdd'_eq (a b c : ℤ) (h : a + b = c) :
    CategoryTheory.shiftFunctorAdd' (Triangle C) a b c h =
      Triangle.shiftFunctorAdd' C a b c h := by
  subst h
  rw [shiftFunctorAdd'_eq_shiftFunctorAdd]
  apply shiftFunctorAdd_eq

end Triangle

end Pretriangulated

end CategoryTheory
CategoryTheory\Triangulated\Triangulated.lean
/- Copyright (c) 2022 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Triangulated.Pretriangulated

/-! # Triangulated Categories

This file contains the definition of triangulated categories, which are
pretriangulated categories which satisfy the octahedron axiom.

-/

noncomputable section

namespace CategoryTheory

open Limits Category Preadditive Pretriangulated

open ZeroObject

variable (C : Type*) [Category C] [Preadditive C] [HasZeroObject C] [HasShift C ℤ]
  [∀ n : ℤ, Functor.Additive (shiftFunctor C n)] [Pretriangulated C]

namespace Triangulated

variable {C}

-- Porting note: see https://github.com/leanprover/lean4/issues/2188
set_option genInjectivity false in
/-- An octahedron is a type of datum whose existence is asserted by
the octahedron axiom (TR 4), see https://stacks.math.columbia.edu/tag/05QK -/
structure Octahedron {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C} {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃}
    (comm : u₁₂ ≫ u₂₃ = u₁₃) {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧}
    (h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C) {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧}
    (h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C) {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧}
    (h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C) where
  -- the comparison morphism between the cones of `u₁₂` and `u₁₃`
  m₁ : Z₁₂ ⟶ Z₁₃
  -- the comparison morphism between the cones of `u₁₃` and `u₂₃`
  m₃ : Z₁₃ ⟶ Z₂₃
  comm₁ : v₁₂ ≫ m₁ = u₂₃ ≫ v₁₃
  comm₂ : m₁ ≫ w₁₃ = w₁₂
  comm₃ : v₁₃ ≫ m₃ = v₂₃
  comm₄ : w₁₃ ≫ u₁₂⟦1⟧' = m₃ ≫ w₂₃
  -- the "new" distinguished triangle on the cones
  mem : Triangle.mk m₁ m₃ (w₂₃ ≫ v₁₂⟦1⟧') ∈ distTriang C

gen_injective_theorems% Octahedron

instance (X : C) :
    Nonempty (Octahedron (comp_id (𝟙 X)) (contractible_distinguished X)
      (contractible_distinguished X) (contractible_distinguished X)) := by
  refine ⟨⟨0, 0, ?_, ?_, ?_, ?_,
    isomorphic_distinguished _ (contractible_distinguished (0 : C)) _
      (Triangle.isoMk _ _ (by rfl) (by rfl) (by rfl))⟩⟩
  all_goals apply Subsingleton.elim

namespace Octahedron

attribute [reassoc] comm₁ comm₂ comm₃ comm₄

variable {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C} {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃}
  {comm : u₁₂ ≫ u₂₃ = u₁₃} {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧}
  {h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C} {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧}
  {h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C} {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧}
  {h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C}
  (h : Octahedron comm h₁₂ h₂₃ h₁₃)

/-- The triangle `Z₁₂ ⟶ Z₁₃ ⟶ Z₂₃ ⟶ Z₁₂⟦1⟧` given by an octahedron. -/
@[simps!]
def triangle : Triangle C := Triangle.mk h.m₁ h.m₃ (w₂₃ ≫ v₁₂⟦1⟧')

/-- The first morphism of triangles given by an octahedron. -/
@[simps]
def triangleMorphism₁ : Triangle.mk u₁₂ v₁₂ w₁₂ ⟶ Triangle.mk u₁₃ v₁₃ w₁₃ where
  hom₁ := 𝟙 X₁
  hom₂ := u₂₃
  hom₃ := h.m₁
  comm₁ := by
    dsimp
    rw [id_comp, comm]
  comm₂ := h.comm₁
  comm₃ := by
    dsimp
    simpa only [Functor.map_id, comp_id] using h.comm₂.symm

/-- The second morphism of triangles given an octahedron. -/
@[simps]
def triangleMorphism₂ : Triangle.mk u₁₃ v₁₃ w₁₃ ⟶ Triangle.mk u₂₃ v₂₃ w₂₃ where
  hom₁ := u₁₂
  hom₂ := 𝟙 X₃
  hom₃ := h.m₃
  comm₁ := by
    dsimp
    rw [comp_id, comm]
  comm₂ := by
    dsimp
    rw [id_comp, h.comm₃]
  comm₃ := h.comm₄

variable (u₁₂ u₁₃ u₂₃ comm h₁₂ h₁₃ h₂₃)

/-- When two diagrams are isomorphic, an octahedron for one gives an octahedron
for the other. -/
def ofIso {X₁' X₂' X₃' Z₁₂' Z₂₃' Z₁₃' : C} (u₁₂' : X₁' ⟶ X₂') (u₂₃' : X₂' ⟶ X₃')
    (u₁₃' : X₁' ⟶ X₃') (comm' : u₁₂' ≫ u₂₃' = u₁₃') (e₁ : X₁ ≅ X₁') (e₂ : X₂ ≅ X₂')
    (e₃ : X₃ ≅ X₃') (comm₁₂ : u₁₂ ≫ e₂.hom = e₁.hom ≫ u₁₂')
    (comm₂₃ : u₂₃ ≫ e₃.hom = e₂.hom ≫ u₂₃')
    (v₁₂' : X₂' ⟶ Z₁₂') (w₁₂' : Z₁₂' ⟶ X₁'⟦(1 : ℤ)⟧)
    (h₁₂' : Triangle.mk u₁₂' v₁₂' w₁₂' ∈ distTriang C)
    (v₂₃' : X₃' ⟶ Z₂₃') (w₂₃' : Z₂₃' ⟶ X₂'⟦(1 : ℤ)⟧)
    (h₂₃' : Triangle.mk u₂₃' v₂₃' w₂₃' ∈ distTriang C)
    (v₁₃' : X₃' ⟶ Z₁₃') (w₁₃' : Z₁₃' ⟶ X₁'⟦(1 : ℤ)⟧)
    (h₁₃' : Triangle.mk (u₁₃') v₁₃' w₁₃' ∈ distTriang C)
    (H : Octahedron comm' h₁₂' h₂₃' h₁₃') : Octahedron comm h₁₂ h₂₃ h₁₃ := by
  -- transport `H` along the isomorphisms of the three distinguished triangles
  let iso₁₂ := isoTriangleOfIso₁₂ _ _ h₁₂ h₁₂' e₁ e₂ comm₁₂
  let iso₂₃ := isoTriangleOfIso₁₂ _ _ h₂₃ h₂₃' e₂ e₃ comm₂₃
  let iso₁₃ := isoTriangleOfIso₁₂ _ _ h₁₃ h₁₃' e₁ e₃
    (by dsimp; rw [← comm, assoc, ← comm', ← reassoc_of% comm₁₂, comm₂₃])
  have eq₁₂ := iso₁₂.hom.comm₂
  have eq₁₂' := iso₁₂.hom.comm₃
  have eq₁₃ := iso₁₃.hom.comm₂
  have eq₁₃' := iso₁₃.hom.comm₃
  have eq₂₃ := iso₂₃.hom.comm₂
  have eq₂₃' := iso₂₃.hom.comm₃
  have rel₁₂ := H.triangleMorphism₁.comm₂
  have rel₁₃ := H.triangleMorphism₁.comm₃
  have rel₂₂ := H.triangleMorphism₂.comm₂
  have rel₂₃ := H.triangleMorphism₂.comm₃
  dsimp [iso₁₂, iso₂₃, iso₁₃] at eq₁₂ eq₁₂' eq₁₃ eq₁₃' eq₂₃ eq₂₃' rel₁₂ rel₁₃ rel₂₂ rel₂₃
  rw [Functor.map_id, comp_id] at rel₁₃
  rw [id_comp] at rel₂₂
  refine ⟨iso₁₂.hom.hom₃ ≫ H.m₁ ≫ iso₁₃.inv.hom₃, iso₁₃.hom.hom₃ ≫ H.m₃ ≫ iso₂₃.inv.hom₃,
    ?_, ?_, ?_, ?_, ?_⟩
  · rw [reassoc_of% eq₁₂, ← cancel_mono iso₁₃.hom.hom₃, assoc, assoc, assoc, assoc,
      iso₁₃.inv_hom_id_triangle_hom₃, eq₁₃, reassoc_of% comm₂₃, ← rel₁₂]
    dsimp
    rw [comp_id]
  · rw [← cancel_mono (e₁.hom⟦(1 : ℤ)⟧'), eq₁₂', assoc, assoc, assoc, eq₁₃',
      iso₁₃.inv_hom_id_triangle_hom₃_assoc, ← rel₁₃]
  · rw [reassoc_of% eq₁₃, reassoc_of% rel₂₂, ← cancel_mono iso₂₃.hom.hom₃, assoc, assoc,
      iso₂₃.inv_hom_id_triangle_hom₃, eq₂₃]
    dsimp
    rw [comp_id]
  · rw [← cancel_mono (e₂.hom⟦(1 : ℤ)⟧'), assoc, assoc, assoc, assoc, eq₂₃',
      iso₂₃.inv_hom_id_triangle_hom₃_assoc, ← rel₂₃, ← Functor.map_comp, comm₁₂,
      Functor.map_comp, reassoc_of% eq₁₃']
  · refine isomorphic_distinguished _ H.mem _ ?_
    refine Triangle.isoMk _ _ (Triangle.π₃.mapIso iso₁₂) (Triangle.π₃.mapIso iso₁₃)
      (Triangle.π₃.mapIso iso₂₃) (by simp) (by simp) ?_
    dsimp
    rw [assoc, ← Functor.map_comp, eq₁₂, Functor.map_comp, reassoc_of% eq₂₃']

end Octahedron

end Triangulated

open Triangulated

/-- A triangulated category is a pretriangulated category which satisfies
the octahedron axiom (TR 4), see https://stacks.math.columbia.edu/tag/05QK -/
class IsTriangulated : Prop where
  /-- the octahedron axiom (TR 4) -/
  octahedron_axiom :
    ∀ {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C} {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃}
      (comm : u₁₂ ≫ u₂₃ = u₁₃) {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧}
      (h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C)
      {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧}
      (h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C)
      {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧}
      (h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C),
      Nonempty (Octahedron comm h₁₂ h₂₃ h₁₃)

namespace Triangulated

variable {C}
variable {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C} {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃}
  (comm : u₁₂ ≫ u₂₃ = u₁₃) {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧}
  {h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C} {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧}
  {h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C} {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧}
  {h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C}
  (h : Octahedron comm h₁₂ h₂₃ h₁₃)

/-- A choice of octahedron given by the octahedron axiom. -/
def someOctahedron' [IsTriangulated C] : Octahedron comm h₁₂ h₂₃ h₁₃ :=
  (IsTriangulated.octahedron_axiom comm h₁₂ h₂₃ h₁₃).some

/-- A choice of octahedron given by the octahedron axiom. -/
def someOctahedron [IsTriangulated C] {X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C}
    {u₁₂ : X₁ ⟶ X₂} {u₂₃ : X₂ ⟶ X₃} {u₁₃ : X₁ ⟶ X₃} (comm : u₁₂ ≫ u₂₃ = u₁₃)
    {v₁₂ : X₂ ⟶ Z₁₂} {w₁₂ : Z₁₂ ⟶ X₁⟦(1 : ℤ)⟧}
    (h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C)
    {v₂₃ : X₃ ⟶ Z₂₃} {w₂₃ : Z₂₃ ⟶ X₂⟦(1 : ℤ)⟧}
    (h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C)
    {v₁₃ : X₃ ⟶ Z₁₃} {w₁₃ : Z₁₃ ⟶ X₁⟦(1 : ℤ)⟧}
    (h₁₃ : Triangle.mk u₁₃ v₁₃ w₁₃ ∈ distTriang C) :
    Octahedron comm h₁₂ h₂₃ h₁₃ :=
  someOctahedron' _

end Triangulated

variable {C}

/-- Constructor for `IsTriangulated C` which shows that it suffices to obtain
an octahedron for a suitable isomorphic diagram instead of the given diagram. -/
lemma IsTriangulated.mk' (h : ∀ ⦃X₁' X₂' X₃' : C⦄ (u₁₂' : X₁' ⟶ X₂') (u₂₃' : X₂' ⟶ X₃'),
    ∃ (X₁ X₂ X₃ Z₁₂ Z₂₃ Z₁₃ : C) (u₁₂ : X₁ ⟶ X₂) (u₂₃ : X₂ ⟶ X₃) (e₁ : X₁' ≅ X₁)
      (e₂ : X₂' ≅ X₂) (e₃ : X₃' ≅ X₃) (_ : u₁₂' ≫ e₂.hom = e₁.hom ≫ u₁₂)
      (_ : u₂₃' ≫ e₃.hom = e₂.hom ≫ u₂₃) (v₁₂ : X₂ ⟶ Z₁₂) (w₁₂ : Z₁₂ ⟶ X₁⟦1⟧)
      (h₁₂ : Triangle.mk u₁₂ v₁₂ w₁₂ ∈ distTriang C) (v₂₃ : X₃ ⟶ Z₂₃) (w₂₃ : Z₂₃ ⟶ X₂⟦1⟧)
      (h₂₃ : Triangle.mk u₂₃ v₂₃ w₂₃ ∈ distTriang C) (v₁₃ : X₃ ⟶ Z₁₃) (w₁₃ : Z₁₃ ⟶ X₁⟦1⟧)
      (h₁₃ : Triangle.mk (u₁₂ ≫ u₂₃) v₁₃ w₁₃ ∈ distTriang C),
      Nonempty (Octahedron rfl h₁₂ h₂₃ h₁₃)) :
    IsTriangulated C where
  octahedron_axiom {X₁' X₂' X₃' Z₁₂' Z₂₃' Z₁₃' u₁₂' u₂₃' u₁₃'} comm' {v₁₂' w₁₂'} h₁₂'
    {v₂₃' w₂₃'} h₂₃' {v₁₃' w₁₃'} h₁₃' := by
    obtain ⟨X₁, X₂, X₃, Z₁₂, Z₂₃, Z₁₃, u₁₂, u₂₃, e₁, e₂, e₃, comm₁₂, comm₂₃, v₁₂, w₁₂, h₁₂,
      v₂₃, w₂₃, h₂₃, v₁₃, w₁₃, h₁₃, H⟩ := h u₁₂' u₂₃'
    exact ⟨Octahedron.ofIso u₁₂' u₂₃' u₁₃' comm' h₁₂' h₂₃' h₁₃' u₁₂ u₂₃ _ rfl e₁ e₂ e₃
      comm₁₂ comm₂₃ v₁₂ w₁₂ h₁₂ v₂₃ w₂₃ h₂₃ v₁₃ w₁₃ h₁₃ H.some⟩

end CategoryTheory
CategoryTheory\Triangulated\Yoneda.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.Algebra.Homology.ShortComplex.Ab
import Mathlib.CategoryTheory.Preadditive.Yoneda.Basic
import Mathlib.CategoryTheory.Triangulated.HomologicalFunctor
import Mathlib.CategoryTheory.Triangulated.Opposite

/-! # The Yoneda functors are homological

Let `C` be a pretriangulated category. In this file, we show that the
functors `preadditiveCoyoneda.obj A : C ⥤ AddCommGrp` for `A : Cᵒᵖ` and
`preadditiveYoneda.obj B : Cᵒᵖ ⥤ AddCommGrp` for `B : C` are homological functors.

-/

namespace CategoryTheory

open Limits Pretriangulated.Opposite

namespace Pretriangulated

variable {C : Type*} [Category C] [Preadditive C] [HasZeroObject C] [HasShift C ℤ]
  [∀ (n : ℤ), (shiftFunctor C n).Additive] [Pretriangulated C]

-- `Hom(A, -)` is homological: exactness follows from `Triangle.coyoneda_exact₂`.
instance (A : Cᵒᵖ) : (preadditiveCoyoneda.obj A).IsHomological where
  exact T hT := by
    rw [ShortComplex.ab_exact_iff]
    intro (x₂ : A.unop ⟶ T.obj₂) (hx₂ : x₂ ≫ T.mor₂ = 0)
    obtain ⟨x₁, hx₁⟩ := T.coyoneda_exact₂ hT x₂ hx₂
    exact ⟨x₁, hx₁.symm⟩

-- `Hom(-, B)` is homological: apply `Triangle.yoneda_exact₂` to the opposite triangle.
instance (B : C) : (preadditiveYoneda.obj B).IsHomological where
  exact T hT := by
    rw [ShortComplex.ab_exact_iff]
    intro (x₂ : T.obj₂.unop ⟶ B) (hx₂ : T.mor₂.unop ≫ x₂ = 0)
    obtain ⟨x₃, hx₃⟩ := Triangle.yoneda_exact₂ _ (unop_distinguished T hT) x₂ hx₂
    exact ⟨x₃, hx₃.symm⟩

lemma preadditiveYoneda_map_distinguished (T : Triangle C) (hT : T ∈ distTriang C) (B : C) :
    ((shortComplexOfDistTriangle T hT).op.map (preadditiveYoneda.obj B)).Exact :=
  (preadditiveYoneda.obj B).map_distinguished_op_exact T hT

noncomputable instance (A : Cᵒᵖ) : (preadditiveCoyoneda.obj A).ShiftSequence ℤ :=
  Functor.ShiftSequence.tautological _ _

lemma preadditiveCoyoneda_homologySequenceδ_apply {C : Type*} [Category C] [Preadditive C]
    [HasShift C ℤ] (A : Cᵒᵖ) (T : Triangle C) (n₀ n₁ : ℤ) (h : n₀ + 1 = n₁)
    (x : A.unop ⟶ T.obj₃⟦n₀⟧) :
    (preadditiveCoyoneda.obj A).homologySequenceδ T n₀ n₁ h x =
      x ≫ T.mor₃⟦n₀⟧' ≫ (shiftFunctorAdd' C 1 n₀ n₁ (by omega)).inv.app _ := by
  apply Category.assoc

end Pretriangulated

end CategoryTheory
CategoryTheory\Triangulated\TStructure\Basic.lean
/- Copyright (c) 2024 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import Mathlib.CategoryTheory.Shift.Predicate
import Mathlib.CategoryTheory.Triangulated.Pretriangulated

/-! # t-structures on triangulated categories

This files introduces the notion of t-structure on (pre)triangulated categories.

The first example of t-structure shall be the canonical t-structure on the
derived category of an abelian category (TODO).

Given a t-structure `t : TStructure C`, we define type classes `t.IsLE X n`
and `t.IsGE X n` in order to say that an object `X : C` is `≤ n` or `≥ n` for `t`.

## Implementation notes

We introduce the type of t-structures rather than a type class saying that we
have fixed a t-structure on a certain category. The reason is that certain
triangulated categories have several t-structures which one may want to
use depending on the context.

## TODO

* define functors `t.truncLE n : C ⥤ C`,`t.truncGE n : C ⥤ C` and the
  associated distinguished triangles
* promote these truncations to a (functorial) spectral object
* define the heart of `t` and show it is an abelian category
* define triangulated subcategories `t.plus`, `t.minus`, `t.bounded` and show
  that there are induced t-structures on these full subcategories

## References
* [Beilinson, Bernstein, Deligne, Gabber, *Faisceaux pervers*][bbd-1982]

-/

namespace CategoryTheory

open Limits

namespace Triangulated

variable (C : Type _) [Category C] [Preadditive C] [HasZeroObject C] [HasShift C ℤ]
  [∀ (n : ℤ), (shiftFunctor C n).Additive] [Pretriangulated C]

open Pretriangulated

/-- `TStructure C` is the type of t-structures on the (pre)triangulated category `C`. -/
structure TStructure where
  /-- the predicate of objects that are `≤ n` for `n : ℤ`. -/
  LE (n : ℤ) : C → Prop
  /-- the predicate of objects that are `≥ n` for `n : ℤ`. -/
  GE (n : ℤ) : C → Prop
  LE_closedUnderIsomorphisms (n : ℤ) : ClosedUnderIsomorphisms (LE n) := by infer_instance
  GE_closedUnderIsomorphisms (n : ℤ) : ClosedUnderIsomorphisms (GE n) := by infer_instance
  LE_shift (n a n' : ℤ) (h : a + n' = n) (X : C) (hX : LE n X) : LE n' (X⟦a⟧)
  GE_shift (n a n' : ℤ) (h : a + n' = n) (X : C) (hX : GE n X) : GE n' (X⟦a⟧)
  zero' ⦃X Y : C⦄ (f : X ⟶ Y) (hX : LE 0 X) (hY : GE 1 Y) : f = 0
  LE_zero_le : LE 0 ≤ LE 1
  GE_one_le : GE 1 ≤ GE 0
  exists_triangle_zero_one (A : C) : ∃ (X Y : C) (_ : LE 0 X) (_ : GE 1 Y)
    (f : X ⟶ A) (g : A ⟶ Y) (h : Y ⟶ X⟦(1 : ℤ)⟧), Triangle.mk f g h ∈ distTriang C

namespace TStructure

attribute [instance] LE_closedUnderIsomorphisms GE_closedUnderIsomorphisms

variable {C}
variable (t : TStructure C)

lemma exists_triangle (A : C) (n₀ n₁ : ℤ) (h : n₀ + 1 = n₁) :
    ∃ (X Y : C) (_ : t.LE n₀ X) (_ : t.GE n₁ Y) (f : X ⟶ A) (g : A ⟶ Y)
      (h : Y ⟶ X⟦(1 : ℤ)⟧), Triangle.mk f g h ∈ distTriang C := by
  -- shift the `(≤ 0, ≥ 1)` triangle for `A⟦n₀⟧` back by `-n₀`
  obtain ⟨X, Y, hX, hY, f, g, h, mem⟩ := t.exists_triangle_zero_one (A⟦n₀⟧)
  let T := (Triangle.shiftFunctor C (-n₀)).obj (Triangle.mk f g h)
  let e := (shiftEquiv C n₀).unitIso.symm.app A
  have hT' : Triangle.mk (T.mor₁ ≫ e.hom) (e.inv ≫ T.mor₂) T.mor₃ ∈ distTriang C := by
    refine isomorphic_distinguished _ (Triangle.shift_distinguished _ mem (-n₀)) _ ?_
    refine Triangle.isoMk _ _ (Iso.refl _) e.symm (Iso.refl _) ?_ ?_ ?_
    all_goals dsimp; simp [T]
  exact ⟨_, _, t.LE_shift _ _ _ (neg_add_self n₀) _ hX,
    t.GE_shift _ _ _ (by omega) _ hY, _, _, _, hT'⟩

lemma predicateShift_LE (a n n' : ℤ) (hn' : a + n = n') :
    (PredicateShift (t.LE n) a) = t.LE n' := by
  ext X
  constructor
  · intro hX
    exact (mem_iff_of_iso (LE t n') ((shiftEquiv C a).unitIso.symm.app X)).1
      (t.LE_shift n (-a) n' (by omega) _ hX)
  · intro hX
    exact t.LE_shift _ _ _ hn' X hX

lemma predicateShift_GE (a n n' : ℤ) (hn' : a + n = n') :
    (PredicateShift (t.GE n) a) = t.GE n' := by
  ext X
  constructor
  · intro hX
    exact (mem_iff_of_iso (GE t n') ((shiftEquiv C a).unitIso.symm.app X)).1
      (t.GE_shift n (-a) n' (by omega) _ hX)
  · intro hX
    exact t.GE_shift _ _ _ hn' X hX

lemma LE_monotone : Monotone t.LE := by
  -- induct on the natural-number gap, using `LE_zero_le` for the single step
  let H := fun (a : ℕ) => ∀ (n : ℤ), t.LE n ≤ t.LE (n + a)
  suffices ∀ (a : ℕ), H a by
    intro n₀ n₁ h
    obtain ⟨a, ha⟩ := Int.nonneg_def.1 h
    obtain rfl : n₁ = n₀ + a := by omega
    apply this
  have H_zero : H 0 := fun n => by
    simp only [Nat.cast_zero, add_zero]
    rfl
  have H_one : H 1 := fun n X hX => by
    rw [← t.predicateShift_LE n 1 (n + (1 : ℕ)) rfl, predicateShift_iff]
    rw [← t.predicateShift_LE n 0 n (add_zero n), predicateShift_iff] at hX
    exact t.LE_zero_le _ hX
  have H_add : ∀ (a b c : ℕ) (_ : a + b = c) (_ : H a) (_ : H b), H c := by
    intro a b c h ha hb n
    rw [← h, Nat.cast_add, ← add_assoc]
    exact (ha n).trans (hb (n+a))
  intro a
  induction' a with a ha
  · exact H_zero
  · exact H_add a 1 _ rfl ha H_one

lemma GE_antitone : Antitone t.GE := by
  -- same induction scheme as `LE_monotone`, using `GE_one_le` for the single step
  let H := fun (a : ℕ) => ∀ (n : ℤ), t.GE (n + a) ≤ t.GE n
  suffices ∀ (a : ℕ), H a by
    intro n₀ n₁ h
    obtain ⟨a, ha⟩ := Int.nonneg_def.1 h
    obtain rfl : n₁ = n₀ + a := by omega
    apply this
  have H_zero : H 0 := fun n => by
    simp only [Nat.cast_zero, add_zero]
    rfl
  have H_one : H 1 := fun n X hX => by
    rw [← t.predicateShift_GE n 1 (n + (1 : ℕ)) (by simp), predicateShift_iff] at hX
    rw [← t.predicateShift_GE n 0 n (add_zero n)]
    exact t.GE_one_le _ hX
  have H_add : ∀ (a b c : ℕ) (_ : a + b = c) (_ : H a) (_ : H b), H c := by
    intro a b c h ha hb n
    rw [← h, Nat.cast_add, ← add_assoc ]
    exact (hb (n + a)).trans (ha n)
  intro a
  induction' a with a ha
  · exact H_zero
  · exact H_add a 1 _ rfl ha H_one

/-- Given a t-structure `t` on a pretriangulated category `C`, the property
`t.IsLE X n` holds if `X : C` is `≤ n` for the t-structure. -/
class IsLE (X : C) (n : ℤ) : Prop where
  le : t.LE n X

/-- Given a t-structure `t` on a pretriangulated category `C`, the property
`t.IsGE X n` holds if `X : C` is `≥ n` for the t-structure. -/
class IsGE (X : C) (n : ℤ) : Prop where
  ge : t.GE n X

lemma mem_of_isLE (X : C) (n : ℤ) [t.IsLE X n] : t.LE n X := IsLE.le

lemma mem_of_isGE (X : C) (n : ℤ) [t.IsGE X n] : t.GE n X := IsGE.ge

end TStructure

end Triangulated

end CategoryTheory
Combinatorics\Colex.lean
/- Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta, Alena Gusakov, Yaël Dillies
-/
import Mathlib.Algebra.GeomSum
import Mathlib.Data.Finset.Slice
import Mathlib.Data.Nat.BitIndices
import Mathlib.Order.SupClosed

/-! # Colexigraphic order

We define the colex order for finite sets, and give a couple of important lemmas and
properties relating to it.

The colex ordering likes to avoid large values: If the biggest element of `t` is bigger
than all elements of `s`, then `s < t`.

In the special case of `ℕ`, it can be thought of as the "binary" ordering. That is,
order `s` based on $∑_{i ∈ s} 2^i$. It's defined here on `Finset α` for any linear
order `α`.

In the context of the Kruskal-Katona theorem, we are interested in how colex behaves
for sets of a fixed size. For example, for size 3, the colex order on ℕ starts
`012, 013, 023, 123, 014, 024, 124, 034, 134, 234, ...`

## Main statements

* Colex order properties - linearity, decidability and so on.
* `Finset.Colex.forall_lt_mono`: if `s < t` in colex, and everything in `t` is `< a`,
  then everything in `s` is `< a`. This confirms the idea that an enumeration under
  colex will exhaust all sets using elements `< a` before allowing `a` to be included.
* `Finset.toColex_image_le_toColex_image`: Strictly monotone functions preserve colex.
* `Finset.geomSum_le_geomSum_iff_toColex_le_toColex`: Colex for α = ℕ is the same as
  binary. This also proves binary expansions are unique.

## See also

Related files are:
* `Data.List.Lex`: Lexicographic order on lists.
* `Data.Pi.Lex`: Lexicographic order on `Πₗ i, α i`.
* `Data.PSigma.Order`: Lexicographic order on `Σ' i, α i`.
* `Data.Sigma.Order`: Lexicographic order on `Σ i, α i`.
* `Data.Prod.Lex`: Lexicographic order on `α × β`.

## TODO

* Generalise `Colex.initSeg` so that it applies to `ℕ`.

## References

* https://github.com/b-mehta/maths-notes/blob/master/iii/mich/combinatorics.pdf

## Tags

colex, colexicographic, binary
-/

open Finset Function

variable {α β : Type*}

namespace Finset

/-- Type synonym of `Finset α` equipped with the colexicographic order rather than
the inclusion order. -/
@[ext]
structure Colex (α) :=
  /-- `toColex` is the "identity" function between `Finset α` and `Finset.Colex α`. -/
  toColex ::
  /-- `ofColex` is the "identity" function between `Finset.Colex α` and `Finset α`. -/
  (ofColex : Finset α)

-- TODO: Why can't we export?
--export Colex (toColex)

open Colex

instance : Inhabited (Colex α) := ⟨⟨∅⟩⟩

@[simp] lemma toColex_ofColex (s : Colex α) : toColex (ofColex s) = s := rfl

lemma ofColex_toColex (s : Finset α) : ofColex (toColex s) = s := rfl

lemma toColex_inj {s t : Finset α} : toColex s = toColex t ↔ s = t := by simp

@[simp] lemma ofColex_inj {s t : Colex α} : ofColex s = ofColex t ↔ s = t := by
  cases s; cases t; simp

lemma toColex_ne_toColex {s t : Finset α} : toColex s ≠ toColex t ↔ s ≠ t := by simp

lemma ofColex_ne_ofColex {s t : Colex α} : ofColex s ≠ ofColex t ↔ s ≠ t := by simp

lemma toColex_injective : Injective (toColex : Finset α → Colex α) := fun _ _ ↦ toColex_inj.1

lemma ofColex_injective : Injective (ofColex : Colex α → Finset α) := fun _ _ ↦ ofColex_inj.1

namespace Colex

section PartialOrder

variable [PartialOrder α] [PartialOrder β] {f : α → β} {𝒜 𝒜₁ 𝒜₂ : Finset (Finset α)}
  {s t u : Finset α} {a b : α}

instance instLE : LE (Colex α) where
  le s t := ∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t → ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b

-- TODO: This lemma is weirdly useful given how strange its statement is.
-- Is there a nicer statement? Should this lemma be made public?
private lemma trans_aux (hst : toColex s ≤ toColex t) (htu : toColex t ≤ toColex u)
    (has : a ∈ s) (hat : a ∉ t) : ∃ b, b ∈ u ∧ b ∉ s ∧ a ≤ b := by
  classical
  -- `s'` is the witnesses in `s` that already dominate `a` but escaped `t`;
  -- pick a maximal one and chase it through `hst` then `htu`.
  let s' : Finset α := s.filter fun b ↦ b ∉ t ∧ a ≤ b
  have ⟨b, hb, hbmax⟩ := exists_maximal s' ⟨a, by simp [s', has, hat]⟩
  simp only [s', mem_filter, and_imp] at hb hbmax
  have ⟨c, hct, hcs, hbc⟩ := hst hb.1 hb.2.1
  by_cases hcu : c ∈ u
  · exact ⟨c, hcu, hcs, hb.2.2.trans hbc⟩
  have ⟨d, hdu, hdt, hcd⟩ := htu hct hcu
  have had : a ≤ d := hb.2.2.trans <| hbc.trans hcd
  refine ⟨d, hdu, fun hds ↦ ?_, had⟩
  exact hbmax d hds hdt had <| hbc.trans_lt <| hcd.lt_of_ne <| ne_of_mem_of_not_mem hct hdt

private lemma antisymm_aux (hst : toColex s ≤ toColex t) (hts : toColex t ≤ toColex s) :
    s ⊆ t := by
  intro a has
  by_contra! hat
  have ⟨_b, hb₁, hb₂, _⟩ := trans_aux hst hts has hat
  exact hb₂ hb₁

instance instPartialOrder : PartialOrder (Colex α) where
  le_refl s a ha ha' := (ha' ha).elim
  le_antisymm s t hst hts := Colex.ext <| (antisymm_aux hst hts).antisymm (antisymm_aux hts hst)
  le_trans s t u hst htu a has hau := by
    by_cases hat : a ∈ ofColex t
    · have ⟨b, hbu, hbt, hab⟩ := htu hat hau
      by_cases hbs : b ∈ ofColex s
      · have ⟨c, hcu, hcs, hbc⟩ := trans_aux hst htu hbs hbt
        exact ⟨c, hcu, hcs, hab.trans hbc⟩
      · exact ⟨b, hbu, hbs, hab⟩
    · exact trans_aux hst htu has hat

lemma le_def {s t : Colex α} :
    s ≤ t ↔ ∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t →
      ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b :=
  Iff.rfl

lemma toColex_le_toColex :
    toColex s ≤ toColex t ↔ ∀ ⦃a⦄, a ∈ s → a ∉ t → ∃ b, b ∈ t ∧ b ∉ s ∧ a ≤ b := Iff.rfl

lemma toColex_lt_toColex :
    toColex s < toColex t ↔ s ≠ t ∧ ∀ ⦃a⦄, a ∈ s → a ∉ t → ∃ b, b ∈ t ∧ b ∉ s ∧ a ≤ b := by
  simp [lt_iff_le_and_ne, toColex_le_toColex, and_comm]

/-- If `s ⊆ t`, then `s ≤ t` in the colex order. Note the converse does not hold, as
inclusion does not form a linear order. -/
lemma toColex_mono : Monotone (toColex : Finset α → Colex α) :=
  fun _s _t hst _a has hat ↦ (hat <| hst has).elim

/-- If `s ⊂ t`, then `s < t` in the colex order. Note the converse does not hold, as
inclusion does not form a linear order. -/
lemma toColex_strictMono : StrictMono (toColex : Finset α → Colex α) :=
  toColex_mono.strictMono_of_injective toColex_injective

/-- If `s ⊆ t`, then `s ≤ t` in the colex order. Note the converse does not hold, as
inclusion does not form a linear order. -/
lemma toColex_le_toColex_of_subset (h : s ⊆ t) : toColex s ≤ toColex t := toColex_mono h

/-- If `s ⊂ t`, then `s < t` in the colex order. Note the converse does not hold, as
inclusion does not form a linear order. -/
lemma toColex_lt_toColex_of_ssubset (h : s ⊂ t) : toColex s < toColex t := toColex_strictMono h

instance instOrderBot : OrderBot (Colex α) where
  bot := toColex ∅
  bot_le s a ha := by cases ha

@[simp] lemma toColex_empty : toColex (∅ : Finset α) = ⊥ := rfl

@[simp] lemma ofColex_bot : ofColex (⊥ : Colex α) = ∅ := rfl

/-- If `s ≤ t` in colex, and all elements in `t` are small, then all elements in `s`
are small. -/
lemma forall_le_mono (hst : toColex s ≤ toColex t) (ht : ∀ b ∈ t, b ≤ a) : ∀ b ∈ s, b ≤ a := by
  rintro b hb
  by_cases b ∈ t
  · exact ht _ ‹_›
  · obtain ⟨c, hct, -, hbc⟩ := hst hb ‹_›
    exact hbc.trans <| ht _ hct

/-- If `s ≤ t` in colex, and all elements in `t` are small, then all elements in `s`
are small. -/
lemma forall_lt_mono (hst : toColex s ≤ toColex t) (ht : ∀ b ∈ t, b < a) : ∀ b ∈ s, b < a := by
  rintro b hb
  by_cases b ∈ t
  · exact ht _ ‹_›
  · obtain ⟨c, hct, -, hbc⟩ := hst hb ‹_›
    exact hbc.trans_lt <| ht _ hct

/-- `s ≤ {a}` in colex iff all elements of `s` are strictly less than `a`, except
possibly `a` in which case `s = {a}`.
-/ lemma toColex_le_singleton : toColex s ≤ toColex {a} ↔ ∀ b ∈ s, b ≤ a ∧ (a ∈ s → b = a) := by simp only [toColex_le_toColex, mem_singleton, and_assoc, exists_eq_left] refine forall₂_congr fun b _ ↦ ?_; obtain rfl | hba := eq_or_ne b a <;> aesop /-- `s < {a}` in colex iff all elements of `s` are strictly less than `a`. -/ lemma toColex_lt_singleton : toColex s < toColex {a} ↔ ∀ b ∈ s, b < a := by rw [lt_iff_le_and_ne, toColex_le_singleton, toColex_ne_toColex] refine ⟨fun h b hb ↦ (h.1 _ hb).1.lt_of_ne ?_, fun h ↦ ⟨fun b hb ↦ ⟨(h _ hb).le, fun ha ↦ (lt_irrefl _ <| h _ ha).elim⟩, ?_⟩⟩ <;> rintro rfl · refine h.2 <| eq_singleton_iff_unique_mem.2 ⟨hb, fun c hc ↦ (h.1 _ hc).2 hb⟩ · simp at h /-- `{a} ≤ s` in colex iff `s` contains an element greater than or equal to `a`. -/ lemma singleton_le_toColex : (toColex {a} : Colex α) ≤ toColex s ↔ ∃ x ∈ s, a ≤ x := by simp [toColex_le_toColex]; by_cases a ∈ s <;> aesop /-- Colex is an extension of the base order. -/ lemma singleton_le_singleton : (toColex {a} : Colex α) ≤ toColex {b} ↔ a ≤ b := by simp [toColex_le_singleton, eq_comm] /-- Colex is an extension of the base order. 
-/ lemma singleton_lt_singleton : (toColex {a} : Colex α) < toColex {b} ↔ a < b := by simp [toColex_lt_singleton] lemma le_iff_sdiff_subset_lowerClosure {s t : Colex α} : s ≤ t ↔ (ofColex s : Set α) \ ofColex t ⊆ lowerClosure (ofColex t \ ofColex s : Set α) := by simp [le_def, Set.subset_def, and_assoc] section DecidableEq variable [DecidableEq α] instance instDecidableEq : DecidableEq (Colex α) := fun s t ↦ decidable_of_iff' (s.ofColex = t.ofColex) Colex.ext_iff instance instDecidableLE [@DecidableRel α (· ≤ ·)] : @DecidableRel (Colex α) (· ≤ ·) := fun s t ↦ decidable_of_iff' (∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t → ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b) Iff.rfl instance instDecidableLT [@DecidableRel α (· ≤ ·)] : @DecidableRel (Colex α) (· < ·) := decidableLTOfDecidableLE /-- The colexigraphic order is insensitive to removing the same elements from both sets. -/ lemma toColex_sdiff_le_toColex_sdiff (hus : u ⊆ s) (hut : u ⊆ t) : toColex (s \ u) ≤ toColex (t \ u) ↔ toColex s ≤ toColex t := by simp_rw [toColex_le_toColex, ← and_imp, ← and_assoc, ← mem_sdiff, sdiff_sdiff_sdiff_cancel_right hus, sdiff_sdiff_sdiff_cancel_right hut] /-- The colexigraphic order is insensitive to removing the same elements from both sets. 
-/ lemma toColex_sdiff_lt_toColex_sdiff (hus : u ⊆ s) (hut : u ⊆ t) : toColex (s \ u) < toColex (t \ u) ↔ toColex s < toColex t := lt_iff_lt_of_le_iff_le' (toColex_sdiff_le_toColex_sdiff hut hus) <| toColex_sdiff_le_toColex_sdiff hus hut @[simp] lemma toColex_sdiff_le_toColex_sdiff' : toColex (s \ t) ≤ toColex (t \ s) ↔ toColex s ≤ toColex t := by simpa using toColex_sdiff_le_toColex_sdiff (inter_subset_left (s₁ := s)) inter_subset_right @[simp] lemma toColex_sdiff_lt_toColex_sdiff' : toColex (s \ t) < toColex (t \ s) ↔ toColex s < toColex t := by simpa using toColex_sdiff_lt_toColex_sdiff (inter_subset_left (s₁ := s)) inter_subset_right end DecidableEq @[simp] lemma cons_le_cons (ha hb) : toColex (s.cons a ha) ≤ toColex (s.cons b hb) ↔ a ≤ b := by obtain rfl | hab := eq_or_ne a b · simp classical rw [← toColex_sdiff_le_toColex_sdiff', cons_sdiff_cons hab, cons_sdiff_cons hab.symm, singleton_le_singleton] @[simp] lemma cons_lt_cons (ha hb) : toColex (s.cons a ha) < toColex (s.cons b hb) ↔ a < b := lt_iff_lt_of_le_iff_le' (cons_le_cons _ _) (cons_le_cons _ _) variable [DecidableEq α] lemma insert_le_insert (ha : a ∉ s) (hb : b ∉ s) : toColex (insert a s) ≤ toColex (insert b s) ↔ a ≤ b := by rw [← cons_eq_insert _ _ ha, ← cons_eq_insert _ _ hb, cons_le_cons] lemma insert_lt_insert (ha : a ∉ s) (hb : b ∉ s) : toColex (insert a s) < toColex (insert b s) ↔ a < b := by rw [← cons_eq_insert _ _ ha, ← cons_eq_insert _ _ hb, cons_lt_cons] lemma erase_le_erase (ha : a ∈ s) (hb : b ∈ s) : toColex (s.erase a) ≤ toColex (s.erase b) ↔ b ≤ a := by obtain rfl | hab := eq_or_ne a b · simp classical rw [← toColex_sdiff_le_toColex_sdiff', erase_sdiff_erase hab hb, erase_sdiff_erase hab.symm ha, singleton_le_singleton] lemma erase_lt_erase (ha : a ∈ s) (hb : b ∈ s) : toColex (s.erase a) < toColex (s.erase b) ↔ b < a := lt_iff_lt_of_le_iff_le' (erase_le_erase hb ha) (erase_le_erase ha hb) end PartialOrder variable [LinearOrder α] [LinearOrder β] {f : α → β} {𝒜 𝒜₁ 𝒜₂ : Finset (Finset 
α)} {s t u : Finset α} {a b : α} {r : ℕ} instance instLinearOrder : LinearOrder (Colex α) where le_total s t := by classical obtain rfl | hts := eq_or_ne t s · simp have ⟨a, ha, hamax⟩ := exists_max_image _ id (symmDiff_nonempty.2 <| ofColex_ne_ofColex.2 hts) simp_rw [mem_symmDiff] at ha hamax exact ha.imp (fun ha b hbs hbt ↦ ⟨a, ha.1, ha.2, hamax _ <| Or.inr ⟨hbs, hbt⟩⟩) (fun ha b hbt hbs ↦ ⟨a, ha.1, ha.2, hamax _ <| Or.inl ⟨hbt, hbs⟩⟩) decidableLE := instDecidableLE decidableLT := instDecidableLT open scoped symmDiff private lemma max_mem_aux {s t : Colex α} (hst : s ≠ t) : (ofColex s ∆ ofColex t).Nonempty := by simpa lemma toColex_lt_toColex_iff_exists_forall_lt : toColex s < toColex t ↔ ∃ a ∈ t, a ∉ s ∧ ∀ b ∈ s, b ∉ t → b < a := by rw [← not_le, toColex_le_toColex, not_forall] simp only [not_forall, not_exists, not_and, not_le, exists_prop, exists_and_left] lemma lt_iff_exists_forall_lt {s t : Colex α} : s < t ↔ ∃ a ∈ ofColex t, a ∉ ofColex s ∧ ∀ b ∈ ofColex s, b ∉ ofColex t → b < a := toColex_lt_toColex_iff_exists_forall_lt lemma toColex_le_toColex_iff_max'_mem : toColex s ≤ toColex t ↔ ∀ hst : s ≠ t, (s ∆ t).max' (symmDiff_nonempty.2 hst) ∈ t := by refine ⟨fun h hst ↦ ?_, fun h a has hat ↦ ?_⟩ · set m := (s ∆ t).max' (symmDiff_nonempty.2 hst) by_contra hmt have hms : m ∈ s := by simpa [mem_symmDiff, hmt] using max'_mem _ <| symmDiff_nonempty.2 hst have ⟨b, hbt, hbs, hmb⟩ := h hms hmt exact lt_irrefl _ <| (max'_lt_iff _ _).1 (hmb.lt_of_ne <| ne_of_mem_of_not_mem hms hbs) _ <| mem_symmDiff.2 <| Or.inr ⟨hbt, hbs⟩ · have hst : s ≠ t := ne_of_mem_of_not_mem' has hat refine ⟨_, h hst, ?_, le_max' _ _ <| mem_symmDiff.2 <| Or.inl ⟨has, hat⟩⟩ simpa [mem_symmDiff, h hst] using max'_mem _ <| symmDiff_nonempty.2 hst lemma le_iff_max'_mem {s t : Colex α} : s ≤ t ↔ ∀ h : s ≠ t, (ofColex s ∆ ofColex t).max' (max_mem_aux h) ∈ ofColex t := toColex_le_toColex_iff_max'_mem.trans ⟨fun h hst ↦ h <| ofColex_ne_ofColex.2 hst, fun h hst ↦ h <| ofColex_ne_ofColex.1 hst⟩ lemma 
toColex_lt_toColex_iff_max'_mem : toColex s < toColex t ↔ ∃ hst : s ≠ t, (s ∆ t).max' (symmDiff_nonempty.2 hst) ∈ t := by rw [lt_iff_le_and_ne, toColex_le_toColex_iff_max'_mem]; aesop lemma lt_iff_max'_mem {s t : Colex α} : s < t ↔ ∃ h : s ≠ t, (ofColex s ∆ ofColex t).max' (max_mem_aux h) ∈ ofColex t := by rw [lt_iff_le_and_ne, le_iff_max'_mem]; aesop lemma lt_iff_exists_filter_lt : toColex s < toColex t ↔ ∃ w ∈ t \ s, s.filter (w < ·) = t.filter (w < ·) := by simp only [lt_iff_exists_forall_lt, mem_sdiff, filter_inj, and_assoc] refine ⟨fun h ↦ ?_, ?_⟩ · let u := (t \ s).filter fun w ↦ ∀ a ∈ s, a ∉ t → a < w have mem_u {w : α} : w ∈ u ↔ w ∈ t ∧ w ∉ s ∧ ∀ a ∈ s, a ∉ t → a < w := by simp [u, and_assoc] have hu : u.Nonempty := h.imp fun _ ↦ mem_u.2 let m := max' _ hu have ⟨hmt, hms, hm⟩ : m ∈ t ∧ m ∉ s ∧ ∀ a ∈ s, a ∉ t → a < m := mem_u.1 $ max'_mem _ _ refine ⟨m, hmt, hms, fun a hma ↦ ⟨fun has ↦ not_imp_comm.1 (hm _ has) hma.asymm, fun hat ↦ ?_⟩⟩ by_contra has have hau : a ∈ u := mem_u.2 ⟨hat, has, fun b hbs hbt ↦ (hm _ hbs hbt).trans hma⟩ exact hma.not_le $ le_max' _ _ hau · rintro ⟨w, hwt, hws, hw⟩ refine ⟨w, hwt, hws, fun a has hat ↦ ?_⟩ by_contra! hwa exact hat $ (hw $ hwa.lt_of_ne $ ne_of_mem_of_not_mem hwt hat).1 has /-- If `s ≤ t` in colex and `s.card ≤ t.card`, then `s \ {a} ≤ t \ {min t}` for any `a ∈ s`. -/ lemma erase_le_erase_min' (hst : toColex s ≤ toColex t) (hcard : s.card ≤ t.card) (ha : a ∈ s) : toColex (s.erase a) ≤ toColex (t.erase <| min' t <| card_pos.1 <| (card_pos.2 ⟨a, ha⟩).trans_le hcard) := by generalize_proofs ht set m := min' t ht -- Case on whether `s = t` obtain rfl | h' := eq_or_ne s t -- If `s = t`, then `s \ {a} ≤ s \ {m}` because `m ≤ a` · exact (erase_le_erase ha $ min'_mem _ _).2 $ min'_le _ _ $ ha -- If `s ≠ t`, call `w` the colex witness. 
Case on whether `w < a` or `a < w` replace hst := hst.lt_of_ne $ toColex_inj.not.2 h' simp only [lt_iff_exists_filter_lt, mem_sdiff, filter_inj, and_assoc] at hst obtain ⟨w, hwt, hws, hw⟩ := hst obtain hwa | haw := (ne_of_mem_of_not_mem ha hws).symm.lt_or_lt -- If `w < a`, then `a` is the colex witness for `s \ {a} < t \ {m}` · have hma : m < a := (min'_le _ _ hwt).trans_lt hwa refine (lt_iff_exists_forall_lt.2 ⟨a, mem_erase.2 ⟨hma.ne', (hw hwa).1 ha⟩, not_mem_erase _ _, fun b hbs hbt ↦ ?_⟩).le change b ∉ t.erase m at hbt rw [mem_erase, not_and_or, not_ne_iff] at hbt obtain rfl | hbt := hbt · assumption · by_contra! hab exact hbt $ (hw $ hwa.trans_le hab).1 $ mem_of_mem_erase hbs -- If `a < w`, case on whether `m < w` or `m = w` obtain rfl | hmw : m = w ∨ m < w := (min'_le _ _ hwt).eq_or_lt -- If `m = w`, then `s \ {a} = t \ {m}` · have : erase t m ⊆ erase s a := by rintro b hb rw [mem_erase] at hb ⊢ exact ⟨(haw.trans_le $ min'_le _ _ hb.2).ne', (hw $ hb.1.lt_of_le' $ min'_le _ _ hb.2).2 hb.2⟩ rw [eq_of_subset_of_card_le this] rw [card_erase_of_mem ha, card_erase_of_mem (min'_mem _ _)] exact tsub_le_tsub_right hcard _ -- If `m < w`, then `w` works as the colex witness for `s \ {a} < t \ {m}` · refine (lt_iff_exists_forall_lt.2 ⟨w, mem_erase.2 ⟨hmw.ne', hwt⟩, mt mem_of_mem_erase hws, fun b hbs hbt ↦ ?_⟩).le change b ∉ t.erase m at hbt rw [mem_erase, not_and_or, not_ne_iff] at hbt obtain rfl | hbt := hbt · assumption · by_contra! hwb exact hbt $ (hw $ hwb.lt_of_ne $ ne_of_mem_of_not_mem hwt hbt).1 $ mem_of_mem_erase hbs /-- Strictly monotone functions preserve the colex ordering. -/ lemma toColex_image_le_toColex_image (hf : StrictMono f) : toColex (s.image f) ≤ toColex (t.image f) ↔ toColex s ≤ toColex t := by simp [toColex_le_toColex, hf.le_iff_le, hf.injective.eq_iff] /-- Strictly monotone functions preserve the colex ordering. 
-/ lemma toColex_image_lt_toColex_image (hf : StrictMono f) : toColex (s.image f) < toColex (t.image f) ↔ toColex s < toColex t := lt_iff_lt_of_le_iff_le <| toColex_image_le_toColex_image hf lemma toColex_image_ofColex_strictMono (hf : StrictMono f) : StrictMono fun s ↦ toColex <| image f <| ofColex s := fun _s _t ↦ (toColex_image_lt_toColex_image hf).2 section Fintype variable [Fintype α] instance instBoundedOrder : BoundedOrder (Colex α) where top := toColex univ le_top _x := toColex_le_toColex_of_subset <| subset_univ _ @[simp] lemma toColex_univ : toColex (univ : Finset α) = ⊤ := rfl @[simp] lemma ofColex_top : ofColex (⊤ : Colex α) = univ := rfl end Fintype /-! ### Initial segments -/ /-- `𝒜` is an initial segment of the colexigraphic order on sets of `r`, and that if `t` is below `s` in colex where `t` has size `r` and `s` is in `𝒜`, then `t` is also in `𝒜`. In effect, `𝒜` is downwards closed with respect to colex among sets of size `r`. -/ def IsInitSeg (𝒜 : Finset (Finset α)) (r : ℕ) : Prop := (𝒜 : Set (Finset α)).Sized r ∧ ∀ ⦃s t : Finset α⦄, s ∈ 𝒜 → toColex t < toColex s ∧ t.card = r → t ∈ 𝒜 @[simp] lemma isInitSeg_empty : IsInitSeg (∅ : Finset (Finset α)) r := by simp [IsInitSeg] /-- Initial segments are nested in some way. In particular, if they're the same size they're equal. -/ lemma IsInitSeg.total (h₁ : IsInitSeg 𝒜₁ r) (h₂ : IsInitSeg 𝒜₂ r) : 𝒜₁ ⊆ 𝒜₂ ∨ 𝒜₂ ⊆ 𝒜₁ := by classical simp_rw [← sdiff_eq_empty_iff_subset, ← not_nonempty_iff_eq_empty] by_contra! h have ⟨⟨s, hs⟩, t, ht⟩ := h rw [mem_sdiff] at hs ht obtain hst | hst | hts := trichotomous_of (α := Colex α) (· < ·) (toColex s) (toColex t) · exact hs.2 <| h₂.2 ht.1 ⟨hst, h₁.1 hs.1⟩ · simp only [toColex.injEq] at hst exact ht.2 <| hst ▸ hs.1 · exact ht.2 <| h₁.2 hs.1 ⟨hts, h₂.1 ht.1⟩ variable [Fintype α] /-- The initial segment of the colexicographic order on sets with `s.card` elements and ending at `s`. 
-/ def initSeg (s : Finset α) : Finset (Finset α) := univ.filter fun t ↦ s.card = t.card ∧ toColex t ≤ toColex s @[simp] lemma mem_initSeg : t ∈ initSeg s ↔ s.card = t.card ∧ toColex t ≤ toColex s := by simp [initSeg] lemma mem_initSeg_self : s ∈ initSeg s := by simp @[simp] lemma initSeg_nonempty : (initSeg s).Nonempty := ⟨s, mem_initSeg_self⟩ lemma isInitSeg_initSeg : IsInitSeg (initSeg s) s.card := by refine ⟨fun t ht => (mem_initSeg.1 ht).1.symm, fun t₁ t₂ ht₁ ht₂ ↦ mem_initSeg.2 ⟨ht₂.2.symm, ?_⟩⟩ rw [mem_initSeg] at ht₁ exact ht₂.1.le.trans ht₁.2 lemma IsInitSeg.exists_initSeg (h𝒜 : IsInitSeg 𝒜 r) (h𝒜₀ : 𝒜.Nonempty) : ∃ s : Finset α, s.card = r ∧ 𝒜 = initSeg s := by have hs := sup'_mem (ofColex ⁻¹' 𝒜) (LinearOrder.supClosed _) 𝒜 h𝒜₀ toColex (fun a ha ↦ by simpa using ha) refine ⟨_, h𝒜.1 hs, ?_⟩ ext t rw [mem_initSeg] refine ⟨fun p ↦ ?_, ?_⟩ · rw [h𝒜.1 p, h𝒜.1 hs] exact ⟨rfl, le_sup' _ p⟩ rintro ⟨cards, le⟩ obtain p | p := le.eq_or_lt · rwa [toColex_inj.1 p] · exact h𝒜.2 hs ⟨p, cards ▸ h𝒜.1 hs⟩ /-- Being a nonempty initial segment of colex is equivalent to being an `initSeg`. -/ lemma isInitSeg_iff_exists_initSeg : IsInitSeg 𝒜 r ∧ 𝒜.Nonempty ↔ ∃ s : Finset α, s.card = r ∧ 𝒜 = initSeg s := by refine ⟨fun h𝒜 ↦ h𝒜.1.exists_initSeg h𝒜.2, ?_⟩ rintro ⟨s, rfl, rfl⟩ exact ⟨isInitSeg_initSeg, initSeg_nonempty⟩ end Colex open Colex /-! ### Colex on `ℕ` The colexicographic order agrees with the order induced by interpreting a set of naturals as a `n`-ary expansion. -/ section Nat variable {s t : Finset ℕ} {n : ℕ} lemma geomSum_ofColex_strictMono (hn : 2 ≤ n) : StrictMono fun s ↦ ∑ k ∈ ofColex s, n ^ k := by rintro ⟨s⟩ ⟨t⟩ hst rw [toColex_lt_toColex_iff_exists_forall_lt] at hst obtain ⟨a, hat, has, ha⟩ := hst rw [← sum_sdiff_lt_sum_sdiff] exact (Nat.geomSum_lt hn <| by simpa).trans_le <| single_le_sum (fun _ _ ↦ by positivity) <| mem_sdiff.2 ⟨hat, has⟩ /-- For finsets of naturals, the colexicographic order is equivalent to the order induced by the `n`-ary expansion. 
-/ lemma geomSum_le_geomSum_iff_toColex_le_toColex (hn : 2 ≤ n) : ∑ k ∈ s, n ^ k ≤ ∑ k ∈ t, n ^ k ↔ toColex s ≤ toColex t := (geomSum_ofColex_strictMono hn).le_iff_le /-- For finsets of naturals, the colexicographic order is equivalent to the order induced by the `n`-ary expansion. -/ lemma geomSum_lt_geomSum_iff_toColex_lt_toColex (hn : 2 ≤ n) : ∑ i ∈ s, n ^ i < ∑ i ∈ t, n ^ i ↔ toColex s < toColex t := (geomSum_ofColex_strictMono hn).lt_iff_lt theorem geomSum_injective {n : ℕ} (hn : 2 ≤ n) : Function.Injective (fun s : Finset ℕ ↦ ∑ i in s, n ^ i) := by intro _ _ h rwa [le_antisymm_iff, geomSum_le_geomSum_iff_toColex_le_toColex hn, geomSum_le_geomSum_iff_toColex_le_toColex hn, ← le_antisymm_iff, Colex.toColex.injEq] at h theorem lt_geomSum_of_mem {a : ℕ} (hn : 2 ≤ n) (hi : a ∈ s) : a < ∑ i in s, n ^ i := (Nat.lt_pow_self hn a).trans_le <| single_le_sum (by simp) hi @[simp] theorem toFinset_bitIndices_twoPowSum (s : Finset ℕ) : (∑ i in s, 2 ^ i).bitIndices.toFinset = s := by simp [← (geomSum_injective rfl.le).eq_iff, List.sum_toFinset _ Nat.bitIndices_sorted.nodup] @[simp] theorem twoPowSum_toFinset_bitIndices (n : ℕ) : ∑ i in n.bitIndices.toFinset, 2 ^ i = n := by simp [List.sum_toFinset _ Nat.bitIndices_sorted.nodup] /-- The equivalence between `ℕ` and `Finset ℕ` that maps `∑ i in s, 2^i` to `s`. -/ @[simps] def equivBitIndices : ℕ ≃ Finset ℕ where toFun n := n.bitIndices.toFinset invFun s := ∑ i in s, 2^i left_inv := twoPowSum_toFinset_bitIndices right_inv := toFinset_bitIndices_twoPowSum /-- The equivalence `Nat.equivBitIndices` enumerates `Finset ℕ` in colexicographic order. 
-/ @[simps] def orderIsoColex : ℕ ≃o Colex ℕ where toFun n := Colex.toColex (equivBitIndices n) invFun s := equivBitIndices.symm s.ofColex left_inv n := equivBitIndices.symm_apply_apply n right_inv s := Finset.toColex_inj.2 (equivBitIndices.apply_symm_apply s.ofColex) map_rel_iff' := by simp [← (Finset.geomSum_le_geomSum_iff_toColex_le_toColex rfl.le), toFinset_bitIndices_twoPowSum] end Nat end Finset
Combinatorics\Configuration.lean
/- Copyright (c) 2021 Thomas Browning. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Thomas Browning -/ import Mathlib.Algebra.Order.BigOperators.Group.Finset import Mathlib.Combinatorics.Hall.Basic import Mathlib.Data.Fintype.BigOperators import Mathlib.SetTheory.Cardinal.Finite /-! # Configurations of Points and lines This file introduces abstract configurations of points and lines, and proves some basic properties. ## Main definitions * `Configuration.Nondegenerate`: Excludes certain degenerate configurations, and imposes uniqueness of intersection points. * `Configuration.HasPoints`: A nondegenerate configuration in which every pair of lines has an intersection point. * `Configuration.HasLines`: A nondegenerate configuration in which every pair of points has a line through them. * `Configuration.lineCount`: The number of lines through a given point. * `Configuration.pointCount`: The number of points on a given line. ## Main statements * `Configuration.HasLines.card_le`: `HasLines` implies `|P| ≤ |L|`. * `Configuration.HasPoints.card_le`: `HasPoints` implies `|L| ≤ |P|`. * `Configuration.HasLines.hasPoints`: `HasLines` and `|P| = |L|` implies `HasPoints`. * `Configuration.HasPoints.hasLines`: `HasPoints` and `|P| = |L|` implies `HasLines`. Together, these four statements say that any two of the following properties imply the third: (a) `HasLines`, (b) `HasPoints`, (c) `|P| = |L|`. -/ open Finset namespace Configuration variable (P L : Type*) [Membership P L] /-- A type synonym for `P`, used for the dual configuration in which the roles of points and lines are interchanged. -/ def Dual := P -- Porting note: was `this` instead of `h` instance [h : Inhabited P] : Inhabited (Dual P) := h instance [Finite P] : Finite (Dual P) := ‹Finite P› -- Porting note: was `this` instead of `h` instance [h : Fintype P] : Fintype (Dual P) := h -- Porting note (#11215): TODO: figure out if this is needed. 
set_option synthInstance.checkSynthOrder false in instance : Membership (Dual L) (Dual P) := ⟨Function.swap (Membership.mem : P → L → Prop)⟩ /-- A configuration is nondegenerate if: 1) there does not exist a line that passes through all of the points, 2) there does not exist a point that is on all of the lines, 3) there is at most one line through any two points, 4) any two lines have at most one intersection point. Conditions 3 and 4 are equivalent. -/ class Nondegenerate : Prop where exists_point : ∀ l : L, ∃ p, p ∉ l exists_line : ∀ p, ∃ l : L, p ∉ l eq_or_eq : ∀ {p₁ p₂ : P} {l₁ l₂ : L}, p₁ ∈ l₁ → p₂ ∈ l₁ → p₁ ∈ l₂ → p₂ ∈ l₂ → p₁ = p₂ ∨ l₁ = l₂ /-- A nondegenerate configuration in which every pair of lines has an intersection point. -/ class HasPoints extends Nondegenerate P L where mkPoint : ∀ {l₁ l₂ : L}, l₁ ≠ l₂ → P mkPoint_ax : ∀ {l₁ l₂ : L} (h : l₁ ≠ l₂), mkPoint h ∈ l₁ ∧ mkPoint h ∈ l₂ /-- A nondegenerate configuration in which every pair of points has a line through them. -/ class HasLines extends Nondegenerate P L where mkLine : ∀ {p₁ p₂ : P}, p₁ ≠ p₂ → L mkLine_ax : ∀ {p₁ p₂ : P} (h : p₁ ≠ p₂), p₁ ∈ mkLine h ∧ p₂ ∈ mkLine h open Nondegenerate open HasPoints (mkPoint mkPoint_ax) open HasLines (mkLine mkLine_ax) instance Dual.Nondegenerate [Nondegenerate P L] : Nondegenerate (Dual L) (Dual P) where exists_point := @exists_line P L _ _ exists_line := @exists_point P L _ _ eq_or_eq := @fun l₁ l₂ p₁ p₂ h₁ h₂ h₃ h₄ => (@eq_or_eq P L _ _ p₁ p₂ l₁ l₂ h₁ h₃ h₂ h₄).symm instance Dual.hasLines [HasPoints P L] : HasLines (Dual L) (Dual P) := { Dual.Nondegenerate _ _ with mkLine := @mkPoint P L _ _ mkLine_ax := @mkPoint_ax P L _ _ } instance Dual.hasPoints [HasLines P L] : HasPoints (Dual L) (Dual P) := { Dual.Nondegenerate _ _ with mkPoint := @mkLine P L _ _ mkPoint_ax := @mkLine_ax P L _ _ } theorem HasPoints.existsUnique_point [HasPoints P L] (l₁ l₂ : L) (hl : l₁ ≠ l₂) : ∃! 
p, p ∈ l₁ ∧ p ∈ l₂ := ⟨mkPoint hl, mkPoint_ax hl, fun _ hp => (eq_or_eq hp.1 (mkPoint_ax hl).1 hp.2 (mkPoint_ax hl).2).resolve_right hl⟩ theorem HasLines.existsUnique_line [HasLines P L] (p₁ p₂ : P) (hp : p₁ ≠ p₂) : ∃! l : L, p₁ ∈ l ∧ p₂ ∈ l := HasPoints.existsUnique_point (Dual L) (Dual P) p₁ p₂ hp variable {P L} /-- If a nondegenerate configuration has at least as many points as lines, then there exists an injective function `f` from lines to points, such that `f l` does not lie on `l`. -/ theorem Nondegenerate.exists_injective_of_card_le [Nondegenerate P L] [Fintype P] [Fintype L] (h : Fintype.card L ≤ Fintype.card P) : ∃ f : L → P, Function.Injective f ∧ ∀ l, f l ∉ l := by classical let t : L → Finset P := fun l => Set.toFinset { p | p ∉ l } suffices ∀ s : Finset L, s.card ≤ (s.biUnion t).card by -- Hall's marriage theorem obtain ⟨f, hf1, hf2⟩ := (Finset.all_card_le_biUnion_card_iff_exists_injective t).mp this exact ⟨f, hf1, fun l => Set.mem_toFinset.mp (hf2 l)⟩ intro s by_cases hs₀ : s.card = 0 -- If `s = ∅`, then `s.card = 0 ≤ (s.bUnion t).card` · simp_rw [hs₀, zero_le] by_cases hs₁ : s.card = 1 -- If `s = {l}`, then pick a point `p ∉ l` · obtain ⟨l, rfl⟩ := Finset.card_eq_one.mp hs₁ obtain ⟨p, hl⟩ := exists_point l rw [Finset.card_singleton, Finset.singleton_biUnion, Nat.one_le_iff_ne_zero] exact Finset.card_ne_zero_of_mem (Set.mem_toFinset.mpr hl) suffices (s.biUnion t)ᶜ.card ≤ sᶜ.card by -- Rephrase in terms of complements (uses `h`) rw [Finset.card_compl, Finset.card_compl, tsub_le_iff_left] at this replace := h.trans this rwa [← add_tsub_assoc_of_le s.card_le_univ, le_tsub_iff_left (le_add_left s.card_le_univ), add_le_add_iff_right] at this have hs₂ : (s.biUnion t)ᶜ.card ≤ 1 := by -- At most one line through two points of `s` refine Finset.card_le_one_iff.mpr @fun p₁ p₂ hp₁ hp₂ => ?_ simp_rw [t, Finset.mem_compl, Finset.mem_biUnion, not_exists, not_and, Set.mem_toFinset, Set.mem_setOf_eq, Classical.not_not] at hp₁ hp₂ obtain ⟨l₁, l₂, hl₁, hl₂, hl₃⟩ := 
Finset.one_lt_card_iff.mp (Nat.one_lt_iff_ne_zero_and_ne_one.mpr ⟨hs₀, hs₁⟩) exact (eq_or_eq (hp₁ l₁ hl₁) (hp₂ l₁ hl₁) (hp₁ l₂ hl₂) (hp₂ l₂ hl₂)).resolve_right hl₃ by_cases hs₃ : sᶜ.card = 0 · rw [hs₃, Nat.le_zero] rw [Finset.card_compl, tsub_eq_zero_iff_le, LE.le.le_iff_eq (Finset.card_le_univ _), eq_comm, Finset.card_eq_iff_eq_univ] at hs₃ ⊢ rw [hs₃] rw [Finset.eq_univ_iff_forall] at hs₃ ⊢ exact fun p => Exists.elim (exists_line p)-- If `s = univ`, then show `s.bUnion t = univ` fun l hl => Finset.mem_biUnion.mpr ⟨l, Finset.mem_univ l, Set.mem_toFinset.mpr hl⟩ · exact hs₂.trans (Nat.one_le_iff_ne_zero.mpr hs₃) -- If `s < univ`, then consequence of `hs₂` variable (L) /-- Number of lines through a given point. -/ noncomputable def lineCount (p : P) : ℕ := Nat.card { l : L // p ∈ l } variable (P) {L} /-- Number of points on a given line. -/ noncomputable def pointCount (l : L) : ℕ := Nat.card { p : P // p ∈ l } variable (L) theorem sum_lineCount_eq_sum_pointCount [Fintype P] [Fintype L] : ∑ p : P, lineCount L p = ∑ l : L, pointCount P l := by classical simp only [lineCount, pointCount, Nat.card_eq_fintype_card, ← Fintype.card_sigma] apply Fintype.card_congr calc (Σp, { l : L // p ∈ l }) ≃ { x : P × L // x.1 ∈ x.2 } := (Equiv.subtypeProdEquivSigmaSubtype (· ∈ ·)).symm _ ≃ { x : L × P // x.2 ∈ x.1 } := (Equiv.prodComm P L).subtypeEquiv fun x => Iff.rfl _ ≃ Σl, { p // p ∈ l } := Equiv.subtypeProdEquivSigmaSubtype fun (l : L) (p : P) => p ∈ l variable {P L} theorem HasLines.pointCount_le_lineCount [HasLines P L] {p : P} {l : L} (h : p ∉ l) [Finite { l : L // p ∈ l }] : pointCount P l ≤ lineCount L p := by by_cases hf : Infinite { p : P // p ∈ l } · exact (le_of_eq Nat.card_eq_zero_of_infinite).trans (zero_le (lineCount L p)) haveI := fintypeOfNotInfinite hf cases nonempty_fintype { l : L // p ∈ l } rw [lineCount, pointCount, Nat.card_eq_fintype_card, Nat.card_eq_fintype_card] have : ∀ p' : { p // p ∈ l }, p ≠ p' := fun p' hp' => h ((congr_arg (· ∈ l) hp').mpr p'.2) exact 
Fintype.card_le_of_injective (fun p' => ⟨mkLine (this p'), (mkLine_ax (this p')).1⟩) fun p₁ p₂ hp => Subtype.ext ((eq_or_eq p₁.2 p₂.2 (mkLine_ax (this p₁)).2 ((congr_arg _ (Subtype.ext_iff.mp hp)).mpr (mkLine_ax (this p₂)).2)).resolve_right fun h' => (congr_arg (¬p ∈ ·) h').mp h (mkLine_ax (this p₁)).1) theorem HasPoints.lineCount_le_pointCount [HasPoints P L] {p : P} {l : L} (h : p ∉ l) [hf : Finite { p : P // p ∈ l }] : lineCount L p ≤ pointCount P l := @HasLines.pointCount_le_lineCount (Dual L) (Dual P) _ _ l p h hf variable (P L) /-- If a nondegenerate configuration has a unique line through any two points, then `|P| ≤ |L|`. -/ theorem HasLines.card_le [HasLines P L] [Fintype P] [Fintype L] : Fintype.card P ≤ Fintype.card L := by classical by_contra hc₂ obtain ⟨f, hf₁, hf₂⟩ := Nondegenerate.exists_injective_of_card_le (le_of_not_le hc₂) have := calc ∑ p, lineCount L p = ∑ l, pointCount P l := sum_lineCount_eq_sum_pointCount P L _ ≤ ∑ l, lineCount L (f l) := (Finset.sum_le_sum fun l _ => HasLines.pointCount_le_lineCount (hf₂ l)) _ = ∑ p ∈ univ.map ⟨f, hf₁⟩, lineCount L p := by rw [sum_map]; dsimp _ < ∑ p, lineCount L p := by obtain ⟨p, hp⟩ := not_forall.mp (mt (Fintype.card_le_of_surjective f) hc₂) refine sum_lt_sum_of_subset (subset_univ _) (mem_univ p) ?_ ?_ fun p _ _ ↦ zero_le _ · simpa only [Finset.mem_map, exists_prop, Finset.mem_univ, true_and_iff] · rw [lineCount, Nat.card_eq_fintype_card, Fintype.card_pos_iff] obtain ⟨l, _⟩ := @exists_line P L _ _ p exact let this := not_exists.mp hp l ⟨⟨mkLine this, (mkLine_ax this).2⟩⟩ exact lt_irrefl _ this /-- If a nondegenerate configuration has a unique point on any two lines, then `|L| ≤ |P|`. 
-/ theorem HasPoints.card_le [HasPoints P L] [Fintype P] [Fintype L] : Fintype.card L ≤ Fintype.card P := @HasLines.card_le (Dual L) (Dual P) _ _ _ _ variable {P L} theorem HasLines.exists_bijective_of_card_eq [HasLines P L] [Fintype P] [Fintype L] (h : Fintype.card P = Fintype.card L) : ∃ f : L → P, Function.Bijective f ∧ ∀ l, pointCount P l = lineCount L (f l) := by classical obtain ⟨f, hf1, hf2⟩ := Nondegenerate.exists_injective_of_card_le (ge_of_eq h) have hf3 := (Fintype.bijective_iff_injective_and_card f).mpr ⟨hf1, h.symm⟩ exact ⟨f, hf3, fun l ↦ (sum_eq_sum_iff_of_le fun l _ ↦ pointCount_le_lineCount (hf2 l)).1 ((hf3.sum_comp _).trans (sum_lineCount_eq_sum_pointCount P L)).symm _ <| mem_univ _⟩ theorem HasLines.lineCount_eq_pointCount [HasLines P L] [Fintype P] [Fintype L] (hPL : Fintype.card P = Fintype.card L) {p : P} {l : L} (hpl : p ∉ l) : lineCount L p = pointCount P l := by classical obtain ⟨f, hf1, hf2⟩ := HasLines.exists_bijective_of_card_eq hPL let s : Finset (P × L) := Set.toFinset { i | i.1 ∈ i.2 } have step1 : ∑ i : P × L, lineCount L i.1 = ∑ i : P × L, pointCount P i.2 := by rw [← Finset.univ_product_univ, Finset.sum_product_right, Finset.sum_product] simp_rw [Finset.sum_const, Finset.card_univ, hPL, sum_lineCount_eq_sum_pointCount] have step2 : ∑ i ∈ s, lineCount L i.1 = ∑ i ∈ s, pointCount P i.2 := by rw [s.sum_finset_product Finset.univ fun p => Set.toFinset { l | p ∈ l }] on_goal 1 => rw [s.sum_finset_product_right Finset.univ fun l => Set.toFinset { p | p ∈ l }, eq_comm] · refine sum_bijective _ hf1 (by simp) fun l _ ↦ ?_ simp_rw [hf2, sum_const, Set.toFinset_card, ← Nat.card_eq_fintype_card] change pointCount P l • _ = lineCount L (f l) • _ rw [hf2] all_goals simp_rw [s, Finset.mem_univ, true_and_iff, Set.mem_toFinset]; exact fun p => Iff.rfl have step3 : ∑ i ∈ sᶜ, lineCount L i.1 = ∑ i ∈ sᶜ, pointCount P i.2 := by rwa [← s.sum_add_sum_compl, ← s.sum_add_sum_compl, step2, add_left_cancel_iff] at step1 rw [← Set.toFinset_compl] at step3 
exact ((Finset.sum_eq_sum_iff_of_le fun i hi => HasLines.pointCount_le_lineCount (by exact Set.mem_toFinset.mp hi)).mp step3.symm (p, l) (Set.mem_toFinset.mpr hpl)).symm theorem HasPoints.lineCount_eq_pointCount [HasPoints P L] [Fintype P] [Fintype L] (hPL : Fintype.card P = Fintype.card L) {p : P} {l : L} (hpl : p ∉ l) : lineCount L p = pointCount P l := (@HasLines.lineCount_eq_pointCount (Dual L) (Dual P) _ _ _ _ hPL.symm l p hpl).symm /-- If a nondegenerate configuration has a unique line through any two points, and if `|P| = |L|`, then there is a unique point on any two lines. -/ noncomputable def HasLines.hasPoints [HasLines P L] [Fintype P] [Fintype L] (h : Fintype.card P = Fintype.card L) : HasPoints P L := let this : ∀ l₁ l₂ : L, l₁ ≠ l₂ → ∃ p : P, p ∈ l₁ ∧ p ∈ l₂ := fun l₁ l₂ hl => by classical obtain ⟨f, _, hf2⟩ := HasLines.exists_bijective_of_card_eq h haveI : Nontrivial L := ⟨⟨l₁, l₂, hl⟩⟩ haveI := Fintype.one_lt_card_iff_nontrivial.mp ((congr_arg _ h).mpr Fintype.one_lt_card) have h₁ : ∀ p : P, 0 < lineCount L p := fun p => Exists.elim (exists_ne p) fun q hq => (congr_arg _ Nat.card_eq_fintype_card).mpr (Fintype.card_pos_iff.mpr ⟨⟨mkLine hq, (mkLine_ax hq).2⟩⟩) have h₂ : ∀ l : L, 0 < pointCount P l := fun l => (congr_arg _ (hf2 l)).mpr (h₁ (f l)) obtain ⟨p, hl₁⟩ := Fintype.card_pos_iff.mp ((congr_arg _ Nat.card_eq_fintype_card).mp (h₂ l₁)) by_cases hl₂ : p ∈ l₂ · exact ⟨p, hl₁, hl₂⟩ have key' : Fintype.card { q : P // q ∈ l₂ } = Fintype.card { l : L // p ∈ l } := ((HasLines.lineCount_eq_pointCount h hl₂).trans Nat.card_eq_fintype_card).symm.trans Nat.card_eq_fintype_card have : ∀ q : { q // q ∈ l₂ }, p ≠ q := fun q hq => hl₂ ((congr_arg (· ∈ l₂) hq).mpr q.2) let f : { q : P // q ∈ l₂ } → { l : L // p ∈ l } := fun q => ⟨mkLine (this q), (mkLine_ax (this q)).1⟩ have hf : Function.Injective f := fun q₁ q₂ hq => Subtype.ext ((eq_or_eq q₁.2 q₂.2 (mkLine_ax (this q₁)).2 ((congr_arg _ (Subtype.ext_iff.mp hq)).mpr (mkLine_ax (this q₂)).2)).resolve_right fun h 
=> (congr_arg (¬p ∈ ·) h).mp hl₂ (mkLine_ax (this q₁)).1) have key' := ((Fintype.bijective_iff_injective_and_card f).mpr ⟨hf, key'⟩).2 obtain ⟨q, hq⟩ := key' ⟨l₁, hl₁⟩ exact ⟨q, (congr_arg _ (Subtype.ext_iff.mp hq)).mp (mkLine_ax (this q)).2, q.2⟩ { ‹HasLines P L› with mkPoint := fun {l₁ l₂} hl => Classical.choose (this l₁ l₂ hl) mkPoint_ax := fun {l₁ l₂} hl => Classical.choose_spec (this l₁ l₂ hl) } /-- If a nondegenerate configuration has a unique point on any two lines, and if `|P| = |L|`, then there is a unique line through any two points. -/ noncomputable def HasPoints.hasLines [HasPoints P L] [Fintype P] [Fintype L] (h : Fintype.card P = Fintype.card L) : HasLines P L := let this := @HasLines.hasPoints (Dual L) (Dual P) _ _ _ _ h.symm { ‹HasPoints P L› with mkLine := @fun _ _ => this.mkPoint mkLine_ax := @fun _ _ => this.mkPoint_ax } variable (P L) /-- A projective plane is a nondegenerate configuration in which every pair of lines has an intersection point, every pair of points has a line through them, and which has three points in general position. -/ class ProjectivePlane extends HasPoints P L, HasLines P L where exists_config : ∃ (p₁ p₂ p₃ : P) (l₁ l₂ l₃ : L), p₁ ∉ l₂ ∧ p₁ ∉ l₃ ∧ p₂ ∉ l₁ ∧ p₂ ∈ l₂ ∧ p₂ ∈ l₃ ∧ p₃ ∉ l₁ ∧ p₃ ∈ l₂ ∧ p₃ ∉ l₃ namespace ProjectivePlane variable [ProjectivePlane P L] instance : ProjectivePlane (Dual L) (Dual P) := { Dual.hasPoints _ _, Dual.hasLines _ _ with exists_config := let ⟨p₁, p₂, p₃, l₁, l₂, l₃, h₁₂, h₁₃, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ := @exists_config P L _ _ ⟨l₁, l₂, l₃, p₁, p₂, p₃, h₂₁, h₃₁, h₁₂, h₂₂, h₃₂, h₁₃, h₂₃, h₃₃⟩ } /-- The order of a projective plane is one less than the number of lines through an arbitrary point. Equivalently, it is one less than the number of points on an arbitrary line. 
-/ noncomputable def order : ℕ := lineCount L (Classical.choose (@exists_config P L _ _)) - 1 theorem card_points_eq_card_lines [Fintype P] [Fintype L] : Fintype.card P = Fintype.card L := le_antisymm (HasLines.card_le P L) (HasPoints.card_le P L) variable {P} theorem lineCount_eq_lineCount [Finite P] [Finite L] (p q : P) : lineCount L p = lineCount L q := by cases nonempty_fintype P cases nonempty_fintype L obtain ⟨p₁, p₂, p₃, l₁, l₂, l₃, h₁₂, h₁₃, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ := @exists_config P L _ _ have h := card_points_eq_card_lines P L let n := lineCount L p₂ have hp₂ : lineCount L p₂ = n := rfl have hl₁ : pointCount P l₁ = n := (HasLines.lineCount_eq_pointCount h h₂₁).symm.trans hp₂ have hp₃ : lineCount L p₃ = n := (HasLines.lineCount_eq_pointCount h h₃₁).trans hl₁ have hl₃ : pointCount P l₃ = n := (HasLines.lineCount_eq_pointCount h h₃₃).symm.trans hp₃ have hp₁ : lineCount L p₁ = n := (HasLines.lineCount_eq_pointCount h h₁₃).trans hl₃ have hl₂ : pointCount P l₂ = n := (HasLines.lineCount_eq_pointCount h h₁₂).symm.trans hp₁ suffices ∀ p : P, lineCount L p = n by exact (this p).trans (this q).symm refine fun p => or_not.elim (fun h₂ => ?_) fun h₂ => (HasLines.lineCount_eq_pointCount h h₂).trans hl₂ refine or_not.elim (fun h₃ => ?_) fun h₃ => (HasLines.lineCount_eq_pointCount h h₃).trans hl₃ rw [(eq_or_eq h₂ h₂₂ h₃ h₂₃).resolve_right fun h => h₃₃ ((congr_arg (Membership.mem p₃) h).mp h₃₂)] variable (P) {L} theorem pointCount_eq_pointCount [Finite P] [Finite L] (l m : L) : pointCount P l = pointCount P m := by apply lineCount_eq_lineCount (Dual P) variable {P} theorem lineCount_eq_pointCount [Finite P] [Finite L] (p : P) (l : L) : lineCount L p = pointCount P l := Exists.elim (exists_point l) fun q hq => (lineCount_eq_lineCount L p q).trans <| by cases nonempty_fintype P cases nonempty_fintype L exact HasLines.lineCount_eq_pointCount (card_points_eq_card_lines P L) hq variable (P L) theorem Dual.order [Finite P] [Finite L] : order (Dual L) (Dual P) = order P 
L := congr_arg (fun n => n - 1) (lineCount_eq_pointCount _ _) variable {P} theorem lineCount_eq [Finite P] [Finite L] (p : P) : lineCount L p = order P L + 1 := by classical obtain ⟨q, -, -, l, -, -, -, -, h, -⟩ := Classical.choose_spec (@exists_config P L _ _) cases nonempty_fintype { l : L // q ∈ l } rw [order, lineCount_eq_lineCount L p q, lineCount_eq_lineCount L (Classical.choose _) q, lineCount, Nat.card_eq_fintype_card, Nat.sub_add_cancel] exact Fintype.card_pos_iff.mpr ⟨⟨l, h⟩⟩ variable (P) {L} theorem pointCount_eq [Finite P] [Finite L] (l : L) : pointCount P l = order P L + 1 := (lineCount_eq (Dual P) _).trans (congr_arg (fun n => n + 1) (Dual.order P L)) variable (L) theorem one_lt_order [Finite P] [Finite L] : 1 < order P L := by obtain ⟨p₁, p₂, p₃, l₁, l₂, l₃, -, -, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ := @exists_config P L _ _ cases nonempty_fintype { p : P // p ∈ l₂ } rw [← add_lt_add_iff_right 1, ← pointCount_eq _ l₂, pointCount, Nat.card_eq_fintype_card, Fintype.two_lt_card_iff] simp_rw [Ne, Subtype.ext_iff] have h := mkPoint_ax fun h => h₂₁ ((congr_arg _ h).mpr h₂₂) exact ⟨⟨mkPoint _, h.2⟩, ⟨p₂, h₂₂⟩, ⟨p₃, h₃₂⟩, ne_of_mem_of_not_mem h.1 h₂₁, ne_of_mem_of_not_mem h.1 h₃₁, ne_of_mem_of_not_mem h₂₃ h₃₃⟩ variable {P} theorem two_lt_lineCount [Finite P] [Finite L] (p : P) : 2 < lineCount L p := by simpa only [lineCount_eq L p, Nat.succ_lt_succ_iff] using one_lt_order P L variable (P) {L} theorem two_lt_pointCount [Finite P] [Finite L] (l : L) : 2 < pointCount P l := by simpa only [pointCount_eq P l, Nat.succ_lt_succ_iff] using one_lt_order P L variable (L) theorem card_points [Fintype P] [Finite L] : Fintype.card P = order P L ^ 2 + order P L + 1 := by cases nonempty_fintype L obtain ⟨p, -⟩ := @exists_config P L _ _ let ϕ : { q // q ≠ p } ≃ Σl : { l : L // p ∈ l }, { q // q ∈ l.1 ∧ q ≠ p } := { toFun := fun q => ⟨⟨mkLine q.2, (mkLine_ax q.2).2⟩, q, (mkLine_ax q.2).1, q.2⟩ invFun := fun lq => ⟨lq.2, lq.2.2.2⟩ left_inv := fun q => Subtype.ext rfl right_inv := 
fun lq => Sigma.subtype_ext (Subtype.ext ((eq_or_eq (mkLine_ax lq.2.2.2).1 (mkLine_ax lq.2.2.2).2 lq.2.2.1 lq.1.2).resolve_left lq.2.2.2)) rfl } classical have h1 : Fintype.card { q // q ≠ p } + 1 = Fintype.card P := by apply (eq_tsub_iff_add_eq_of_le (Nat.succ_le_of_lt (Fintype.card_pos_iff.mpr ⟨p⟩))).mp convert (Fintype.card_subtype_compl _).trans (congr_arg _ (Fintype.card_subtype_eq p)) have h2 : ∀ l : { l : L // p ∈ l }, Fintype.card { q // q ∈ l.1 ∧ q ≠ p } = order P L := by intro l rw [← Fintype.card_congr (Equiv.subtypeSubtypeEquivSubtypeInter (· ∈ l.val) (· ≠ p)), Fintype.card_subtype_compl fun x : Subtype (· ∈ l.val) => x.val = p, ← Nat.card_eq_fintype_card] refine tsub_eq_of_eq_add ((pointCount_eq P l.1).trans ?_) rw [← Fintype.card_subtype_eq (⟨p, l.2⟩ : { q : P // q ∈ l.1 })] simp_rw [Subtype.ext_iff_val] simp_rw [← h1, Fintype.card_congr ϕ, Fintype.card_sigma, h2, Finset.sum_const, Finset.card_univ] rw [← Nat.card_eq_fintype_card, ← lineCount, lineCount_eq, smul_eq_mul, Nat.succ_mul, sq] theorem card_lines [Finite P] [Fintype L] : Fintype.card L = order P L ^ 2 + order P L + 1 := (card_points (Dual L) (Dual P)).trans (congr_arg (fun n => n ^ 2 + n + 1) (Dual.order P L)) end ProjectivePlane end Configuration
Combinatorics\HalesJewett.lean
/- Copyright (c) 2021 David Wärn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: David Wärn -/ import Mathlib.Algebra.BigOperators.Group.Finset import Mathlib.Data.Countable.Small import Mathlib.Data.Fintype.Option import Mathlib.Data.Fintype.Pi import Mathlib.Data.Fintype.Prod import Mathlib.Data.Fintype.Shrink import Mathlib.Data.Fintype.Sum /-! # The Hales-Jewett theorem We prove the Hales-Jewett theorem. We deduce Van der Waerden's theorem and the multidimensional Hales-Jewett theorem as corollaries. The Hales-Jewett theorem is a result in Ramsey theory dealing with *combinatorial lines*. Given an 'alphabet' `α : Type*` and `a b : α`, an example of a combinatorial line in `α^5` is `{ (a, x, x, b, x) | x : α }`. See `Combinatorics.Line` for a precise general definition. The Hales-Jewett theorem states that for any fixed finite types `α` and `κ`, there exists a (potentially huge) finite type `ι` such that whenever `ι → α` is `κ`-colored (i.e. for any coloring `C : (ι → α) → κ`), there exists a monochromatic line. We prove the Hales-Jewett theorem using the idea of *color focusing* and a *product argument*. See the proof of `Combinatorics.Line.exists_mono_in_high_dimension'` for details. *Combinatorial subspaces* are higher-dimensional analogues of combinatorial lines. See `Combinatorics.Subspace`. The multidimensional Hales-Jewett theorem generalises the statement above from combinatorial lines to combinatorial subspaces of a fixed dimension. The version of Van der Waerden's theorem in this file states that whenever a commutative monoid `M` is finitely colored and `S` is a finite subset, there exists a monochromatic homothetic copy of `S`. This follows from the Hales-Jewett theorem by considering the map `(ι → S) → M` sending `v` to `∑ i : ι, v i`, which sends a combinatorial line to a homothetic copy of `S`. ## Main results - `Combinatorics.Line.exists_mono_in_high_dimension`: The Hales-Jewett theorem. 
- `Combinatorics.Subspace.exists_mono_in_high_dimension`: The multidimensional Hales-Jewett theorem. - `Combinatorics.exists_mono_homothetic_copy`: A generalization of Van der Waerden's theorem. ## Implementation details For convenience, we work directly with finite types instead of natural numbers. That is, we write `α, ι, κ` for (finite) types where one might traditionally use natural numbers `n, H, c`. This allows us to work directly with `α`, `Option α`, `(ι → α) → κ`, and `ι ⊕ ι'` instead of `Fin n`, `Fin (n+1)`, `Fin (c^(n^H))`, and `Fin (H + H')`. ## TODO - Prove a finitary version of Van der Waerden's theorem (either by compactness or by modifying the current proof). - One could reformulate the proof of Hales-Jewett to give explicit upper bounds on the number of coordinates needed. ## Tags combinatorial line, Ramsey theory, arithmetic progression ### References * https://en.wikipedia.org/wiki/Hales%E2%80%93Jewett_theorem -/ open Function open scoped Classical universe u v variable {η α ι κ : Type*} namespace Combinatorics /-- The type of combinatorial subspaces. A subspace `l : Subspace η α ι` in the hypercube `ι → α` defines a function `(η → α) → ι → α` from `η → α` to the hypercube, such that for each coordinate `i : ι` and direction `e : η`, the function `fun x ↦ l x i` is either `fun x ↦ x e` for some direction `e : η` or constant. We require subspaces to be non-degenerate in the sense that, for every `e : η`, `fun x ↦ l x i` is `fun x ↦ x e` for at least one `i`. Formally, a subspace is represented by a word `l.idxFun : ι → α ⊕ η` which says whether `fun x ↦ l x i` is `fun x ↦ x e` (corresponding to `l.idxFun i = Sum.inr e`) or constantly `a` (corresponding to `l.idxFun i = Sum.inl a`). When `α` has size `1` there can be many elements of `Subspace η α ι` defining the same function. -/ @[ext] structure Subspace (η α ι : Type*) where /-- The word representing a combinatorial subspace. 
`l.idxFun i = Sum.inr e` means that `l x i = x e` for all `x` and `l.idxFun i = Sum.inl a` means
that `l x i = a` for all `x`. -/
  idxFun : ι → α ⊕ η
  /-- We require combinatorial subspaces to be nontrivial in the sense that `fun x ↦ l x i` is
  `fun x ↦ x e` for at least one coordinate `i`. -/
  proper : ∀ e, ∃ i, idxFun i = Sum.inr e

namespace Subspace
variable {η α ι κ : Type*} {l : Subspace η α ι} {x : η → α} {i : ι} {a : α} {e : η}

/-- The combinatorial subspace corresponding to the identity embedding `(ι → α) → (ι → α)`. -/
instance : Inhabited (Subspace ι α ι) := ⟨⟨Sum.inr, fun i ↦ ⟨i, rfl⟩⟩⟩

/-- Consider a subspace `l : Subspace η α ι` as a function `(η → α) → ι → α`. -/
@[coe] def toFun (l : Subspace η α ι) (x : η → α) (i : ι) : α := (l.idxFun i).elim id x

instance instCoeFun : CoeFun (Subspace η α ι) (fun _ ↦ (η → α) → ι → α) := ⟨toFun⟩

lemma coe_apply (l : Subspace η α ι) (x : η → α) (i : ι) : l x i = (l.idxFun i).elim id x := rfl

-- Note: This is not made a `FunLike` instance to avoid having two syntactically different coercions
lemma coe_injective [Nontrivial α] : Injective ((⇑) : Subspace η α ι → (η → α) → ι → α) := by
  rintro l m hlm
  ext i
  simp only [funext_iff] at hlm
  cases hl : idxFun l i with
  | inl a =>
    obtain ⟨b, hba⟩ := exists_ne a
    cases hm : idxFun m i <;> simpa [hl, hm, hba.symm, coe_apply] using hlm (const _ b) i
  | inr e =>
    cases hm : idxFun m i with
    | inl a =>
      obtain ⟨b, hba⟩ := exists_ne a
      simpa [hl, hm, hba, coe_apply] using hlm (const _ b) i
    | inr f =>
      obtain ⟨a, b, hab⟩ := exists_pair_ne α
      simp only [Sum.inr.injEq]
      by_contra!
hef simpa [hl, hm, hef, hab, coe_apply] using hlm (Function.update (const _ a) f b) i lemma apply_def (l : Subspace η α ι) (x : η → α) (i : ι) : l x i = (l.idxFun i).elim id x := rfl lemma apply_inl (h : l.idxFun i = Sum.inl a) : l x i = a := by simp [apply_def, h] lemma apply_inr (h : l.idxFun i = Sum.inr e) : l x i = x e := by simp [apply_def, h] /-- Given a coloring `C` of `ι → α` and a combinatorial subspace `l` of `ι → α`, `l.IsMono C` means that `l` is monochromatic with regard to `C`. -/ def IsMono (C : (ι → α) → κ) (l : Subspace η α ι) : Prop := ∃ c, ∀ x, C (l x) = c variable {η' α' ι' : Type*} /-- Change the index types of a subspace. -/ def reindex (l : Subspace η α ι) (eη : η ≃ η') (eα : α ≃ α') (eι : ι ≃ ι') : Subspace η' α' ι' where idxFun i := (l.idxFun <| eι.symm i).map eα eη proper e := (eι.exists_congr fun i ↦ by cases h : idxFun l i <;> simp [*, Function.funext_iff, Equiv.eq_symm_apply]).1 <| l.proper <| eη.symm e @[simp] lemma reindex_apply (l : Subspace η α ι) (eη : η ≃ η') (eα : α ≃ α') (eι : ι ≃ ι') (x i) : l.reindex eη eα eι x i = eα (l (eα.symm ∘ x ∘ eη) <| eι.symm i) := by cases h : l.idxFun (eι.symm i) <;> simp [h, reindex, coe_apply] @[simp] lemma reindex_isMono {eη : η ≃ η'} {eα : α ≃ α'} {eι : ι ≃ ι'} {C : (ι' → α') → κ} : (l.reindex eη eα eι).IsMono C ↔ l.IsMono fun x ↦ C <| eα ∘ x ∘ eι.symm := by simp only [IsMono, funext (reindex_apply _ _ _ _ _), coe_apply] exact exists_congr fun c ↦ (eη.arrowCongr eα).symm.forall_congr <| by aesop protected lemma IsMono.reindex {eη : η ≃ η'} {eα : α ≃ α'} {eι : ι ≃ ι'} {C : (ι → α) → κ} (hl : l.IsMono C) : (l.reindex eη eα eι).IsMono fun x ↦ C <| eα.symm ∘ x ∘ eι := by simp [reindex_isMono, Function.comp.assoc]; simpa [← Function.comp.assoc] end Subspace /-- The type of combinatorial lines. A line `l : Line α ι` in the hypercube `ι → α` defines a function `α → ι → α` from `α` to the hypercube, such that for each coordinate `i : ι`, the function `fun x ↦ l x i` is either `id` or constant. 
We require lines to be nontrivial in the sense that `fun x ↦ l x i` is `id` for at least one `i`. Formally, a line is represented by a word `l.idxFun : ι → Option α` which says whether `fun x ↦ l x i` is `id` (corresponding to `l.idxFun i = none`) or constantly `y` (corresponding to `l.idxFun i = some y`). When `α` has size `1` there can be many elements of `Line α ι` defining the same function. -/ @[ext] structure Line (α ι : Type*) where /-- The word representing a combinatorial line. `l.idxfun i = none` means that `l x i = x` for all `x` and `l.idxfun i = some y` means that `l x i = y`. -/ idxFun : ι → Option α /-- We require combinatorial lines to be nontrivial in the sense that `fun x ↦ l x i` is `id` for at least one coordinate `i`. -/ proper : ∃ i, idxFun i = none namespace Line variable {l : Line α ι} {i : ι} {a x : α} /-- Consider a line `l : Line α ι` as a function `α → ι → α`. -/ @[coe] def toFun (l : Line α ι) (x : α) (i : ι) : α := (l.idxFun i).getD x -- This lets us treat a line `l : Line α ι` as a function `α → ι → α`. instance instCoeFun : CoeFun (Line α ι) fun _ => α → ι → α := ⟨fun l x i => (l.idxFun i).getD x⟩ lemma coe_apply (l : Line α ι) (x : α) (i : ι) : l x i = (l.idxFun i).getD x := rfl -- Note: This is not made a `FunLike` instance to avoid having two syntactically different coercions lemma coe_injective [Nontrivial α] : Injective ((⇑) : Line α ι → α → ι → α) := by rintro l m hlm ext i a obtain ⟨b, hba⟩ := exists_ne a simp only [Option.mem_def, funext_iff] at hlm ⊢ refine ⟨fun h ↦ ?_, fun h ↦ ?_⟩ · cases hi : idxFun m i <;> simpa [@eq_comm _ a, hi, h, hba] using hlm b i · cases hi : idxFun l i <;> simpa [@eq_comm _ a, hi, h, hba] using hlm b i /-- A line is monochromatic if all its points are the same color. -/ def IsMono {α ι κ} (C : (ι → α) → κ) (l : Line α ι) : Prop := ∃ c, ∀ x, C (l x) = c /-- Consider a line as a one-dimensional subspace. 
-/ def toSubspaceUnit (l : Line α ι) : Subspace Unit α ι where idxFun i := (l.idxFun i).elim (.inr ()) .inl proper _ := l.proper.imp fun i hi ↦ by simp [hi] @[simp] lemma toSubspaceUnit_apply (l : Line α ι) (a) : ⇑l.toSubspaceUnit a = l (a ()) := by ext i; cases h : l.idxFun i <;> simp [toSubspaceUnit, h, Subspace.coe_apply] @[simp] lemma toSubspaceUnit_isMono {C : (ι → α) → κ} : l.toSubspaceUnit.IsMono C ↔ l.IsMono C := by simp only [Subspace.IsMono, toSubspaceUnit_apply, IsMono] exact exists_congr fun c ↦ ⟨fun h a ↦ h fun _ ↦ a, fun h a ↦ h _⟩ protected alias ⟨_, IsMono.toSubspaceUnit⟩ := toSubspaceUnit_isMono /-- Consider a line in `ι → η → α` as a `η`-dimensional subspace in `ι × η → α`. -/ def toSubspace (l : Line (η → α) ι) : Subspace η α (ι × η) where idxFun ie := (l.idxFun ie.1).elim (.inr ie.2) (fun f ↦ .inl <| f ie.2) proper e := let ⟨i, hi⟩ := l.proper; ⟨(i, e), by simp [hi]⟩ @[simp] lemma toSubspace_apply (l : Line (η → α) ι) (a ie) : ⇑l.toSubspace a ie = l a ie.1 ie.2 := by cases h : l.idxFun ie.1 <;> simp [toSubspace, h, coe_apply, Subspace.coe_apply] @[simp] lemma toSubspace_isMono {l : Line (η → α) ι} {C : (ι × η → α) → κ} : l.toSubspace.IsMono C ↔ l.IsMono fun x : ι → η → α ↦ C fun (i, e) ↦ x i e := by simp [Subspace.IsMono, IsMono, funext (toSubspace_apply _ _)] protected alias ⟨_, IsMono.toSubspace⟩ := toSubspace_isMono /-- The diagonal line. It is the identity at every coordinate. -/ def diagonal (α ι) [Nonempty ι] : Line α ι where idxFun _ := none proper := ⟨Classical.arbitrary ι, rfl⟩ instance (α ι) [Nonempty ι] : Inhabited (Line α ι) := ⟨diagonal α ι⟩ /-- The type of lines that are only one color except possibly at their endpoints. -/ structure AlmostMono {α ι κ : Type*} (C : (ι → Option α) → κ) where /-- The underlying line of an almost monochromatic line, where the coordinate dimension `α` is extended by an additional symbol `none`, thought to be marking the endpoint of the line. 
-/ line : Line (Option α) ι /-- The main color of an almost monochromatic line. -/ color : κ /-- The proposition that the underlying line of an almost monochromatic line assumes its main color except possibly at the endpoints. -/ has_color : ∀ x : α, C (line (some x)) = color instance {α ι κ : Type*} [Nonempty ι] [Inhabited κ] : Inhabited (AlmostMono fun _ : ι → Option α => (default : κ)) := ⟨{ line := default color := default has_color := fun _ ↦ rfl}⟩ /-- The type of collections of lines such that - each line is only one color except possibly at its endpoint - the lines all have the same endpoint - the colors of the lines are distinct. Used in the proof `exists_mono_in_high_dimension`. -/ structure ColorFocused {α ι κ : Type*} (C : (ι → Option α) → κ) where /-- The underlying multiset of almost monochromatic lines of a color-focused collection. -/ lines : Multiset (AlmostMono C) /-- The common endpoint of the lines in the color-focused collection. -/ focus : ι → Option α /-- The proposition that all lines in a color-focused collection have the same endpoint. -/ is_focused : ∀ p ∈ lines, p.line none = focus /-- The proposition that all lines in a color-focused collection of lines have distinct colors. -/ distinct_colors : (lines.map AlmostMono.color).Nodup instance {α ι κ} (C : (ι → Option α) → κ) : Inhabited (ColorFocused C) := by refine ⟨⟨0, fun _ => none, fun h => ?_, Multiset.nodup_zero⟩⟩ simp only [Multiset.not_mem_zero, IsEmpty.forall_iff] /-- A function `f : α → α'` determines a function `line α ι → line α' ι`. For a coordinate `i` `l.map f` is the identity at `i` if `l` is, and constantly `f y` if `l` is constantly `y` at `i`. -/ def map {α α' ι} (f : α → α') (l : Line α ι) : Line α' ι where idxFun i := (l.idxFun i).map f proper := ⟨l.proper.choose, by simp only [l.proper.choose_spec, Option.map_none']⟩ /-- A point in `ι → α` and a line in `ι' → α` determine a line in `ι ⊕ ι' → α`. 
-/ def vertical {α ι ι'} (v : ι → α) (l : Line α ι') : Line α (ι ⊕ ι') where idxFun := Sum.elim (some ∘ v) l.idxFun proper := ⟨Sum.inr l.proper.choose, l.proper.choose_spec⟩ /-- A line in `ι → α` and a point in `ι' → α` determine a line in `ι ⊕ ι' → α`. -/ def horizontal {α ι ι'} (l : Line α ι) (v : ι' → α) : Line α (ι ⊕ ι') where idxFun := Sum.elim l.idxFun (some ∘ v) proper := ⟨Sum.inl l.proper.choose, l.proper.choose_spec⟩ /-- One line in `ι → α` and one in `ι' → α` together determine a line in `ι ⊕ ι' → α`. -/ def prod {α ι ι'} (l : Line α ι) (l' : Line α ι') : Line α (ι ⊕ ι') where idxFun := Sum.elim l.idxFun l'.idxFun proper := ⟨Sum.inl l.proper.choose, l.proper.choose_spec⟩ theorem apply_def (l : Line α ι) (x : α) : l x = fun i => (l.idxFun i).getD x := rfl theorem apply_none {α ι} (l : Line α ι) (x : α) (i : ι) (h : l.idxFun i = none) : l x i = x := by simp only [Option.getD_none, h, l.apply_def] lemma apply_some (h : l.idxFun i = some a) : l x i = a := by simp [l.apply_def, h] @[simp] theorem map_apply {α α' ι} (f : α → α') (l : Line α ι) (x : α) : l.map f (f x) = f ∘ l x := by simp only [Line.apply_def, Line.map, Option.getD_map, comp_def] @[simp] theorem vertical_apply {α ι ι'} (v : ι → α) (l : Line α ι') (x : α) : l.vertical v x = Sum.elim v (l x) := by funext i cases i <;> rfl @[simp] theorem horizontal_apply {α ι ι'} (l : Line α ι) (v : ι' → α) (x : α) : l.horizontal v x = Sum.elim (l x) v := by funext i cases i <;> rfl @[simp] theorem prod_apply {α ι ι'} (l : Line α ι) (l' : Line α ι') (x : α) : l.prod l' x = Sum.elim (l x) (l' x) := by funext i cases i <;> rfl @[simp] theorem diagonal_apply {α ι} [Nonempty ι] (x : α) : Line.diagonal α ι x = fun _ => x := by simp_rw [Line.diagonal, Option.getD_none] /-- The **Hales-Jewett theorem**. This version has a restriction on universe levels which is necessary for the proof. See `exists_mono_in_high_dimension` for a fully universe-polymorphic version. 
-/ private theorem exists_mono_in_high_dimension' : ∀ (α : Type u) [Finite α] (κ : Type max v u) [Finite κ], ∃ (ι : Type) (_ : Fintype ι), ∀ C : (ι → α) → κ, ∃ l : Line α ι, l.IsMono C := -- The proof proceeds by induction on `α`. Finite.induction_empty_option (-- We have to show that the theorem is invariant under `α ≃ α'` for the induction to work. fun {α α'} e => forall_imp fun κ => forall_imp fun _ => Exists.imp fun ι => Exists.imp fun _ h C => let ⟨l, c, lc⟩ := h fun v => C (e ∘ v) ⟨l.map e, c, e.forall_congr_right.mp fun x => by rw [← lc x, Line.map_apply]⟩) (by -- This deals with the degenerate case where `α` is empty. intro κ _ by_cases h : Nonempty κ · refine ⟨Unit, inferInstance, fun C => ⟨default, Classical.arbitrary _, PEmpty.rec⟩⟩ · exact ⟨Empty, inferInstance, fun C => (h ⟨C (Empty.rec)⟩).elim⟩) (by -- Now we have to show that the theorem holds for `Option α` if it holds for `α`. intro α _ ihα κ _ cases nonempty_fintype κ -- Later we'll need `α` to be nonempty. So we first deal with the trivial case where `α` is -- empty. -- Then `Option α` has only one element, so any line is monochromatic. by_cases h : Nonempty α case neg => refine ⟨Unit, inferInstance, fun C => ⟨diagonal _ Unit, C fun _ => none, ?_⟩⟩ rintro (_ | ⟨a⟩) · rfl · exact (h ⟨a⟩).elim -- The key idea is to show that for every `r`, in high dimension we can either find -- `r` color focused lines or a monochromatic line. suffices key : ∀ r : ℕ, ∃ (ι : Type) (_ : Fintype ι), ∀ C : (ι → Option α) → κ, (∃ s : ColorFocused C, Multiset.card s.lines = r) ∨ ∃ l, IsMono C l by -- Given the key claim, we simply take `r = |κ| + 1`. We cannot have this many distinct colors -- so we must be in the second case, where there is a monochromatic line. 
obtain ⟨ι, _inst, hι⟩ := key (Fintype.card κ + 1) refine ⟨ι, _inst, fun C => (hι C).resolve_left ?_⟩ rintro ⟨s, sr⟩ apply Nat.not_succ_le_self (Fintype.card κ) rw [← Nat.add_one, ← sr, ← Multiset.card_map, ← Finset.card_mk] exact Finset.card_le_univ ⟨_, s.distinct_colors⟩ -- We now prove the key claim, by induction on `r`. intro r induction' r with r ihr -- The base case `r = 0` is trivial as the empty collection is color-focused. · exact ⟨Empty, inferInstance, fun C => Or.inl ⟨default, Multiset.card_zero⟩⟩ -- Supposing the key claim holds for `r`, we need to show it for `r+1`. First pick a high -- enough dimension `ι` for `r`. obtain ⟨ι, _inst, hι⟩ := ihr -- Then since the theorem holds for `α` with any number of colors, pick a dimension `ι'` such -- that `ι' → α` always has a monochromatic line whenever it is `(ι → Option α) → κ`-colored. specialize ihα ((ι → Option α) → κ) obtain ⟨ι', _inst, hι'⟩ := ihα -- We claim that `ι ⊕ ι'` works for `Option α` and `κ`-coloring. refine ⟨ι ⊕ ι', inferInstance, ?_⟩ intro C -- A `κ`-coloring of `ι ⊕ ι' → Option α` induces an `(ι → Option α) → κ`-coloring of `ι' → α`. specialize hι' fun v' v => C (Sum.elim v (some ∘ v')) -- By choice of `ι'` this coloring has a monochromatic line `l'` with color class `C'`, where -- `C'` is a `κ`-coloring of `ι → α`. obtain ⟨l', C', hl'⟩ := hι' -- If `C'` has a monochromatic line, then so does `C`. We use this in two places below. have mono_of_mono : (∃ l, IsMono C' l) → ∃ l, IsMono C l := by rintro ⟨l, c, hl⟩ refine ⟨l.horizontal (some ∘ l' (Classical.arbitrary α)), c, fun x => ?_⟩ rw [Line.horizontal_apply, ← hl, ← hl'] -- By choice of `ι`, `C'` either has `r` color-focused lines or a monochromatic line. specialize hι C' rcases hι with (⟨s, sr⟩ | h) on_goal 2 => exact Or.inr (mono_of_mono h) -- Here we assume `C'` has `r` color focused lines. We split into cases depending on whether -- one of these `r` lines has the same color as the focus point. 
by_cases h : ∃ p ∈ s.lines, (p : AlmostMono _).color = C' s.focus -- If so then this is a `C'`-monochromatic line and we are done. · obtain ⟨p, p_mem, hp⟩ := h refine Or.inr (mono_of_mono ⟨p.line, p.color, ?_⟩) rintro (_ | _) · rw [hp, s.is_focused p p_mem] · apply p.has_color -- If not, we get `r+1` color focused lines by taking the product of the `r` lines with `l'` -- and adding to this the vertical line obtained by the focus point and `l`. refine Or.inl ⟨⟨(s.lines.map ?_).cons ⟨(l'.map some).vertical s.focus, C' s.focus, fun x => ?_⟩, Sum.elim s.focus (l'.map some none), ?_, ?_⟩, ?_⟩ -- Porting note: Needed to reorder the following two goals -- The product lines are almost monochromatic. · refine fun p => ⟨p.line.prod (l'.map some), p.color, fun x => ?_⟩ rw [Line.prod_apply, Line.map_apply, ← p.has_color, ← congr_fun (hl' x)] -- The vertical line is almost monochromatic. · rw [vertical_apply, ← congr_fun (hl' x), Line.map_apply] -- Our `r+1` lines have the same endpoint. · simp_rw [Multiset.mem_cons, Multiset.mem_map] rintro _ (rfl | ⟨q, hq, rfl⟩) · simp only [vertical_apply] · simp only [prod_apply, s.is_focused q hq] -- Our `r+1` lines have distinct colors (this is why we needed to split into cases above). · rw [Multiset.map_cons, Multiset.map_map, Multiset.nodup_cons, Multiset.mem_map] exact ⟨fun ⟨q, hq, he⟩ => h ⟨q, hq, he⟩, s.distinct_colors⟩ -- Finally, we really do have `r+1` lines! · rw [Multiset.card_cons, Multiset.card_map, sr]) /-- The **Hales-Jewett theorem**: For any finite types `α` and `κ`, there exists a finite type `ι` such that whenever the hypercube `ι → α` is `κ`-colored, there is a monochromatic combinatorial line. 
-/ theorem exists_mono_in_high_dimension (α : Type u) [Finite α] (κ : Type v) [Finite κ] : ∃ (ι : Type) (_ : Fintype ι), ∀ C : (ι → α) → κ, ∃ l : Line α ι, l.IsMono C := let ⟨ι, ιfin, hι⟩ := exists_mono_in_high_dimension'.{u,v} α (ULift.{u,v} κ) ⟨ι, ιfin, fun C => let ⟨l, c, hc⟩ := hι (ULift.up ∘ C) ⟨l, c.down, fun x => by rw [← hc x, Function.comp_apply]⟩⟩ end Line /-- A generalization of Van der Waerden's theorem: if `M` is a finitely colored commutative monoid, and `S` is a finite subset, then there exists a monochromatic homothetic copy of `S`. -/ theorem exists_mono_homothetic_copy {M κ : Type*} [AddCommMonoid M] (S : Finset M) [Finite κ] (C : M → κ) : ∃ a > 0, ∃ (b : M) (c : κ), ∀ s ∈ S, C (a • s + b) = c := by obtain ⟨ι, _inst, hι⟩ := Line.exists_mono_in_high_dimension S κ specialize hι fun v => C <| ∑ i, v i obtain ⟨l, c, hl⟩ := hι set s : Finset ι := Finset.univ.filter (fun i => l.idxFun i = none) with hs refine ⟨s.card, Finset.card_pos.mpr ⟨l.proper.choose, ?_⟩, ∑ i ∈ sᶜ, ((l.idxFun i).map ?_).getD 0, c, ?_⟩ · rw [hs, Finset.mem_filter] exact ⟨Finset.mem_univ _, l.proper.choose_spec⟩ · exact fun m => m intro x xs rw [← hl ⟨x, xs⟩] clear hl; congr rw [← Finset.sum_add_sum_compl s] congr 1 · rw [← Finset.sum_const] apply Finset.sum_congr rfl intro i hi rw [hs, Finset.mem_filter] at hi rw [l.apply_none _ _ hi.right, Subtype.coe_mk] · apply Finset.sum_congr rfl intro i hi rw [hs, Finset.compl_filter, Finset.mem_filter] at hi obtain ⟨y, hy⟩ := Option.ne_none_iff_exists.mp hi.right simp_rw [← hy, Option.map_some', Option.getD] namespace Subspace /-- The **multidimensional Hales-Jewett theorem**, aka **extended Hales-Jewett theorem**: For any finite types `η`, `α` and `κ`, there exists a finite type `ι` such that whenever the hypercube `ι → α` is `κ`-colored, there is a monochromatic combinatorial subspace of dimension `η`. 
-/ theorem exists_mono_in_high_dimension (α κ η) [Finite α] [Finite κ] [Finite η] : ∃ (ι : Type) (_ : Fintype ι), ∀ C : (ι → α) → κ, ∃ l : Subspace η α ι, l.IsMono C := by cases nonempty_fintype η obtain ⟨ι, _, hι⟩ := Line.exists_mono_in_high_dimension (Shrink.{0} η → α) κ refine ⟨ι × Shrink η, inferInstance, fun C ↦ ?_⟩ obtain ⟨l, hl⟩ := hι fun x ↦ C fun (i, e) ↦ x i e refine ⟨l.toSubspace.reindex (equivShrink.{0} η).symm (Equiv.refl _) (Equiv.refl _), ?_⟩ convert hl.toSubspace.reindex simp /-- A variant of the **extended Hales-Jewett theorem** `exists_mono_in_high_dimension` where the returned type is some `Fin n` instead of a general fintype. -/ theorem exists_mono_in_high_dimension_fin (α κ η) [Finite α] [Finite κ] [Finite η] : ∃ n, ∀ C : (Fin n → α) → κ, ∃ l : Subspace η α (Fin n), l.IsMono C := by obtain ⟨ι, ιfin, hι⟩ := exists_mono_in_high_dimension α κ η refine ⟨Fintype.card ι, fun C ↦ ?_⟩ obtain ⟨l, c, cl⟩ := hι fun v ↦ C (v ∘ (Fintype.equivFin _).symm) refine ⟨⟨l.idxFun ∘ (Fintype.equivFin _).symm, fun e ↦ ?_⟩, c, cl⟩ obtain ⟨i, hi⟩ := l.proper e use Fintype.equivFin _ i simpa using hi end Subspace end Combinatorics
Combinatorics\Hindman.lean
/- Copyright (c) 2021 David Wärn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: David Wärn -/ import Mathlib.Topology.StoneCech import Mathlib.Topology.Algebra.Semigroup import Mathlib.Data.Stream.Init /-! # Hindman's theorem on finite sums We prove Hindman's theorem on finite sums, using idempotent ultrafilters. Given an infinite sequence `a₀, a₁, a₂, …` of positive integers, the set `FS(a₀, …)` is the set of positive integers that can be expressed as a finite sum of `aᵢ`'s, without repetition. Hindman's theorem asserts that whenever the positive integers are finitely colored, there exists a sequence `a₀, a₁, a₂, …` such that `FS(a₀, …)` is monochromatic. There is also a stronger version, saying that whenever a set of the form `FS(a₀, …)` is finitely colored, there exists a sequence `b₀, b₁, b₂, …` such that `FS(b₀, …)` is monochromatic and contained in `FS(a₀, …)`. We prove both these versions for a general semigroup `M` instead of `ℕ+` since it is no harder, although this special case implies the general case. The idea of the proof is to extend the addition `(+) : M → M → M` to addition `(+) : βM → βM → βM` on the space `βM` of ultrafilters on `M`. One can prove that if `U` is an _idempotent_ ultrafilter, i.e. `U + U = U`, then any `U`-large subset of `M` contains some set `FS(a₀, …)` (see `exists_FS_of_large`). And with the help of a general topological argument one can show that any set of the form `FS(a₀, …)` is `U`-large according to some idempotent ultrafilter `U` (see `exists_idempotent_ultrafilter_le_FS`). This is enough to prove the theorem since in any finite partition of a `U`-large set, one of the parts is `U`-large. 
## Main results - `FS_partition_regular`: the strong form of Hindman's theorem - `exists_FS_of_finite_cover`: the weak form of Hindman's theorem ## Tags Ramsey theory, ultrafilter -/ open Filter /-- Multiplication of ultrafilters given by `∀ᶠ m in U*V, p m ↔ ∀ᶠ m in U, ∀ᶠ m' in V, p (m*m')`. -/ @[to_additive "Addition of ultrafilters given by `∀ᶠ m in U+V, p m ↔ ∀ᶠ m in U, ∀ᶠ m' in V, p (m+m')`."] def Ultrafilter.mul {M} [Mul M] : Mul (Ultrafilter M) where mul U V := (· * ·) <$> U <*> V attribute [local instance] Ultrafilter.mul Ultrafilter.add /- We could have taken this as the definition of `U * V`, but then we would have to prove that it defines an ultrafilter. -/ @[to_additive] theorem Ultrafilter.eventually_mul {M} [Mul M] (U V : Ultrafilter M) (p : M → Prop) : (∀ᶠ m in ↑(U * V), p m) ↔ ∀ᶠ m in U, ∀ᶠ m' in V, p (m * m') := Iff.rfl /-- Semigroup structure on `Ultrafilter M` induced by a semigroup structure on `M`. -/ @[to_additive "Additive semigroup structure on `Ultrafilter M` induced by an additive semigroup structure on `M`."] def Ultrafilter.semigroup {M} [Semigroup M] : Semigroup (Ultrafilter M) := { Ultrafilter.mul with mul_assoc := fun U V W => Ultrafilter.coe_inj.mp <| -- porting note (#11083): `simp` was slow to typecheck, replaced by `simp_rw` Filter.ext' fun p => by simp_rw [Ultrafilter.eventually_mul, mul_assoc] } attribute [local instance] Ultrafilter.semigroup Ultrafilter.addSemigroup -- We don't prove `continuous_mul_right`, because in general it is false! 
@[to_additive]
theorem Ultrafilter.continuous_mul_left {M} [Semigroup M] (V : Ultrafilter M) :
    Continuous (· * V) :=
  ultrafilterBasis_is_basis.continuous_iff.2 <| Set.forall_mem_range.mpr fun s ↦
    ultrafilter_isOpen_basic { m : M | ∀ᶠ m' in V, m * m' ∈ s }

namespace Hindman

-- Porting note: mathport wants these names to be `fS`, `fP`, etc, but this does violence to
-- mathematical naming conventions, as does `fs`, `fp`, so we just followed `mathlib` 3 here

/-- `FS a` is the set of finite sums in `a`, i.e. `m ∈ FS a` if `m` is the sum of a nonempty
subsequence of `a`. We give a direct inductive definition instead of talking about subsequences. -/
inductive FS {M} [AddSemigroup M] : Stream' M → Set M
  | head (a : Stream' M) : FS a a.head
  | tail (a : Stream' M) (m : M) (h : FS a.tail m) : FS a m
  | cons (a : Stream' M) (m : M) (h : FS a.tail m) : FS a (a.head + m)

/-- `FP a` is the set of finite products in `a`, i.e. `m ∈ FP a` if `m` is the product of a nonempty
subsequence of `a`. We give a direct inductive definition instead of talking about subsequences. -/
@[to_additive FS]
inductive FP {M} [Semigroup M] : Stream' M → Set M
  | head (a : Stream' M) : FP a a.head
  | tail (a : Stream' M) (m : M) (h : FP a.tail m) : FP a m
  | cons (a : Stream' M) (m : M) (h : FP a.tail m) : FP a (a.head * m)

/-- If `m` and `m'` are finite products in `M`, then so is `m * m'`, provided that `m'` is obtained
from a subsequence of `M` starting sufficiently late. -/
@[to_additive
      "If `m` and `m'` are finite sums in `M`, then so is `m + m'`, provided that `m'` is obtained
from a subsequence of `M` starting sufficiently late."]
theorem FP.mul {M} [Semigroup M] {a : Stream' M} {m : M} (hm : m ∈ FP a) :
    ∃ n, ∀ m' ∈ FP (a.drop n), m * m' ∈ FP a := by
  induction' hm with a a m hm ih a m hm ih
  · exact ⟨1, fun m hm => FP.cons a m hm⟩
  · cases' ih with n hn
    use n + 1
    intro m' hm'
    exact FP.tail _ _ (hn _ hm')
  · cases' ih with n hn
    use n + 1
    intro m' hm'
    rw [mul_assoc]
    exact FP.cons _ _ (hn _ hm')

@[to_additive exists_idempotent_ultrafilter_le_FS]
theorem exists_idempotent_ultrafilter_le_FP {M} [Semigroup M] (a : Stream' M) :
    ∃ U : Ultrafilter M, U * U = U ∧ ∀ᶠ m in U, m ∈ FP a := by
  -- `S` is the set of ultrafilters which make every tail-set `FP (a.drop n)` large; we find an
  -- idempotent element of this compact subsemigroup of `Ultrafilter M`.
  let S : Set (Ultrafilter M) := ⋂ n, { U | ∀ᶠ m in U, m ∈ FP (a.drop n) }
  have h := exists_idempotent_in_compact_subsemigroup ?_ S ?_ ?_ ?_
  · rcases h with ⟨U, hU, U_idem⟩
    refine ⟨U, U_idem, ?_⟩
    convert Set.mem_iInter.mp hU 0
  · exact Ultrafilter.continuous_mul_left
  · apply IsCompact.nonempty_iInter_of_sequence_nonempty_isCompact_isClosed
    · intro n U hU
      filter_upwards [hU]
      rw [add_comm, ← Stream'.drop_drop, ← Stream'.tail_eq_drop]
      exact FP.tail _
    · intro n
      exact ⟨pure _, mem_pure.mpr <| FP.head _⟩
    · exact (ultrafilter_isClosed_basic _).isCompact
    · intro n
      apply ultrafilter_isClosed_basic
  · exact IsClosed.isCompact (isClosed_iInter fun i => ultrafilter_isClosed_basic _)
  · intro U hU V hV
    rw [Set.mem_iInter] at *
    intro n
    rw [Set.mem_setOf_eq, Ultrafilter.eventually_mul]
    filter_upwards [hU n] with m hm
    obtain ⟨n', hn⟩ := FP.mul hm
    filter_upwards [hV (n' + n)] with m' hm'
    apply hn
    simpa only [Stream'.drop_drop] using hm'

@[to_additive exists_FS_of_large]
theorem exists_FP_of_large {M} [Semigroup M] (U : Ultrafilter M) (U_idem : U * U = U) (s₀ : Set M)
    (sU : s₀ ∈ U) : ∃ a, FP a ⊆ s₀ := by
  /- Informally: given a `U`-large set `s₀`, the set `s₀ ∩ { m | ∀ᶠ m' in U, m * m' ∈ s₀ }` is also
  `U`-large (since `U` is idempotent). Thus in particular there is an `a₀` in this intersection. Now
  let `s₁` be the intersection `s₀ ∩ { m | a₀ * m ∈ s₀ }`. By choice of `a₀`, this is again
  `U`-large, so we can repeat the argument starting from `s₁`, obtaining `a₁`, `s₂`, etc.
  This gives the desired infinite sequence. -/
  have exists_elem : ∀ {s : Set M} (_hs : s ∈ U), (s ∩ { m | ∀ᶠ m' in U, m * m' ∈ s }).Nonempty :=
    fun {s} hs => Ultrafilter.nonempty_of_mem (inter_mem hs <| by rwa [← U_idem] at hs)
  let elem : { s // s ∈ U } → M := fun p => (exists_elem p.property).some
  let succ : {s // s ∈ U} → {s // s ∈ U} := fun (p : {s // s ∈ U}) =>
    ⟨p.val ∩ {m : M | elem p * m ∈ p.val},
      inter_mem p.property
        (show (exists_elem p.property).some ∈ {m : M | ∀ᶠ (m' : M) in ↑U, m * m' ∈ p.val} from
          p.val.inter_subset_right (exists_elem p.property).some_mem)⟩
  use Stream'.corec elem succ (Subtype.mk s₀ sU)
  suffices ∀ (a : Stream' M), ∀ m ∈ FP a, ∀ p, a = Stream'.corec elem succ p → m ∈ p.val by
    intro m hm
    exact this _ m hm ⟨s₀, sU⟩ rfl
  clear sU s₀
  intro a m h
  induction' h with b b n h ih b n h ih
  · rintro p rfl
    rw [Stream'.corec_eq, Stream'.head_cons]
    exact Set.inter_subset_left (Set.Nonempty.some_mem _)
  · rintro p rfl
    refine Set.inter_subset_left (ih (succ p) ?_)
    rw [Stream'.corec_eq, Stream'.tail_cons]
  · rintro p rfl
    have := Set.inter_subset_right (ih (succ p) ?_)
    · simpa only using this
    rw [Stream'.corec_eq, Stream'.tail_cons]

/-- The strong form of **Hindman's theorem**: in any finite cover of an FP-set, one of the parts
contains an FP-set. -/
@[to_additive FS_partition_regular "The strong form of **Hindman's theorem**: in any finite cover
of an FS-set, one the parts contains an FS-set."]
theorem FP_partition_regular {M} [Semigroup M] (a : Stream' M) (s : Set (Set M)) (sfin : s.Finite)
    (scov : FP a ⊆ ⋃₀ s) : ∃ c ∈ s, ∃ b : Stream' M, FP b ⊆ c :=
  let ⟨U, idem, aU⟩ := exists_idempotent_ultrafilter_le_FP a
  let ⟨c, cs, hc⟩ := (Ultrafilter.finite_sUnion_mem_iff sfin).mp (mem_of_superset aU scov)
  ⟨c, cs, exists_FP_of_large U idem c hc⟩

/-- The weak form of **Hindman's theorem**: in any finite cover of a nonempty semigroup, one of
the parts contains an FP-set. -/
@[to_additive exists_FS_of_finite_cover "The weak form of **Hindman's theorem**: in any finite
cover of a nonempty additive semigroup, one of the parts contains an FS-set."]
theorem exists_FP_of_finite_cover {M} [Semigroup M] [Nonempty M] (s : Set (Set M)) (sfin : s.Finite)
    (scov : ⊤ ⊆ ⋃₀ s) : ∃ c ∈ s, ∃ a : Stream' M, FP a ⊆ c :=
  let ⟨U, hU⟩ :=
    exists_idempotent_of_compact_t2_of_continuous_mul_left (@Ultrafilter.continuous_mul_left M _)
  let ⟨c, c_s, hc⟩ := (Ultrafilter.finite_sUnion_mem_iff sfin).mp (mem_of_superset univ_mem scov)
  ⟨c, c_s, exists_FP_of_large U hU c hc⟩

@[to_additive FS_iter_tail_sub_FS]
theorem FP_drop_subset_FP {M} [Semigroup M] (a : Stream' M) (n : ℕ) : FP (a.drop n) ⊆ FP a := by
  induction' n with n ih
  · rfl
  rw [Nat.add_comm, ← Stream'.drop_drop]
  exact _root_.trans (FP.tail _) ih

@[to_additive]
theorem FP.singleton {M} [Semigroup M] (a : Stream' M) (i : ℕ) : a.get i ∈ FP a := by
  induction' i with i ih generalizing a
  · apply FP.head
  · apply FP.tail
    apply ih

@[to_additive]
theorem FP.mul_two {M} [Semigroup M] (a : Stream' M) (i j : ℕ) (ij : i < j) :
    a.get i * a.get j ∈ FP a := by
  refine FP_drop_subset_FP _ i ?_
  rw [← Stream'.head_drop]
  apply FP.cons
  rcases le_iff_exists_add.mp (Nat.succ_le_of_lt ij) with ⟨d, hd⟩
  -- Porting note: need to fix breakage of Set notation
  change _ ∈ FP _
  have := FP.singleton (a.drop i).tail d
  rw [Stream'.tail_eq_drop, Stream'.get_drop, Stream'.get_drop] at this
  convert this
  rw [hd, add_comm, Nat.succ_add, Nat.add_succ]

@[to_additive]
theorem FP.finset_prod {M} [CommMonoid M] (a : Stream' M) (s : Finset ℕ) (hs : s.Nonempty) :
    (s.prod fun i => a.get i) ∈ FP a := by
  refine FP_drop_subset_FP _ (s.min' hs) ?_
  induction' s using Finset.strongInduction with s ih
  rw [← Finset.mul_prod_erase _ _ (s.min'_mem hs), ← Stream'.head_drop]
  rcases (s.erase (s.min' hs)).eq_empty_or_nonempty with h | h
  · rw [h, Finset.prod_empty, mul_one]
    exact FP.head _
  · apply FP.cons
    rw [Stream'.tail_eq_drop, Stream'.drop_drop, add_comm]
    refine Set.mem_of_subset_of_mem ?_ (ih _ (Finset.erase_ssubset <| s.min'_mem hs) h)
    have : s.min' hs + 1 ≤ (s.erase (s.min' hs)).min' h :=
      Nat.succ_le_of_lt (Finset.min'_lt_of_mem_erase_min' _ _ <| Finset.min'_mem _ _)
    cases' le_iff_exists_add.mp this with d hd
    rw [hd, add_comm, ← Stream'.drop_drop]
    apply FP_drop_subset_FP

end Hindman
Combinatorics\Pigeonhole.lean
/-
Copyright (c) 2020 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller, Yury Kudryashov
-/
import Mathlib.Algebra.Module.BigOperators
import Mathlib.Algebra.Module.Defs
import Mathlib.Algebra.Order.BigOperators.Group.Finset
import Mathlib.Data.Nat.ModEq
import Mathlib.Data.Set.Finite

/-!
# Pigeonhole principles

Given pigeons (possibly infinitely many) in pigeonholes, the pigeonhole principle states that, if
there are more pigeons than pigeonholes, then there is a pigeonhole with two or more pigeons.

There are a few variations on this statement, and the conclusion can be made stronger depending on
how many pigeons you know you might have.

The basic statements of the pigeonhole principle appear in the following locations:

* `Data.Finset.Basic` has `Finset.exists_ne_map_eq_of_card_lt_of_maps_to`
* `Data.Fintype.Basic` has `Fintype.exists_ne_map_eq_of_card_lt`
* `Data.Fintype.Basic` has `Finite.exists_ne_map_eq_of_infinite`
* `Data.Fintype.Basic` has `Finite.exists_infinite_fiber`
* `Data.Set.Finite` has `Set.Infinite.exists_ne_map_eq_of_mapsTo`

This module gives access to these pigeonhole principles along with 20 more. The versions vary by:

* using a function between `Fintype`s or a function between possibly infinite types restricted to
  `Finset`s;
* counting pigeons by a general weight function (`∑ x ∈ s, w x`) or by heads (`Finset.card s`);
* using strict or non-strict inequalities;
* establishing upper or lower estimate on the number (or the total weight) of the pigeons in one
  pigeonhole;
* in case when we count pigeons by some weight function `w` and consider a function `f` between
  `Finset`s `s` and `t`, we can either assume that each pigeon is in one of the pigeonholes
  (`∀ x ∈ s, f x ∈ t`), or assume that for `y ∉ t`, the total weight of the pigeons in this
  pigeonhole `∑ x ∈ s.filter (fun x ↦ f x = y), w x` is nonpositive or nonnegative depending on
  the inequality we are proving.

Lemma names follow `mathlib` convention (e.g.,
`Finset.exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum`); "pigeonhole principle" is mentioned in
the docstrings instead of the names.

## See also

* `Ordinal.infinite_pigeonhole`: pigeonhole principle for cardinals, formulated using cofinality;

* `MeasureTheory.exists_nonempty_inter_of_measure_univ_lt_tsum_measure`,
  `MeasureTheory.exists_nonempty_inter_of_measure_univ_lt_sum_measure`: pigeonhole principle in a
  measure space.

## Tags

pigeonhole principle
-/


universe u v w

variable {α : Type u} {β : Type v} {M : Type w} [DecidableEq β]

open Nat

namespace Finset

variable {s : Finset α} {t : Finset β} {f : α → β} {w : α → M} {b : M} {n : ℕ}

/-!
### The pigeonhole principles on `Finset`s, pigeons counted by weight

In this section we prove the following version of the pigeonhole principle: if the total weight of
a finite set of pigeons is greater than `n • b`, and they are sorted into `n` pigeonholes, then for
some pigeonhole, the total weight of the pigeons in this pigeonhole is greater than `b`, and a few
variations of this theorem.

The principle is formalized in the following way, see
`Finset.exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum`: if `f : α → β` is a function which maps all
elements of `s : Finset α` to `t : Finset β` and `card t • b < ∑ x ∈ s, w x`, where `w : α → M` is
a weight function taking values in a `LinearOrderedCancelAddCommMonoid`, then for
some `y ∈ t`, the sum of the weights of all `x ∈ s` such that `f x = y` is greater than `b`.

There are a few bits we can change in this theorem:

* reverse all inequalities, with obvious adjustments to the name;
* replace the assumption `∀ a ∈ s, f a ∈ t` with
  `∀ y ∉ t, (∑ x ∈ s.filter (fun x ↦ f x = y), w x) ≤ 0`,
  and replace `of_maps_to` with `of_sum_fiber_nonpos` in the name;
* use non-strict inequalities assuming `t` is nonempty.

We can do all these variations independently, so we have eight versions of the theorem.
-/


section

variable [LinearOrderedCancelAddCommMonoid M]

/-! #### Strict inequality versions -/


/-- The pigeonhole principle for finitely many pigeons counted by weight, strict inequality
version: if the total weight of a finite set of pigeons is greater than `n • b`, and they are
sorted into `n` pigeonholes, then for some pigeonhole, the total weight of the pigeons in this
pigeonhole is greater than `b`. -/
theorem exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum (hf : ∀ a ∈ s, f a ∈ t)
    (hb : t.card • b < ∑ x ∈ s, w x) : ∃ y ∈ t, b < ∑ x ∈ s.filter fun x => f x = y, w x :=
  exists_lt_of_sum_lt <| by simpa only [sum_fiberwise_of_maps_to hf, sum_const]

/-- The pigeonhole principle for finitely many pigeons counted by weight, strict inequality
version: if the total weight of a finite set of pigeons is less than `n • b`, and they are sorted
into `n` pigeonholes, then for some pigeonhole, the total weight of the pigeons in this pigeonhole
is less than `b`. -/
theorem exists_sum_fiber_lt_of_maps_to_of_sum_lt_nsmul (hf : ∀ a ∈ s, f a ∈ t)
    (hb : ∑ x ∈ s, w x < t.card • b) : ∃ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x < b :=
  exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum (M := Mᵒᵈ) hf hb

/-- The pigeonhole principle for finitely many pigeons counted by weight, strict inequality
version: if the total weight of a finite set of pigeons is greater than `n • b`, they are sorted
into some pigeonholes, and for all but `n` pigeonholes the total weight of the pigeons there is
nonpositive, then for at least one of these `n` pigeonholes, the total weight of the pigeons in
this pigeonhole is greater than `b`. -/
theorem exists_lt_sum_fiber_of_sum_fiber_nonpos_of_nsmul_lt_sum
    (ht : ∀ y ∉ t, ∑ x ∈ s.filter fun x => f x = y, w x ≤ 0)
    (hb : t.card • b < ∑ x ∈ s, w x) : ∃ y ∈ t, b < ∑ x ∈ s.filter fun x => f x = y, w x :=
  exists_lt_of_sum_lt <|
    calc
      ∑ _y ∈ t, b < ∑ x ∈ s, w x := by simpa
      _ ≤ ∑ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x :=
        sum_le_sum_fiberwise_of_sum_fiber_nonpos ht

/-- The pigeonhole principle for finitely many pigeons counted by weight, strict inequality
version: if the total weight of a finite set of pigeons is less than `n • b`, they are sorted into
some pigeonholes, and for all but `n` pigeonholes the total weight of the pigeons there is
nonnegative, then for at least one of these `n` pigeonholes, the total weight of the pigeons in
this pigeonhole is less than `b`. -/
theorem exists_sum_fiber_lt_of_sum_fiber_nonneg_of_sum_lt_nsmul
    (ht : ∀ y ∉ t, (0 : M) ≤ ∑ x ∈ s.filter fun x => f x = y, w x)
    (hb : ∑ x ∈ s, w x < t.card • b) : ∃ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x < b :=
  exists_lt_sum_fiber_of_sum_fiber_nonpos_of_nsmul_lt_sum (M := Mᵒᵈ) ht hb

/-! #### Non-strict inequality versions -/


/-- The pigeonhole principle for finitely many pigeons counted by weight, non-strict inequality
version: if the total weight of a finite set of pigeons is greater than or equal to `n • b`, and
they are sorted into `n > 0` pigeonholes, then for some pigeonhole, the total weight of the pigeons
in this pigeonhole is greater than or equal to `b`. -/
theorem exists_le_sum_fiber_of_maps_to_of_nsmul_le_sum (hf : ∀ a ∈ s, f a ∈ t) (ht : t.Nonempty)
    (hb : t.card • b ≤ ∑ x ∈ s, w x) : ∃ y ∈ t, b ≤ ∑ x ∈ s.filter fun x => f x = y, w x :=
  exists_le_of_sum_le ht <| by simpa only [sum_fiberwise_of_maps_to hf, sum_const]

/-- The pigeonhole principle for finitely many pigeons counted by weight, non-strict inequality
version: if the total weight of a finite set of pigeons is less than or equal to `n • b`, and they
are sorted into `n > 0` pigeonholes, then for some pigeonhole, the total weight of the pigeons in
this pigeonhole is less than or equal to `b`. -/
theorem exists_sum_fiber_le_of_maps_to_of_sum_le_nsmul (hf : ∀ a ∈ s, f a ∈ t) (ht : t.Nonempty)
    (hb : ∑ x ∈ s, w x ≤ t.card • b) : ∃ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x ≤ b :=
  exists_le_sum_fiber_of_maps_to_of_nsmul_le_sum (M := Mᵒᵈ) hf ht hb

/-- The pigeonhole principle for finitely many pigeons counted by weight, non-strict inequality
version: if the total weight of a finite set of pigeons is greater than or equal to `n • b`, they
are sorted into some pigeonholes, and for all but `n > 0` pigeonholes the total weight of the
pigeons there is nonpositive, then for at least one of these `n` pigeonholes, the total weight of
the pigeons in this pigeonhole is greater than or equal to `b`. -/
theorem exists_le_sum_fiber_of_sum_fiber_nonpos_of_nsmul_le_sum
    (hf : ∀ y ∉ t, ∑ x ∈ s.filter fun x => f x = y, w x ≤ 0) (ht : t.Nonempty)
    (hb : t.card • b ≤ ∑ x ∈ s, w x) : ∃ y ∈ t, b ≤ ∑ x ∈ s.filter fun x => f x = y, w x :=
  exists_le_of_sum_le ht <|
    calc
      ∑ _y ∈ t, b ≤ ∑ x ∈ s, w x := by simpa
      _ ≤ ∑ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x :=
        sum_le_sum_fiberwise_of_sum_fiber_nonpos hf

/-- The pigeonhole principle for finitely many pigeons counted by weight, non-strict inequality
version: if the total weight of a finite set of pigeons is less than or equal to `n • b`, they are
sorted into some pigeonholes, and for all but `n > 0` pigeonholes the total weight of the pigeons
there is nonnegative, then for at least one of these `n` pigeonholes, the total weight of the
pigeons in this pigeonhole is less than or equal to `b`. -/
theorem exists_sum_fiber_le_of_sum_fiber_nonneg_of_sum_le_nsmul
    (hf : ∀ y ∉ t, (0 : M) ≤ ∑ x ∈ s.filter fun x => f x = y, w x) (ht : t.Nonempty)
    (hb : ∑ x ∈ s, w x ≤ t.card • b) : ∃ y ∈ t, ∑ x ∈ s.filter fun x => f x = y, w x ≤ b :=
  exists_le_sum_fiber_of_sum_fiber_nonpos_of_nsmul_le_sum (M := Mᵒᵈ) hf ht hb

end

variable [LinearOrderedCommSemiring M]

/-!
### The pigeonhole principles on `Finset`s, pigeons counted by heads

In this section we formalize a few versions of the following pigeonhole principle: there is a
pigeonhole with at least as many pigeons as the ceiling of the average number of pigeons across all
pigeonholes.

First, we can use strict or non-strict inequalities. While the versions with non-strict
inequalities are weaker than those with strict inequalities, sometimes it might be more convenient
to apply the weaker version. Second, we can either state that there exists a pigeonhole with at
least `n` pigeons, or state that there exists a pigeonhole with at most `n` pigeons. In the latter
case we do not need the assumption `∀ a ∈ s, f a ∈ t`.

So, we prove four theorems: `Finset.exists_lt_card_fiber_of_maps_to_of_mul_lt_card`,
`Finset.exists_le_card_fiber_of_maps_to_of_mul_le_card`, `Finset.exists_card_fiber_lt_of_card_lt_mul`,
and `Finset.exists_card_fiber_le_of_card_le_mul`. -/


/-- The pigeonhole principle for finitely many pigeons counted by heads: there is a pigeonhole with
at least as many pigeons as the ceiling of the average number of pigeons across all pigeonholes. -/
theorem exists_lt_card_fiber_of_nsmul_lt_card_of_maps_to (hf : ∀ a ∈ s, f a ∈ t)
    (ht : t.card • b < s.card) : ∃ y ∈ t, b < (s.filter fun x => f x = y).card := by
  simp_rw [cast_card] at ht ⊢
  exact exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum hf ht

/-- The pigeonhole principle for finitely many pigeons counted by heads: there is a pigeonhole with
at least as many pigeons as the ceiling of the average number of pigeons across all pigeonholes.
("The maximum is at least the mean" specialized to integers.)

More formally, given a function between finite sets `s` and `t` and a natural number `n` such that
`card t * n < card s`, there exists `y ∈ t` such that its preimage in `s` has more than `n`
elements. -/
theorem exists_lt_card_fiber_of_mul_lt_card_of_maps_to (hf : ∀ a ∈ s, f a ∈ t)
    (hn : t.card * n < s.card) : ∃ y ∈ t, n < (s.filter fun x => f x = y).card :=
  exists_lt_card_fiber_of_nsmul_lt_card_of_maps_to hf hn

/-- The pigeonhole principle for finitely many pigeons counted by heads: there is a pigeonhole with
at most as many pigeons as the floor of the average number of pigeons across all pigeonholes. -/
theorem exists_card_fiber_lt_of_card_lt_nsmul (ht : ↑s.card < t.card • b) :
    ∃ y ∈ t, ↑(s.filter fun x => f x = y).card < b := by
  simp_rw [cast_card] at ht ⊢
  exact
    exists_sum_fiber_lt_of_sum_fiber_nonneg_of_sum_lt_nsmul
      (fun _ _ => sum_nonneg fun _ _ => zero_le_one) ht

/-- The pigeonhole principle for finitely many pigeons counted by heads: there is a pigeonhole with
at most as many pigeons as the floor of the average number of pigeons across all pigeonholes.  ("The
minimum is at most the mean" specialized to integers.)

More formally, given a function `f`, a finite set `s` in its domain, a finite set `t` in its
codomain, and a natural number `n` such that `card s < card t * n`, there exists `y ∈ t` such that
its preimage in `s` has less than `n` elements. -/
theorem exists_card_fiber_lt_of_card_lt_mul (hn : s.card < t.card * n) :
    ∃ y ∈ t, (s.filter fun x => f x = y).card < n :=
  exists_card_fiber_lt_of_card_lt_nsmul hn

/-- The pigeonhole principle for finitely many pigeons counted by heads: given a function between
finite sets `s` and `t` and a number `b` such that `card t • b ≤ card s`, there exists `y ∈ t` such
that its preimage in `s` has at least `b` elements.
See also `Finset.exists_lt_card_fiber_of_nsmul_lt_card_of_maps_to` for a stronger statement. -/
theorem exists_le_card_fiber_of_nsmul_le_card_of_maps_to (hf : ∀ a ∈ s, f a ∈ t) (ht : t.Nonempty)
    (hb : t.card • b ≤ s.card) : ∃ y ∈ t, b ≤ (s.filter fun x => f x = y).card := by
  simp_rw [cast_card] at hb ⊢
  exact exists_le_sum_fiber_of_maps_to_of_nsmul_le_sum hf ht hb

/-- The pigeonhole principle for finitely many pigeons counted by heads: given a function between
finite sets `s` and `t` and a natural number `n` such that `card t * n ≤ card s`, there exists
`y ∈ t` such that its preimage in `s` has at least `n` elements. See also
`Finset.exists_lt_card_fiber_of_mul_lt_card_of_maps_to` for a stronger statement. -/
theorem exists_le_card_fiber_of_mul_le_card_of_maps_to (hf : ∀ a ∈ s, f a ∈ t) (ht : t.Nonempty)
    (hn : t.card * n ≤ s.card) : ∃ y ∈ t, n ≤ (s.filter fun x => f x = y).card :=
  exists_le_card_fiber_of_nsmul_le_card_of_maps_to hf ht hn

/-- The pigeonhole principle for finitely many pigeons counted by heads: given a function `f`,
finite sets `s` and `t`, and a number `b` such that `card s ≤ card t • b`, there exists `y ∈ t`
such that its preimage in `s` has no more than `b` elements.
See also `Finset.exists_card_fiber_lt_of_card_lt_nsmul` for a stronger statement. -/
theorem exists_card_fiber_le_of_card_le_nsmul (ht : t.Nonempty) (hb : ↑s.card ≤ t.card • b) :
    ∃ y ∈ t, ↑(s.filter fun x => f x = y).card ≤ b := by
  simp_rw [cast_card] at hb ⊢
  refine
    exists_sum_fiber_le_of_sum_fiber_nonneg_of_sum_le_nsmul
      (fun _ _ => sum_nonneg fun _ _ => zero_le_one) ht hb

/-- The pigeonhole principle for finitely many pigeons counted by heads: given a function `f`, a
finite set `s` in its domain, a finite set `t` in its codomain, and a natural number `n` such that
`card s ≤ card t * n`, there exists `y ∈ t` such that its preimage in `s` has no more than `n`
elements. See also `Finset.exists_card_fiber_lt_of_card_lt_mul` for a stronger statement. -/
theorem exists_card_fiber_le_of_card_le_mul (ht : t.Nonempty) (hn : s.card ≤ t.card * n) :
    ∃ y ∈ t, (s.filter fun x => f x = y).card ≤ n :=
  exists_card_fiber_le_of_card_le_nsmul ht hn

end Finset

namespace Fintype

open Finset

variable [Fintype α] [Fintype β] (f : α → β) {w : α → M} {b : M} {n : ℕ}

section

variable [LinearOrderedCancelAddCommMonoid M]

/-!
### The pigeonhole principles on `Fintype`s, pigeons counted by weight

In this section we specialize theorems from the previous section to the special case of functions
between `Fintype`s and `s = univ`, `t = univ`. In this case the assumption `∀ x ∈ s, f x ∈ t`
always holds, so we have four theorems instead of eight. -/


/-- The pigeonhole principle for finitely many pigeons of different weights, strict inequality
version: there is a pigeonhole with the total weight of pigeons in it greater than `b` provided
that the total number of pigeonholes times `b` is less than the total weight of all pigeons. -/
theorem exists_lt_sum_fiber_of_nsmul_lt_sum (hb : card β • b < ∑ x, w x) :
    ∃ y, b < ∑ x ∈ univ.filter fun x => f x = y, w x :=
  let ⟨y, _, hy⟩ := exists_lt_sum_fiber_of_maps_to_of_nsmul_lt_sum (fun _ _ => mem_univ _) hb
  ⟨y, hy⟩

/-- The pigeonhole principle for finitely many pigeons of different weights, non-strict inequality
version: there is a pigeonhole with the total weight of pigeons in it greater than or equal to `b`
provided that the total number of pigeonholes times `b` is less than or equal to the total weight
of all pigeons. -/
theorem exists_le_sum_fiber_of_nsmul_le_sum [Nonempty β] (hb : card β • b ≤ ∑ x, w x) :
    ∃ y, b ≤ ∑ x ∈ univ.filter fun x => f x = y, w x :=
  let ⟨y, _, hy⟩ :=
    exists_le_sum_fiber_of_maps_to_of_nsmul_le_sum (fun _ _ => mem_univ _) univ_nonempty hb
  ⟨y, hy⟩

/-- The pigeonhole principle for finitely many pigeons of different weights, strict inequality
version: there is a pigeonhole with the total weight of pigeons in it less than `b` provided that
the total number of pigeonholes times `b` is greater than the total weight of all pigeons. -/
theorem exists_sum_fiber_lt_of_sum_lt_nsmul (hb : ∑ x, w x < card β • b) :
    ∃ y, ∑ x ∈ univ.filter fun x => f x = y, w x < b :=
  exists_lt_sum_fiber_of_nsmul_lt_sum (M := Mᵒᵈ) _ hb

/-- The pigeonhole principle for finitely many pigeons of different weights, non-strict inequality
version: there is a pigeonhole with the total weight of pigeons in it less than or equal to `b`
provided that the total number of pigeonholes times `b` is greater than or equal to the total
weight of all pigeons. -/
theorem exists_sum_fiber_le_of_sum_le_nsmul [Nonempty β] (hb : ∑ x, w x ≤ card β • b) :
    ∃ y, ∑ x ∈ univ.filter fun x => f x = y, w x ≤ b :=
  exists_le_sum_fiber_of_nsmul_le_sum (M := Mᵒᵈ) _ hb

end

variable [LinearOrderedCommSemiring M]

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes. There is a
pigeonhole with at least as many pigeons as the ceiling of the average number of pigeons across all
pigeonholes. -/
theorem exists_lt_card_fiber_of_nsmul_lt_card (hb : card β • b < card α) :
    ∃ y : β, b < (univ.filter fun x => f x = y).card :=
  let ⟨y, _, h⟩ := exists_lt_card_fiber_of_nsmul_lt_card_of_maps_to (fun _ _ => mem_univ _) hb
  ⟨y, h⟩

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes. There is a
pigeonhole with at least as many pigeons as the ceiling of the average number of pigeons across all
pigeonholes.  ("The maximum is at least the mean" specialized to integers.)

More formally, given a function `f` between finite types `α` and `β` and a number `n` such that
`card β * n < card α`, there exists an element `y : β` such that its preimage has more than `n`
elements. -/
theorem exists_lt_card_fiber_of_mul_lt_card (hn : card β * n < card α) :
    ∃ y : β, n < (univ.filter fun x => f x = y).card :=
  exists_lt_card_fiber_of_nsmul_lt_card _ hn

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes. There is a
pigeonhole with at most as many pigeons as the floor of the average number of pigeons across all
pigeonholes. -/
theorem exists_card_fiber_lt_of_card_lt_nsmul (hb : ↑(card α) < card β • b) :
    ∃ y : β, ↑(univ.filter fun x => f x = y).card < b :=
  let ⟨y, _, h⟩ := Finset.exists_card_fiber_lt_of_card_lt_nsmul (f := f) hb
  ⟨y, h⟩

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes. There is a
pigeonhole with at most as many pigeons as the floor of the average number of pigeons across all
pigeonholes.  ("The minimum is at most the mean" specialized to integers.)

More formally, given a function `f` between finite types `α` and `β` and a number `n` such that
`card α < card β * n`, there exists an element `y : β` such that its preimage has less than `n`
elements. -/
theorem exists_card_fiber_lt_of_card_lt_mul (hn : card α < card β * n) :
    ∃ y : β, (univ.filter fun x => f x = y).card < n :=
  exists_card_fiber_lt_of_card_lt_nsmul _ hn

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes.  Given a function
`f` between finite types `α` and `β` and a number `b` such that `card β • b ≤ card α`, there exists
an element `y : β` such that its preimage has at least `b` elements.
See also `Fintype.exists_lt_card_fiber_of_nsmul_lt_card` for a stronger statement. -/
theorem exists_le_card_fiber_of_nsmul_le_card [Nonempty β] (hb : card β • b ≤ card α) :
    ∃ y : β, b ≤ (univ.filter fun x => f x = y).card :=
  let ⟨y, _, h⟩ :=
    exists_le_card_fiber_of_nsmul_le_card_of_maps_to (fun _ _ => mem_univ _) univ_nonempty hb
  ⟨y, h⟩

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes.  Given a function
`f` between finite types `α` and `β` and a number `n` such that `card β * n ≤ card α`, there exists
an element `y : β` such that its preimage has at least `n` elements. See also
`Fintype.exists_lt_card_fiber_of_mul_lt_card` for a stronger statement. -/
theorem exists_le_card_fiber_of_mul_le_card [Nonempty β] (hn : card β * n ≤ card α) :
    ∃ y : β, n ≤ (univ.filter fun x => f x = y).card :=
  exists_le_card_fiber_of_nsmul_le_card _ hn

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes.  Given a function
`f` between finite types `α` and `β` and a number `b` such that `card α ≤ card β • b`, there exists
an element `y : β` such that its preimage has at most `b` elements.
See also `Fintype.exists_card_fiber_lt_of_card_lt_nsmul` for a stronger statement. -/
theorem exists_card_fiber_le_of_card_le_nsmul [Nonempty β] (hb : ↑(card α) ≤ card β • b) :
    ∃ y : β, ↑(univ.filter fun x => f x = y).card ≤ b :=
  let ⟨y, _, h⟩ := Finset.exists_card_fiber_le_of_card_le_nsmul univ_nonempty hb
  ⟨y, h⟩

/-- The strong pigeonhole principle for finitely many pigeons and pigeonholes.  Given a function
`f` between finite types `α` and `β` and a number `n` such that `card α ≤ card β * n`, there exists
an element `y : β` such that its preimage has at most `n` elements. See also
`Fintype.exists_card_fiber_lt_of_card_lt_mul` for a stronger statement. -/
theorem exists_card_fiber_le_of_card_le_mul [Nonempty β] (hn : card α ≤ card β * n) :
    ∃ y : β, (univ.filter fun x => f x = y).card ≤ n :=
  exists_card_fiber_le_of_card_le_nsmul _ hn

end Fintype

namespace Nat

open Set

/-- If `s` is an infinite set of natural numbers and `k > 0`, then `s` contains two elements `m < n`
that are equal mod `k`. -/
theorem exists_lt_modEq_of_infinite {s : Set ℕ} (hs : s.Infinite) {k : ℕ} (hk : 0 < k) :
    ∃ m ∈ s, ∃ n ∈ s, m < n ∧ m ≡ n [MOD k] :=
  (hs.exists_lt_map_eq_of_mapsTo fun n _ => show n % k ∈ Iio k from Nat.mod_lt n hk) <|
    finite_lt_nat k

end Nat
Combinatorics\Schnirelmann.lean
/-
Copyright (c) 2023 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta, Doga Can Sertbas
-/
import Mathlib.Algebra.Order.Ring.Abs
import Mathlib.Data.Nat.ModEq
import Mathlib.Data.Nat.Prime.Defs
import Mathlib.Data.Real.Archimedean
import Mathlib.Order.Interval.Finset.Nat

/-!
# Schnirelmann density

We define the Schnirelmann density of a set `A` of natural numbers as
$\inf_{n > 0} |A ∩ \{1, ..., n\}| / n$. As this density is very sensitive to changes in small
values, we must exclude `0` from the infimum, and from the intersection.

## Main statements

* Simple bounds on the Schnirelmann density, that it is between 0 and 1 are given in
  `schnirelmannDensity_nonneg` and `schnirelmannDensity_le_one`.
* `schnirelmannDensity_le_of_not_mem`: If `k ∉ A`, the density can be easily upper-bounded by
  `1 - k⁻¹`

## Implementation notes

Despite the definition being noncomputable, we include a decidable instance argument, since this
makes the definition easier to use in explicit cases.
Further, we use `Finset.Ioc` rather than a set intersection since the set is finite by
construction, which reduces the proof obligations later that would arise with `Nat.card`.

## TODO

* Give other calculations of the density, for example powers and their sumsets.
* Define other densities like the lower and upper asymptotic density, and the natural density,
  and show how these relate to the Schnirelmann density.
* Show that if the sum of two densities is at least one, the sumset covers the positive naturals.
* Prove Schnirelmann's theorem and Mann's theorem on the subadditivity of this density.

## References

* [Ruzsa, Imre, *Sumsets and structure*][ruzsa2009]
-/

open Finset

/-- The Schnirelmann density is defined as the infimum of |A ∩ {1, ..., n}| / n as n ranges over
the positive naturals. -/
noncomputable def schnirelmannDensity (A : Set ℕ) [DecidablePred (· ∈ A)] : ℝ :=
  -- The infimum runs over the subtype of positive naturals, so `n = 0` is excluded by
  -- construction.
  ⨅ n : {n : ℕ // 0 < n}, ((Ioc (0 : ℕ) n).filter (· ∈ A)).card / n

section

variable {A : Set ℕ} [DecidablePred (· ∈ A)]

/-- The Schnirelmann density is nonnegative, being an infimum of nonnegative quantities. -/
lemma schnirelmannDensity_nonneg : 0 ≤ schnirelmannDensity A :=
  Real.iInf_nonneg (fun _ => by positivity)

/-- The Schnirelmann density is at most `|A ∩ {1, ..., n}| / n` for each positive `n`. -/
lemma schnirelmannDensity_le_div {n : ℕ} (hn : n ≠ 0) :
    schnirelmannDensity A ≤ ((Ioc 0 n).filter (· ∈ A)).card / n :=
  -- `ciInf_le` needs the family to be bounded below; `0` is such a bound.
  ciInf_le ⟨0, fun _ ⟨_, hx⟩ => hx ▸ by positivity⟩ (⟨n, hn.bot_lt⟩ : {n : ℕ // 0 < n})

/-- For any natural `n`, the Schnirelmann density multiplied by `n` is bounded by
`|A ∩ {1, ..., n}|`.
Note this property fails for the natural density. -/
lemma schnirelmannDensity_mul_le_card_filter {n : ℕ} :
    schnirelmannDensity A * n ≤ ((Ioc 0 n).filter (· ∈ A)).card := by
  rcases eq_or_ne n 0 with rfl | hn
  · simp
  exact (le_div_iff (by positivity)).1 (schnirelmannDensity_le_div hn)

/-- To show the Schnirelmann density is upper bounded by `x`, it suffices to show
`|A ∩ {1, ..., n}| / n ≤ x`, for any chosen positive value of `n`.

We provide `n` explicitly here to make this lemma more easily usable in `apply` or `refine`.
This lemma is analogous to `ciInf_le_of_le`. -/
lemma schnirelmannDensity_le_of_le {x : ℝ} (n : ℕ) (hn : n ≠ 0)
    (hx : ((Ioc 0 n).filter (· ∈ A)).card / n ≤ x) : schnirelmannDensity A ≤ x :=
  (schnirelmannDensity_le_div hn).trans hx

/-- The Schnirelmann density is at most `1`, by taking `n = 1` in the infimum. -/
lemma schnirelmannDensity_le_one : schnirelmannDensity A ≤ 1 :=
  schnirelmannDensity_le_of_le 1 one_ne_zero <| by
    rw [Nat.cast_one, div_one, Nat.cast_le_one]; exact card_filter_le _ _

/-- If `k` is omitted from the set, its Schnirelmann density is upper bounded by `1 - k⁻¹`. -/
lemma schnirelmannDensity_le_of_not_mem {k : ℕ} (hk : k ∉ A) :
    schnirelmannDensity A ≤ 1 - (k⁻¹ : ℝ) := by
  -- For `k = 0` the bound degenerates to `≤ 1`.
  rcases k.eq_zero_or_pos with rfl | hk'
  · simpa using schnirelmannDensity_le_one
  apply schnirelmannDensity_le_of_le k hk'.ne'
  rw [← one_div, one_sub_div (Nat.cast_pos.2 hk').ne']
  gcongr
  rw [← Nat.cast_pred hk', Nat.cast_le]
  -- `A ∩ (0, k]` misses `k` itself, so it fits inside the open interval `(0, k)`.
  suffices (Ioc 0 k).filter (· ∈ A) ⊆ Ioo 0 k from (card_le_card this).trans_eq (by simp)
  rw [← Ioo_insert_right hk', filter_insert, if_neg hk]
  exact filter_subset _ _

/-- The Schnirelmann density of a set not containing `1` is `0`. -/
lemma schnirelmannDensity_eq_zero_of_one_not_mem (h : 1 ∉ A) : schnirelmannDensity A = 0 :=
  ((schnirelmannDensity_le_of_not_mem h).trans (by simp)).antisymm schnirelmannDensity_nonneg

/-- The Schnirelmann density is increasing with the set. -/
lemma schnirelmannDensity_le_of_subset {B : Set ℕ} [DecidablePred (· ∈ B)] (h : A ⊆ B) :
    schnirelmannDensity A ≤ schnirelmannDensity B :=
  ciInf_mono ⟨0, fun _ ⟨_, hx⟩ ↦ hx ▸ by positivity⟩ fun _ ↦ by gcongr; exact h

/-- The Schnirelmann density of `A` is `1` if and only if `A` contains all the positive
naturals. -/
lemma schnirelmannDensity_eq_one_iff : schnirelmannDensity A = 1 ↔ {0}ᶜ ⊆ A := by
  rw [le_antisymm_iff, and_iff_right schnirelmannDensity_le_one]
  constructor
  · -- Contrapositive: a missing positive element `x` forces the density below `1`.
    rw [← not_imp_not, not_le]
    simp only [Set.not_subset, forall_exists_index, true_and, and_imp, Set.mem_singleton_iff]
    intro x hx hx'
    apply (schnirelmannDensity_le_of_not_mem hx').trans_lt
    simpa only [one_div, sub_lt_self_iff, inv_pos, Nat.cast_pos, pos_iff_ne_zero] using hx
  · intro h
    refine le_ciInf fun ⟨n, hn⟩ => ?_
    rw [one_le_div (Nat.cast_pos.2 hn), Nat.cast_le, filter_true_of_mem, Nat.card_Ioc,
      Nat.sub_zero]
    rintro x hx
    exact h (mem_Ioc.1 hx).1.ne'

/-- The Schnirelmann density of `A` containing `0` is `1` if and only if `A` is the naturals. -/
lemma schnirelmannDensity_eq_one_iff_of_zero_mem (hA : 0 ∈ A) :
    schnirelmannDensity A = 1 ↔ A = Set.univ := by
  rw [schnirelmannDensity_eq_one_iff]
  constructor
  · refine fun h => Set.eq_univ_of_forall fun x => ?_
    rcases eq_or_ne x 0 with rfl | hx
    · exact hA
    · exact h hx
  · rintro rfl
    exact Set.subset_univ {0}ᶜ

/-- `x` lower-bounds the density iff it lower-bounds every ratio `|A ∩ (0, n]| / n`. -/
lemma le_schnirelmannDensity_iff {x : ℝ} :
    x ≤ schnirelmannDensity A ↔ ∀ n : ℕ, 0 < n → x ≤ ((Ioc 0 n).filter (· ∈ A)).card / n :=
  (le_ciInf_iff ⟨0, fun _ ⟨_, hx⟩ => hx ▸ by positivity⟩).trans Subtype.forall

/-- The density is below `x` iff some ratio `|A ∩ (0, n]| / n` is. -/
lemma schnirelmannDensity_lt_iff {x : ℝ} :
    schnirelmannDensity A < x ↔ ∃ n : ℕ, 0 < n ∧ ((Ioc 0 n).filter (· ∈ A)).card / n < x := by
  rw [← not_le, le_schnirelmannDensity_iff]; simp

/-- An `ε`-characterisation of upper bounds on the Schnirelmann density. -/
lemma schnirelmannDensity_le_iff_forall {x : ℝ} :
    schnirelmannDensity A ≤ x ↔
      ∀ ε : ℝ, 0 < ε → ∃ n : ℕ, 0 < n ∧ ((Ioc 0 n).filter (· ∈ A)).card / n < x + ε := by
  rw [le_iff_forall_pos_lt_add]
  simp only [schnirelmannDensity_lt_iff]

/-- The density only depends on the positive elements of the set. -/
lemma schnirelmannDensity_congr' {B : Set ℕ} [DecidablePred (· ∈ B)]
    (h : ∀ n > 0, n ∈ A ↔ n ∈ B) : schnirelmannDensity A = schnirelmannDensity B := by
  rw [schnirelmannDensity, schnirelmannDensity]; congr; ext ⟨n, hn⟩; congr 3; ext x; aesop

/-- The Schnirelmann density is unaffected by adding `0`. -/
@[simp] lemma schnirelmannDensity_insert_zero [DecidablePred (· ∈ insert 0 A)] :
    schnirelmannDensity (insert 0 A) = schnirelmannDensity A :=
  schnirelmannDensity_congr' (by aesop)

/-- The Schnirelmann density is unaffected by removing `0`. -/
lemma schnirelmannDensity_diff_singleton_zero [DecidablePred (· ∈ A \ {0})] :
    schnirelmannDensity (A \ {0}) = schnirelmannDensity A :=
  schnirelmannDensity_congr' (by aesop)

/-- The density respects (propositional) equality of sets, across decidability instances. -/
lemma schnirelmannDensity_congr {B : Set ℕ} [DecidablePred (· ∈ B)] (h : A = B) :
    schnirelmannDensity A = schnirelmannDensity B :=
  schnirelmannDensity_congr' (by aesop)

/-- If the Schnirelmann density is `0`, there is a positive natural for which
`|A ∩ {1, ..., n}| / n < ε`, for any positive `ε`.

Note this cannot be improved to `∃ᶠ n : ℕ in atTop`, as can be seen by `A = {1}ᶜ`. -/
lemma exists_of_schnirelmannDensity_eq_zero {ε : ℝ} (hε : 0 < ε)
    (hA : schnirelmannDensity A = 0) :
    ∃ n, 0 < n ∧ ((Ioc 0 n).filter (· ∈ A)).card / n < ε := by
  by_contra! h
  rw [← le_schnirelmannDensity_iff] at h
  linarith

end

@[simp] lemma schnirelmannDensity_empty : schnirelmannDensity ∅ = 0 :=
  schnirelmannDensity_eq_zero_of_one_not_mem (by simp)

/-- The Schnirelmann density of any finset is `0`. -/
lemma schnirelmannDensity_finset (A : Finset ℕ) : schnirelmannDensity A = 0 := by
  refine le_antisymm ?_ schnirelmannDensity_nonneg
  simp only [schnirelmannDensity_le_iff_forall, zero_add]
  intro ε hε
  -- It suffices to treat `ε ≤ 1`; larger `ε` follows from the `ε = 1` case.
  wlog hε₁ : ε ≤ 1 generalizing ε
  · obtain ⟨n, hn, hn'⟩ := this 1 zero_lt_one le_rfl
    exact ⟨n, hn, hn'.trans_le (le_of_not_le hε₁)⟩
  -- Choose `n` large enough that `|A| / n < ε`.
  let n : ℕ := ⌊A.card / ε⌋₊ + 1
  have hn : 0 < n := Nat.succ_pos _
  use n, hn
  rw [div_lt_iff (Nat.cast_pos.2 hn), ← div_lt_iff' hε, Nat.cast_add_one]
  exact (Nat.lt_floor_add_one _).trans_le' <| by gcongr; simp [subset_iff]

/-- The Schnirelmann density of any finite set is `0`. -/
lemma schnirelmannDensity_finite {A : Set ℕ} [DecidablePred (· ∈ A)] (hA : A.Finite) :
    schnirelmannDensity A = 0 := by simpa using schnirelmannDensity_finset hA.toFinset

@[simp] lemma schnirelmannDensity_univ : schnirelmannDensity Set.univ = 1 :=
  (schnirelmannDensity_eq_one_iff_of_zero_mem (by simp)).2 (by simp)

/-- The even numbers have density `0`, since `1` is odd. -/
lemma schnirelmannDensity_setOf_even : schnirelmannDensity (setOf Even) = 0 :=
  schnirelmannDensity_eq_zero_of_one_not_mem <| by simp

/-- The primes have density `0`, since `1` is not prime. -/
lemma schnirelmannDensity_setOf_prime : schnirelmannDensity (setOf Nat.Prime) = 0 :=
  schnirelmannDensity_eq_zero_of_one_not_mem <| by simp [Nat.not_prime_one]

/-- The Schnirelmann density of the set of naturals which are `1 mod m` is `m⁻¹`, for any `m ≠ 1`.

Note that if `m = 1`, this set is empty. -/
lemma schnirelmannDensity_setOf_mod_eq_one {m : ℕ} (hm : m ≠ 1) :
    schnirelmannDensity {n | n % m = 1} = (m⁻¹ : ℝ) := by
  -- For `m = 0` the set is the singleton `{1}`, which is finite, and `0⁻¹ = 0` in `ℝ`.
  rcases m.eq_zero_or_pos with rfl | hm'
  · simp only [Nat.cast_zero, inv_zero]
    refine schnirelmannDensity_finite ?_
    simp
  apply le_antisymm (schnirelmannDensity_le_of_le m hm'.ne' _) _
  · -- Upper bound: `(0, m]` contains at most one element `≡ 1 [MOD m]`, namely `1`.
    rw [← one_div, ← @Nat.cast_one ℝ]
    gcongr
    simp only [Set.mem_setOf_eq, card_le_one_iff_subset_singleton, subset_iff, mem_filter,
      mem_Ioc, mem_singleton, and_imp]
    use 1
    intro x _ hxm h
    rcases eq_or_lt_of_le hxm with rfl | hxm'
    · simp at h
    rwa [Nat.mod_eq_of_lt hxm'] at h
  -- Lower bound: `(0, n]` contains the `⌊(n - 1) / m⌋ + 1` elements `y * m + 1`.
  rw [le_schnirelmannDensity_iff]
  intro n hn
  simp only [Set.mem_setOf_eq]
  have : (Icc 0 ((n - 1) / m)).image (· * m + 1) ⊆ (Ioc 0 n).filter (· % m = 1) := by
    simp only [subset_iff, mem_image, forall_exists_index, mem_filter, mem_Ioc, mem_Icc, and_imp]
    rintro _ y _ hy' rfl
    have hm : 2 ≤ m := hm.lt_of_le' hm'
    simp only [Nat.mul_add_mod', Nat.mod_eq_of_lt hm, add_pos_iff, or_true, and_true, true_and,
      ← Nat.le_sub_iff_add_le hn, zero_lt_one]
    exact Nat.mul_le_of_le_div _ _ _ hy'
  rw [le_div_iff (Nat.cast_pos.2 hn), mul_comm, ← div_eq_mul_inv]
  apply (Nat.cast_le.2 (card_le_card this)).trans'
  rw [card_image_of_injective, Nat.card_Icc, Nat.sub_zero, div_le_iff (Nat.cast_pos.2 hm'),
    ← Nat.cast_mul, Nat.cast_le, add_one_mul (α := ℕ)]
  · have := @Nat.lt_div_mul_add n.pred m hm'
    rwa [← Nat.succ_le, Nat.succ_pred hn.ne'] at this
  · -- `(· * m + 1)` is injective since `m ≠ 0`.
    intro a b
    simp [hm'.ne']

/-- Same as `schnirelmannDensity_setOf_mod_eq_one`, stated with `Nat.ModEq`. Here `m = 1` is
allowed, since `n ≡ 1 [MOD 1]` always holds. -/
lemma schnirelmannDensity_setOf_modeq_one {m : ℕ} :
    schnirelmannDensity {n | n ≡ 1 [MOD m]} = (m⁻¹ : ℝ) := by
  rcases eq_or_ne m 1 with rfl | hm
  · simp [Nat.modEq_one]
  rw [← schnirelmannDensity_setOf_mod_eq_one hm]
  apply schnirelmannDensity_congr
  ext n
  simp only [Set.mem_setOf_eq, Nat.ModEq, Nat.one_mod_of_ne_one hm]

/-- The odd numbers have Schnirelmann density `1 / 2`. -/
lemma schnirelmannDensity_setOf_Odd : schnirelmannDensity (setOf Odd) = 2⁻¹ := by
  have h : setOf Odd = {n | n % 2 = 1} := Set.ext fun _ => Nat.odd_iff
  simp only [h]
  rw [schnirelmannDensity_setOf_mod_eq_one (by norm_num1), Nat.cast_two]
Combinatorics\Additive\Dissociation.lean
/-
Copyright (c) 2023 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.BigOperators.Group.Finset
import Mathlib.Algebra.Group.Units.Equiv
import Mathlib.Data.Fintype.Card
import Mathlib.Data.Set.Pointwise.Basic

/-!
# Dissociation and span

This file defines dissociation and span of sets in groups. These are analogs to the usual linear
independence and linear span of sets in a vector space but where the scalars are only allowed to
be `0` or `±1`. In characteristic 2 or 3, the two pairs of concepts are actually equivalent.

## Main declarations

* `MulDissociated`/`AddDissociated`: Predicate for a set to be dissociated.
* `Finset.mulSpan`/`Finset.addSpan`: Span of a finset.
-/

variable {α β : Type*} [CommGroup α] [CommGroup β]

section dissociation
variable {s : Set α} {t u : Finset α} {d : ℕ} {a : α}

open Set

/-- A set is dissociated iff all its finite subsets have different products.

This is an analog of linear independence in a vector space, but with the "scalars" restricted to
`0` and `±1`. -/
@[to_additive "A set is dissociated iff all its finite subsets have different sums.

This is an analog of linear independence in a vector space, but with the \"scalars\" restricted to
`0` and `±1`."]
def MulDissociated (s : Set α) : Prop := {t : Finset α | ↑t ⊆ s}.InjOn (∏ x ∈ ·, x)

-- NOTE(review): the name says `sum` but the multiplicative statement is about products;
-- consider renaming upstream.
@[to_additive]
lemma mulDissociated_iff_sum_eq_subsingleton :
    MulDissociated s ↔ ∀ a, {t : Finset α | ↑t ⊆ s ∧ ∏ x ∈ t, x = a}.Subsingleton :=
  ⟨fun hs _ _t ht _u hu ↦ hs ht.1 hu.1 $ ht.2.trans hu.2.symm,
    fun hs _t ht _u hu htu ↦ hs _ ⟨ht, htu⟩ ⟨hu, rfl⟩⟩

/-- Dissociation is antitone: a subset of a dissociated set is dissociated. -/
@[to_additive]
lemma MulDissociated.subset {t : Set α} (hst : s ⊆ t) (ht : MulDissociated t) :
    MulDissociated s :=
  ht.mono fun _ ↦ hst.trans'

@[to_additive (attr := simp)]
lemma mulDissociated_empty : MulDissociated (∅ : Set α) := by
  simp [MulDissociated, subset_empty_iff]

@[to_additive (attr := simp)]
lemma mulDissociated_singleton : MulDissociated ({a} : Set α) ↔ a ≠ 1 := by
  simp [MulDissociated, setOf_or, (Finset.singleton_ne_empty _).symm, -subset_singleton_iff,
    Finset.coe_subset_singleton]

@[to_additive (attr := simp)]
lemma not_mulDissociated :
    ¬ MulDissociated s ↔
      ∃ t : Finset α, ↑t ⊆ s ∧ ∃ u : Finset α, ↑u ⊆ s ∧ t ≠ u ∧ ∏ x ∈ t, x = ∏ x ∈ u, x := by
  simp [MulDissociated, InjOn]; aesop

/-- Non-dissociation can always be witnessed by two *disjoint* finsets: cancel the common part
of any two witnesses. -/
@[to_additive]
lemma not_mulDissociated_iff_exists_disjoint :
    ¬ MulDissociated s ↔
      ∃ t u : Finset α, ↑t ⊆ s ∧ ↑u ⊆ s ∧ Disjoint t u ∧ t ≠ u ∧ ∏ a ∈ t, a = ∏ a ∈ u, a := by
  classical
  refine not_mulDissociated.trans
    ⟨?_, fun ⟨t, u, ht, hu, _, htune, htusum⟩ ↦ ⟨t, ht, u, hu, htune, htusum⟩⟩
  rintro ⟨t, ht, u, hu, htu, h⟩
  -- Replace `(t, u)` by `(t \ u, u \ t)`, which are disjoint with the same product identity.
  refine ⟨t \ u, u \ t, ?_, ?_, disjoint_sdiff_sdiff, sdiff_ne_sdiff_iff.2 htu,
    Finset.prod_sdiff_eq_prod_sdiff_iff.2 h⟩ <;>
    push_cast <;> exact diff_subset.trans ‹_›

@[to_additive (attr := simp)]
lemma MulEquiv.mulDissociated_preimage (e : β ≃* α) :
    MulDissociated (e ⁻¹' s) ↔ MulDissociated s := by
  simp [MulDissociated, InjOn, ← e.finsetCongr.forall_congr_right, ← e.apply_eq_iff_eq,
    (Finset.map_injective _).eq_iff]

@[to_additive (attr := simp)]
lemma mulDissociated_inv : MulDissociated s⁻¹ ↔ MulDissociated s :=
  (MulEquiv.inv α).mulDissociated_preimage

@[to_additive]
protected alias ⟨MulDissociated.of_inv, MulDissociated.inv⟩ := mulDissociated_inv

end dissociation

namespace Finset
variable [DecidableEq α] [Fintype α] {s t u : Finset α} {a : α} {d : ℕ}

/-- The span of a finset `s` is the finset of elements of the form `∏ a ∈ s, a ^ ε a` where
`ε ∈ {-1, 0, 1} ^ s`.

This is an analog of the linear span in a vector space, but with the "scalars" restricted to
`0` and `±1`. -/
@[to_additive "The span of a finset `s` is the finset of elements of the form `∑ a ∈ s, ε a • a`
where `ε ∈ {-1, 0, 1} ^ s`.

This is an analog of the linear span in a vector space, but with the \"scalars\" restricted to
`0` and `±1`."]
def mulSpan (s : Finset α) : Finset α :=
  (Fintype.piFinset fun _a ↦ ({-1, 0, 1} : Finset ℤ)).image fun ε ↦ ∏ a ∈ s, a ^ ε a

@[to_additive (attr := simp)]
lemma mem_mulSpan :
    a ∈ mulSpan s ↔
      ∃ ε : α → ℤ, (∀ a, ε a = -1 ∨ ε a = 0 ∨ ε a = 1) ∧ ∏ a ∈ s, a ^ ε a = a := by
  simp [mulSpan]

/-- Every element of `s` lies in its span, by taking exponent `1` on that element alone. -/
@[to_additive (attr := simp)]
lemma subset_mulSpan : s ⊆ mulSpan s := fun a ha ↦
  mem_mulSpan.2 ⟨Pi.single a 1, fun b ↦ by obtain rfl | hab := eq_or_ne a b <;> simp [*],
    by simp [Pi.single, Function.update, pow_ite, ha]⟩

/-- The quotient of the products of two subsets of `s` lies in the span of `s`, via the
exponent function `indicator t 1 - indicator u 1`. -/
@[to_additive]
lemma prod_div_prod_mem_mulSpan (ht : t ⊆ s) (hu : u ⊆ s) :
    (∏ a ∈ t, a) / ∏ a ∈ u, a ∈ mulSpan s :=
  mem_mulSpan.2
    ⟨Set.indicator t 1 - Set.indicator u 1,
      fun a ↦ by by_cases a ∈ t <;> by_cases a ∈ u <;> simp [*],
      by simp [prod_div_distrib, zpow_sub, ← div_eq_mul_inv, Set.indicator, pow_ite,
        inter_eq_right.2, *]⟩

/-- If every dissociated subset of `s` has size at most `d`, then `s` is actually generated by a
subset of size at most `d`.

This is a dissociation analog of the fact that a set whose linearly independent subsets all have
size at most `d` is of dimension at most `d` itself. -/
@[to_additive "If every dissociated subset of `s` has size at most `d`, then `s` is actually
generated by a subset of size at most `d`.

This is a dissociation analog of the fact that a set whose linearly independent subspaces all
have size at most `d` is of dimension at most `d` itself."]
lemma exists_subset_mulSpan_card_le_of_forall_mulDissociated
    (hs : ∀ s', s' ⊆ s → MulDissociated (s' : Set α) → s'.card ≤ d) :
    ∃ s', s' ⊆ s ∧ s'.card ≤ d ∧ s ⊆ mulSpan s' := by
  classical
  -- Take `s'` a maximal dissociated subset of `s`; its size is at most `d` by hypothesis.
  obtain ⟨s', hs', hs'max⟩ := exists_maximal
    (s.powerset.filter fun s' : Finset α ↦ MulDissociated (s' : Set α))
    ⟨∅, mem_filter.2 ⟨empty_mem_powerset _, by simp⟩⟩
  simp only [mem_filter, mem_powerset, lt_eq_subset, and_imp] at hs' hs'max
  refine ⟨s', hs'.1, hs _ hs'.1 hs'.2, fun a ha ↦ ?_⟩
  by_cases ha' : a ∈ s'
  · exact subset_mulSpan ha'
  -- By maximality, `insert a s'` is not dissociated: get disjoint witnesses `t ≠ u` with equal
  -- products, and solve for `a` as a quotient of products over subsets of `s'`.
  obtain ⟨t, u, ht, hu, htu⟩ := not_mulDissociated_iff_exists_disjoint.1 fun h ↦
    hs'max _ (insert_subset_iff.2 ⟨ha, hs'.1⟩) h $ ssubset_insert ha'
  by_cases hat : a ∈ t
  · have : a = (∏ b ∈ u, b) / ∏ b ∈ t.erase a, b := by
      rw [prod_erase_eq_div hat, htu.2.2, div_div_self']
    rw [this]
    exact prod_div_prod_mem_mulSpan
      ((subset_insert_iff_of_not_mem $ disjoint_left.1 htu.1 hat).1 hu) (subset_insert_iff.1 ht)
  rw [coe_subset, subset_insert_iff_of_not_mem hat] at ht
  by_cases hau : a ∈ u
  · have : a = (∏ b ∈ t, b) / ∏ b ∈ u.erase a, b := by
      rw [prod_erase_eq_div hau, htu.2.2, div_div_self']
    rw [this]
    exact prod_div_prod_mem_mulSpan ht (subset_insert_iff.1 hu)
  · -- If `a` lies in neither witness, both are subsets of `s'`, contradicting dissociation.
    rw [coe_subset, subset_insert_iff_of_not_mem hau] at hu
    cases not_mulDissociated_iff_exists_disjoint.2 ⟨t, u, ht, hu, htu⟩ hs'.2

end Finset
Combinatorics\Additive\Energy.lean
/-
Copyright (c) 2022 Yaël Dillies, Ella Yu. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Ella Yu
-/
import Mathlib.Algebra.Order.BigOperators.Ring.Finset
import Mathlib.Data.Finset.Prod
import Mathlib.Data.Fintype.Prod
import Mathlib.Data.Finset.Pointwise

/-!
# Additive energy

This file defines the additive energy of two finsets of a group. This is a central quantity in
additive combinatorics.

## Main declarations

* `Finset.addEnergy`: The additive energy of two finsets in an additive group.
* `Finset.mulEnergy`: The multiplicative energy of two finsets in a group.

## Notation

The following notations are defined in the `Combinatorics.Additive` scope:
* `E[s, t]` for `Finset.addEnergy s t`.
* `Eₘ[s, t]` for `Finset.mulEnergy s t`.
* `E[s]` for `E[s, s]`.
* `Eₘ[s]` for `Eₘ[s, s]`.

## TODO

It's possibly interesting to have
`(s ×ˢ s) ×ˢ t ×ˢ t).filter (fun x : (α × α) × α × α ↦ x.1.1 * x.2.1 = x.1.2 * x.2.2)`
(whose `card` is `mulEnergy s t`) as a standalone definition.
-/

open scoped Pointwise

variable {α : Type*} [DecidableEq α]

namespace Finset

section Mul
variable [Mul α] {s s₁ s₂ t t₁ t₂ : Finset α}

/-- The multiplicative energy `Eₘ[s, t]` of two finsets `s` and `t` in a group is the number of
quadruples `(a₁, a₂, b₁, b₂) ∈ s × s × t × t` such that `a₁ * b₁ = a₂ * b₂`.

The notation `Eₘ[s, t]` is available in scope `Combinatorics.Additive`. -/
@[to_additive "The additive energy `E[s, t]` of two finsets `s` and `t` in a group is the number
of quadruples `(a₁, a₂, b₁, b₂) ∈ s × s × t × t` such that `a₁ + b₁ = a₂ + b₂`.

The notation `E[s, t]` is available in scope `Combinatorics.Additive`."]
def mulEnergy (s t : Finset α) : ℕ :=
  (((s ×ˢ s) ×ˢ t ×ˢ t).filter fun x : (α × α) × α × α => x.1.1 * x.2.1 = x.1.2 * x.2.2).card

/-- The multiplicative energy of two finsets `s` and `t` in a group is the number of quadruples
`(a₁, a₂, b₁, b₂) ∈ s × s × t × t` such that `a₁ * b₁ = a₂ * b₂`. -/
scoped[Combinatorics.Additive] notation3:max "Eₘ[" s ", " t "]" => Finset.mulEnergy s t

/-- The additive energy of two finsets `s` and `t` in a group is the number of quadruples
`(a₁, a₂, b₁, b₂) ∈ s × s × t × t` such that `a₁ + b₁ = a₂ + b₂`.-/
scoped[Combinatorics.Additive] notation3:max "E[" s ", " t "]" => Finset.addEnergy s t

/-- The multiplicative energy of a finset `s` in a group is the number of quadruples
`(a₁, a₂, b₁, b₂) ∈ s × s × s × s` such that `a₁ * b₁ = a₂ * b₂`. -/
scoped[Combinatorics.Additive] notation3:max "Eₘ[" s "]" => Finset.mulEnergy s s

/-- The additive energy of a finset `s` in a group is the number of quadruples
`(a₁, a₂, b₁, b₂) ∈ s × s × s × s` such that `a₁ + b₁ = a₂ + b₂`. -/
scoped[Combinatorics.Additive] notation3:max "E[" s "]" => Finset.addEnergy s s

open scoped Combinatorics.Additive

-- Energy is monotone in both arguments.
@[to_additive (attr := gcongr)]
lemma mulEnergy_mono (hs : s₁ ⊆ s₂) (ht : t₁ ⊆ t₂) : Eₘ[s₁, t₁] ≤ Eₘ[s₂, t₂] := by
  unfold mulEnergy; gcongr

@[to_additive]
lemma mulEnergy_mono_left (hs : s₁ ⊆ s₂) : Eₘ[s₁, t] ≤ Eₘ[s₂, t] :=
  mulEnergy_mono hs Subset.rfl

@[to_additive]
lemma mulEnergy_mono_right (ht : t₁ ⊆ t₂) : Eₘ[s, t₁] ≤ Eₘ[s, t₂] :=
  mulEnergy_mono Subset.rfl ht

-- Lower bound: the diagonal quadruples `((a, a), (b, b))` always contribute.
@[to_additive]
lemma le_mulEnergy : s.card * t.card ≤ Eₘ[s, t] := by
  rw [← card_product]
  refine card_le_card_of_injOn (@fun x => ((x.1, x.1), x.2, x.2)) (by
    -- Porting note: changed this from a `simp` proof without `only` because of a timeout
    simp only [← and_imp, mem_product, Prod.forall, mem_filter, and_self, and_true, imp_self,
      implies_true]) fun a _ b _ => ?_
  simp only [Prod.mk.inj_iff, and_self_iff, and_imp]
  exact Prod.ext

@[to_additive]
lemma mulEnergy_pos (hs : s.Nonempty) (ht : t.Nonempty) : 0 < Eₘ[s, t] :=
  (mul_pos hs.card_pos ht.card_pos).trans_le le_mulEnergy

variable (s t)

@[to_additive (attr := simp)]
lemma mulEnergy_empty_left : Eₘ[∅, t] = 0 := by simp [mulEnergy]

@[to_additive (attr := simp)]
lemma mulEnergy_empty_right : Eₘ[s, ∅] = 0 := by simp [mulEnergy]

variable {s t}

@[to_additive (attr := simp)]
lemma mulEnergy_pos_iff : 0 < Eₘ[s, t] ↔ s.Nonempty ∧ t.Nonempty where
  mp h := of_not_not fun H => by
    simp_rw [not_and_or, not_nonempty_iff_eq_empty] at H
    obtain rfl | rfl := H <;> simp [Nat.not_lt_zero] at h
  mpr h := mulEnergy_pos h.1 h.2

@[to_additive (attr := simp)]
lemma mulEnergy_eq_zero_iff : Eₘ[s, t] = 0 ↔ s = ∅ ∨ t = ∅ := by
  simp [← (Nat.zero_le _).not_gt_iff_eq, not_and_or, imp_iff_or_not, or_comm]

-- Reindex the defining quadruples as `((a, b), (c, d)) ∈ (s ×ˢ t) ×ˢ (s ×ˢ t)`.
@[to_additive]
lemma mulEnergy_eq_card_filter (s t : Finset α) :
    Eₘ[s, t] = (((s ×ˢ t) ×ˢ s ×ˢ t).filter fun ((a, b), c, d) ↦ a * b = c * d).card :=
  card_equiv (.prodProdProdComm _ _ _ _) (by simp [and_and_and_comm])

-- Energy as the sum of squared "representation counts" `r(a) = |{(x, y) ∈ s × t | x y = a}|`.
@[to_additive]
lemma mulEnergy_eq_sum_sq' (s t : Finset α) :
    Eₘ[s, t] = ∑ a ∈ s * t, ((s ×ˢ t).filter fun (x, y) ↦ x * y = a).card ^ 2 := by
  simp_rw [mulEnergy_eq_card_filter, sq, ← card_product]
  rw [← card_disjiUnion]
  -- The `swap`, `ext` and `simp` calls significantly reduce heartbeats
  swap
  · simp only [Set.PairwiseDisjoint, Set.Pairwise, coe_mul, ne_eq, disjoint_left, mem_product,
      mem_filter, not_and, and_imp, Prod.forall]
    aesop
  · congr
    ext
    simp only [mem_filter, mem_product, disjiUnion_eq_biUnion, mem_biUnion]
    aesop (add unsafe mul_mem_mul)

@[to_additive]
lemma mulEnergy_eq_sum_sq [Fintype α] (s t : Finset α) :
    Eₘ[s, t] = ∑ a, ((s ×ˢ t).filter fun (x, y) ↦ x * y = a).card ^ 2 := by
  rw [mulEnergy_eq_sum_sq']
  exact Fintype.sum_subset $ by aesop (add simp [filter_eq_empty_iff, mul_mem_mul])

-- A Cauchy–Schwarz-type bound relating a popularity count to the energy.
@[to_additive card_sq_le_card_mul_addEnergy]
lemma card_sq_le_card_mul_mulEnergy (s t u : Finset α) :
    ((s ×ˢ t).filter fun (a, b) ↦ a * b ∈ u).card ^ 2 ≤ u.card * Eₘ[s, t] := by
  calc
    _ = (∑ c ∈ u, ((s ×ˢ t).filter fun (a, b) ↦ a * b = c).card) ^ 2 := by
      rw [← sum_card_fiberwise_eq_card_filter]
    _ ≤ u.card * ∑ c ∈ u, ((s ×ˢ t).filter fun (a, b) ↦ a * b = c).card ^ 2 := by
      simpa using sum_mul_sq_le_sq_mul_sq (R := ℕ) _ 1 _
    _ ≤ u.card * ∑ c ∈ s * t, ((s ×ˢ t).filter fun (a, b) ↦ a * b = c).card ^ 2 := by
      refine mul_le_mul_left' (sum_le_sum_of_ne_zero ?_) _
      aesop (add simp [filter_eq_empty_iff]) (add unsafe mul_mem_mul)
    _ = u.card * Eₘ[s, t] := by rw [mulEnergy_eq_sum_sq']

-- Standard inequality: `|s|² |t|² ≤ |s t| · Eₘ[s, t]`.
@[to_additive le_card_add_mul_addEnergy]
lemma le_card_add_mul_mulEnergy (s t : Finset α) :
    s.card ^ 2 * t.card ^ 2 ≤ (s * t).card * Eₘ[s, t] :=
  calc
    _ = ((s ×ˢ t).filter fun (a, b) ↦ a * b ∈ s * t).card ^ 2 := by
      rw [filter_eq_self.2, card_product, mul_pow]; aesop (add unsafe mul_mem_mul)
    _ ≤ (s * t).card * Eₘ[s, t] := card_sq_le_card_mul_mulEnergy _ _ _

end Mul

open scoped Combinatorics.Additive

section CommMonoid
variable [CommMonoid α]

@[to_additive]
lemma mulEnergy_comm (s t : Finset α) : Eₘ[s, t] = Eₘ[t, s] := by
  rw [mulEnergy, ← Finset.card_map (Equiv.prodComm _ _).toEmbedding, map_filter]
  simp [-Finset.card_map, eq_comm, mulEnergy, mul_comm, map_eq_image, Function.comp]

end CommMonoid

section CommGroup
variable [CommGroup α] [Fintype α] (s t : Finset α)

@[to_additive (attr := simp)]
lemma mulEnergy_univ_left : Eₘ[univ, t] = Fintype.card α * t.card ^ 2 := by
  simp only [mulEnergy, univ_product_univ, Fintype.card, sq, ← card_product]
  -- Inject `α × t × t` into the energy quadruples via `(a, b, c) ↦ ((a c, a b), (b, c))`.
  let f : α × α × α → (α × α) × α × α := fun x => ((x.1 * x.2.2, x.1 * x.2.1), x.2)
  have : (↑((univ : Finset α) ×ˢ t ×ˢ t) : Set (α × α × α)).InjOn f := by
    rintro ⟨a₁, b₁, c₁⟩ _ ⟨a₂, b₂, c₂⟩ h₂ h
    simp_rw [Prod.ext_iff] at h
    obtain ⟨h, rfl, rfl⟩ := h
    rw [mul_right_cancel h.1]
  rw [← card_image_of_injOn this]
  congr with a
  simp only [mem_filter, mem_product, mem_univ, true_and_iff, mem_image, exists_prop,
    Prod.exists]
  refine ⟨fun h => ⟨a.1.1 * a.2.2⁻¹, _, _, h.1, by simp [f, mul_right_comm, h.2]⟩, ?_⟩
  rintro ⟨b, c, d, hcd, rfl⟩
  simpa [mul_right_comm]

@[to_additive (attr := simp)]
lemma mulEnergy_univ_right : Eₘ[s, univ] = Fintype.card α * s.card ^ 2 := by
  rw [mulEnergy_comm, mulEnergy_univ_left]

end CommGroup

end Finset
Combinatorics\Additive\ErdosGinzburgZiv.lean
/-
Copyright (c) 2023 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.BigOperators.Ring
import Mathlib.Data.Multiset.Fintype
import Mathlib.FieldTheory.ChevalleyWarning
import Mathlib.RingTheory.UniqueFactorizationDomain

/-!
# The Erdős–Ginzburg–Ziv theorem

This file proves the Erdős–Ginzburg–Ziv theorem as a corollary of Chevalley-Warning. This theorem
states that among any (not necessarily distinct) `2 * n - 1` elements of `ZMod n`, we can find `n`
elements of sum zero.

## Main declarations

* `Int.erdos_ginzburg_ziv`: The Erdős–Ginzburg–Ziv theorem stated using sequences in `ℤ`
* `ZMod.erdos_ginzburg_ziv`: The Erdős–Ginzburg–Ziv theorem stated using sequences in `ZMod n`
-/

open Finset MvPolynomial
open scoped BigOperators

variable {ι : Type*}

section prime
variable {p : ℕ} [Fact p.Prime] {s : Finset ι}

set_option linter.unusedVariables false in
/-- The first multivariate polynomial used in the proof of Erdős–Ginzburg–Ziv. -/
-- The unused-variable linter is silenced because `a` is kept so that `f₁` and `f₂` share a
-- signature, even though `f₁` does not mention it.
private noncomputable def f₁ (s : Finset ι) (a : ι → ZMod p) : MvPolynomial s (ZMod p) :=
  ∑ i, X i ^ (p - 1)

/-- The second multivariate polynomial used in the proof of Erdős–Ginzburg–Ziv. -/
private noncomputable def f₂ (s : Finset ι) (a : ι → ZMod p) : MvPolynomial s (ZMod p) :=
  ∑ i : s, a i • X i ^ (p - 1)

-- The degree bound needed to apply Chevalley–Warning: both polynomials have total degree at most
-- `p - 1`, and `(p - 1) + (p - 1) < 2 * p - 1`.
private lemma totalDegree_f₁_add_totalDegree_f₂ {a : ι → ZMod p} :
    (f₁ s a).totalDegree + (f₂ s a).totalDegree < 2 * p - 1 := by
  calc
    _ ≤ (p - 1) + (p - 1) := by
      gcongr <;> apply totalDegree_finsetSum_le <;> rintro i _
      · exact (totalDegree_X_pow ..).le
      · exact (totalDegree_smul_le ..).trans (totalDegree_X_pow ..).le
    _ < 2 * p - 1 := by have := (Fact.out : p.Prime).two_le; omega

/-- The prime case of the **Erdős–Ginzburg–Ziv theorem** for `ℤ/pℤ`.

Any sequence of `2 * p - 1` elements of `ZMod p` contains a subsequence of `p` elements whose sum
is zero. -/
private theorem ZMod.erdos_ginzburg_ziv_prime (a : ι → ZMod p) (hs : s.card = 2 * p - 1) :
    ∃ t ⊆ s, t.card = p ∧ ∑ i ∈ t, a i = 0 := by
  haveI : NeZero p := inferInstance
  classical
  -- Let `N` be the number of common roots of our polynomials `f₁` and `f₂` (`f s ff` and `f s tt`).
  set N := Fintype.card {x // eval x (f₁ s a) = 0 ∧ eval x (f₂ s a) = 0}
  -- Zero is a common root to `f₁` and `f₂`, so `N` is nonzero
  let zero_sol : {x // eval x (f₁ s a) = 0 ∧ eval x (f₂ s a) = 0} :=
    ⟨0, by simp [f₁, f₂, map_sum, (Fact.out : p.Prime).one_lt, tsub_eq_zero_iff_le]⟩
  have hN₀ : 0 < N := @Fintype.card_pos _ _ ⟨zero_sol⟩
  have hs' : 2 * p - 1 = Fintype.card s := by simp [hs]
  -- Chevalley-Warning gives us that `p ∣ n` because the total degrees of `f₁` and `f₂` are at most
  -- `p - 1`, and we have `2 * p - 1 > 2 * (p - 1)` variables.
  have hpN : p ∣ N := char_dvd_card_solutions_of_add_lt p
    (totalDegree_f₁_add_totalDegree_f₂.trans_eq hs')
  -- Hence, `2 ≤ p ≤ N` and we can make a common root `x ≠ 0`.
  obtain ⟨x, hx⟩ := Fintype.exists_ne_of_one_lt_card ((Fact.out : p.Prime).one_lt.trans_le $
    Nat.le_of_dvd hN₀ hpN) zero_sol
  -- This common root gives us the required subsequence, namely the `i ∈ s` such that `x i ≠ 0`.
  refine ⟨(s.attach.filter fun a ↦ x.1 a ≠ 0).map ⟨(↑), Subtype.val_injective⟩, ?_, ?_, ?_⟩
  · simp (config := { contextual := true }) [subset_iff]
  -- From `f₁ x = 0`, we get that `p` divides the number of `a` such that `x a ≠ 0`.
  · rw [card_map]
    refine Nat.eq_of_dvd_of_lt_two_mul (Finset.card_pos.2 ?_).ne' ?_ $
      (Finset.card_filter_le _ _).trans_lt ?_
    -- This number is nonzero because `x ≠ 0`.
    · rw [← Subtype.coe_ne_coe, Function.ne_iff] at hx
      exact hx.imp (fun a ha ↦ mem_filter.2 ⟨Finset.mem_attach _ _, ha⟩)
    · rw [← CharP.cast_eq_zero_iff (ZMod p), ← Finset.sum_boole]
      simpa only [f₁, map_sum, ZMod.pow_card_sub_one, map_pow, eval_X] using x.2.1
    -- And it is at most `2 * p - 1`, so it must be `p`.
    · rw [Finset.card_attach, hs]
      exact tsub_lt_self (mul_pos zero_lt_two (Fact.out : p.Prime).pos) zero_lt_one
  -- From `f₂ x = 0`, we get that `p` divides the sum of the `a ∈ s` such that `x a ≠ 0`.
  · simpa [f₂, ZMod.pow_card_sub_one, Finset.sum_filter] using x.2.2

/-- The prime case of the **Erdős–Ginzburg–Ziv theorem** for `ℤ`.

Any sequence of `2 * p - 1` elements of `ℤ` contains a subsequence of `p` elements whose sum is
divisible by `p`. -/
private theorem Int.erdos_ginzburg_ziv_prime (a : ι → ℤ) (hs : s.card = 2 * p - 1) :
    ∃ t ⊆ s, t.card = p ∧ ↑p ∣ ∑ i ∈ t, a i := by
  -- Reduce mod `p` and apply the `ZMod p` version.
  simpa [← Int.cast_sum, ZMod.intCast_zmod_eq_zero_iff_dvd] using
    ZMod.erdos_ginzburg_ziv_prime (Int.cast ∘ a) hs

end prime

section composite
variable {n : ℕ} {s : Finset ι}

/-- The **Erdős–Ginzburg–Ziv theorem** for `ℤ`.

Any sequence of at least `2 * n - 1` elements of `ℤ` contains a subsequence of `n` elements whose
sum is divisible by `n`. -/
theorem Int.erdos_ginzburg_ziv (a : ι → ℤ) (hs : 2 * n - 1 ≤ s.card) :
    ∃ t ⊆ s, t.card = n ∧ ↑n ∣ ∑ i ∈ t, a i := by
  classical
  -- Do induction on the prime factorisation of `n`. Note that we will apply the induction
  -- hypothesis with `ι := Finset ι`, so we need to generalise.
  induction n using Nat.prime_composite_induction generalizing ι with
  -- When `n := 0`, we can set `t := ∅`.
  | zero => exact ⟨∅, by simp⟩
  -- When `n := 1`, we can take `t` to be any subset of `s` of size `2 * n - 1`.
  | one => simpa using exists_subset_card_eq hs
  -- When `n := p` is prime, we use the prime case `Int.erdos_ginzburg_ziv_prime`.
  | prime p hp =>
    haveI := Fact.mk hp
    obtain ⟨t, hts, ht⟩ := exists_subset_card_eq hs
    obtain ⟨u, hut, hu⟩ := Int.erdos_ginzburg_ziv_prime a ht
    exact ⟨u, hut.trans hts, hu⟩
  -- When `n := m * n` is composite, we pick (by induction hypothesis on `n`) `2 * m - 1` sets of
  -- size `n` and sums divisible by `n`. Then by induction hypothesis (on `m`) we can pick `m` of
  -- these sets whose sum is divisible by `m * n`.
  | composite m hm ihm n hn ihn =>
    -- First, show that it is enough to have those `2 * m - 1` sets.
    suffices ∀ k ≤ 2 * m - 1, ∃ 𝒜 : Finset (Finset ι), 𝒜.card = k ∧
        (𝒜 : Set (Finset ι)).Pairwise _root_.Disjoint ∧
        ∀ ⦃t⦄, t ∈ 𝒜 → t ⊆ s ∧ t.card = n ∧ ↑n ∣ ∑ i ∈ t, a i by
      -- Assume `𝒜` is a family of `2 * m - 1` sets, each of size `n` and sum divisible by `n`.
      obtain ⟨𝒜, h𝒜card, h𝒜disj, h𝒜⟩ := this _ le_rfl
      -- By induction hypothesis on `m`, find a subfamily `ℬ` of size `m` such that the sum over
      -- `t ∈ ℬ` of `(∑ i ∈ t, a i) / n` is divisible by `m`.
      obtain ⟨ℬ, hℬ𝒜, hℬcard, hℬ⟩ := ihm (fun t ↦ (∑ i ∈ t, a i) / n) h𝒜card.ge
      -- We are done.
      refine ⟨ℬ.biUnion fun x ↦ x, biUnion_subset.2 fun t ht ↦ (h𝒜 $ hℬ𝒜 ht).1, ?_, ?_⟩
      · rw [card_biUnion (h𝒜disj.mono hℬ𝒜), sum_const_nat fun t ht ↦ (h𝒜 $ hℬ𝒜 ht).2.1,
          hℬcard]
      rwa [sum_biUnion, natCast_mul, mul_comm, ← Int.dvd_div_iff_mul_dvd, Int.sum_div]
      · exact fun t ht ↦ (h𝒜 $ hℬ𝒜 ht).2.2
      · exact dvd_sum fun t ht ↦ (h𝒜 $ hℬ𝒜 ht).2.2
      · exact h𝒜disj.mono hℬ𝒜
    -- Now, let's find those `2 * m - 1` sets.
    rintro k hk
    -- We induct on the size `k ≤ 2 * m - 1` of the family we are constructing.
    induction' k with k ih
    -- For `k = 0`, the empty family trivially works.
    · exact ⟨∅, by simp⟩
    -- At `k + 1`, call `𝒜` the existing family of size `k ≤ 2 * m - 2`.
    obtain ⟨𝒜, h𝒜card, h𝒜disj, h𝒜⟩ := ih (Nat.le_of_succ_le hk)
    -- There are at least `2 * (m * n) - 1 - k * n ≥ 2 * m - 1` elements in `s` that have not been
    -- taken in any element of `𝒜`.
    have : 2 * n - 1 ≤ (s \ 𝒜.biUnion id).card := by
      calc
        _ ≤ (2 * m - k) * n - 1 := by gcongr; omega
        _ = (2 * (m * n) - 1) - ∑ t ∈ 𝒜, t.card := by
          rw [tsub_mul, mul_assoc, tsub_right_comm, sum_const_nat fun t ht ↦ (h𝒜 ht).2.1,
            h𝒜card]
        _ ≤ s.card - (𝒜.biUnion id).card := by gcongr; exact card_biUnion_le
        _ ≤ (s \ 𝒜.biUnion id).card := le_card_sdiff ..
    -- So by the induction hypothesis on `n` we can find a new set `t` of size `n` and sum divisible
    -- by `n`.
    obtain ⟨t₀, ht₀, ht₀card, ht₀sum⟩ := ihn a this
    -- This set is distinct and disjoint from the previous ones, so we are done.
    have : t₀ ∉ 𝒜 := by
      rintro h
      obtain rfl : n = 0 := by
        simpa [← card_eq_zero, ht₀card] using sdiff_disjoint.mono ht₀ $
          subset_biUnion_of_mem id h
      omega
    refine ⟨𝒜.cons t₀ this, by rw [card_cons, h𝒜card], ?_, ?_⟩
    · simp only [cons_eq_insert, coe_insert, Set.pairwise_insert_of_symmetric
        symmetric_disjoint, mem_coe, ne_eq]
      exact ⟨h𝒜disj, fun t ht _ ↦ sdiff_disjoint.mono ht₀ $ subset_biUnion_of_mem id ht⟩
    · simp only [cons_eq_insert, mem_insert, forall_eq_or_imp, and_assoc]
      exact ⟨ht₀.trans sdiff_subset, ht₀card, ht₀sum, h𝒜⟩

/-- The **Erdős–Ginzburg–Ziv theorem** for `ℤ/nℤ`.

Any sequence of at least `2 * n - 1` elements of `ZMod n` contains a subsequence of `n` elements
whose sum is zero. -/
theorem ZMod.erdos_ginzburg_ziv (a : ι → ZMod n) (hs : 2 * n - 1 ≤ s.card) :
    ∃ t ⊆ s, t.card = n ∧ ∑ i ∈ t, a i = 0 := by
  -- Lift to `ℤ` and apply the integer version.
  simpa [← ZMod.intCast_zmod_eq_zero_iff_dvd] using Int.erdos_ginzburg_ziv (ZMod.cast ∘ a) hs

/-- The **Erdős–Ginzburg–Ziv theorem** for `ℤ` for multiset.

Any multiset of at least `2 * n - 1` elements of `ℤ` contains a submultiset of `n` elements whose
sum is divisible by `n`. -/
theorem Int.erdos_ginzburg_ziv_multiset (s : Multiset ℤ)
    (hs : 2 * n - 1 ≤ Multiset.card s) : ∃ t ≤ s, Multiset.card t = n ∧ ↑n ∣ t.sum := by
  -- View the multiset as a finset of (element, multiplicity-index) pairs.
  obtain ⟨t, hts, ht⟩ := Int.erdos_ginzburg_ziv (s := s.toEnumFinset) Prod.fst (by simpa using hs)
  exact ⟨t.1.map Prod.fst, Multiset.map_fst_le_of_subset_toEnumFinset hts, by simpa using ht⟩

/-- The **Erdős–Ginzburg–Ziv theorem** for `ℤ/nℤ` for multiset.

Any multiset of at least `2 * n - 1` elements of `ℤ` contains a submultiset of `n` elements whose
sum is divisible by `n`. -/
theorem ZMod.erdos_ginzburg_ziv_multiset (s : Multiset (ZMod n))
    (hs : 2 * n - 1 ≤ Multiset.card s) : ∃ t ≤ s, Multiset.card t = n ∧ t.sum = 0 := by
  obtain ⟨t, hts, ht⟩ := ZMod.erdos_ginzburg_ziv (s := s.toEnumFinset) Prod.fst
    (by simpa using hs)
  exact ⟨t.1.map Prod.fst, Multiset.map_fst_le_of_subset_toEnumFinset hts, by simpa using ht⟩

end composite
Combinatorics\Additive\ETransform.lean
/-
Copyright (c) 2023 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Data.Finset.Pointwise

/-!
# e-transforms

e-transforms are a family of transformations of pairs of finite sets that aim to reduce the size
of the sumset while keeping some invariant the same. This file defines a few of them, to be used
as internals of other proofs.

## Main declarations

* `Finset.mulDysonETransform`: The Dyson e-transform. Replaces `(s, t)` by
  `(s ∪ e • t, t ∩ e⁻¹ • s)`. The additive version preserves `|s ∩ [1, m]| + |t ∩ [1, m - e]|`.
* `Finset.mulETransformLeft`/`Finset.mulETransformRight`: Replace `(s, t)` by
  `(s ∩ s • e, t ∪ e⁻¹ • t)` and `(s ∪ s • e, t ∩ e⁻¹ • t)`. Preserve (together) the sum of the
  cardinalities (see `Finset.MulETransform.card`). In particular, one of the two transforms
  increases the sum of the cardinalities and the other one decreases it. See
  `le_or_lt_of_add_le_add` and around.

## TODO

Prove the invariance property of the Dyson e-transform.
-/

open MulOpposite

open Pointwise

variable {α : Type*} [DecidableEq α]

namespace Finset

/-! ### Dyson e-transform -/

section CommGroup

-- `e` is the "direction" of the transform; `x` is the pair of finsets being transformed.
variable [CommGroup α] (e : α) (x : Finset α × Finset α)

/-- The **Dyson e-transform**. Turns `(s, t)` into `(s ∪ e • t, t ∩ e⁻¹ • s)`. This reduces the
product of the two sets. -/
@[to_additive (attr := simps)
"The **Dyson e-transform**. Turns `(s, t)` into `(s ∪ e +ᵥ t, t ∩ -e +ᵥ s)`. This reduces the
sum of the two sets."]
def mulDysonETransform : Finset α × Finset α :=
  (x.1 ∪ e • x.2, x.2 ∩ e⁻¹ • x.1)

-- The transform does not enlarge the product set.
@[to_additive]
theorem mulDysonETransform.subset :
    (mulDysonETransform e x).1 * (mulDysonETransform e x).2 ⊆ x.1 * x.2 := by
  refine union_mul_inter_subset_union.trans (union_subset Subset.rfl ?_)
  rw [mul_smul_comm, smul_mul_assoc, inv_smul_smul, mul_comm]

-- The transform preserves the sum of the cardinalities of the two components.
@[to_additive]
theorem mulDysonETransform.card :
    (mulDysonETransform e x).1.card + (mulDysonETransform e x).2.card =
      x.1.card + x.2.card := by
  dsimp
  rw [← card_smul_finset e (_ ∩ _), smul_finset_inter, smul_inv_smul, inter_comm,
    card_union_add_card_inter, card_smul_finset]

-- The Dyson e-transform is idempotent: applying it twice is the same as applying it once.
@[to_additive (attr := simp)]
theorem mulDysonETransform_idem :
    mulDysonETransform e (mulDysonETransform e x) = mulDysonETransform e x := by
  ext : 1 <;> dsimp
  · rw [smul_finset_inter, smul_inv_smul, inter_comm, union_eq_left]
    exact inter_subset_union
  · rw [smul_finset_union, inv_smul_smul, union_comm, inter_eq_left]
    exact inter_subset_union

variable {e x}

-- After transforming, `e` translates the second component into the first.
@[to_additive]
theorem mulDysonETransform.smul_finset_snd_subset_fst :
    e • (mulDysonETransform e x).2 ⊆ (mulDysonETransform e x).1 := by
  dsimp
  rw [smul_finset_inter, smul_inv_smul, inter_comm]
  exact inter_subset_union

end CommGroup

/-!
### Two unnamed e-transforms

The following two transforms both reduce the product/sum of the two sets. Further, one of them
must decrease the sum of the size of the sets (and then the other increases it).

This pair of transforms doesn't seem to be named in the literature. It is used by Sanders in his
bound on Roth numbers, and by DeVos in his proof of Cauchy-Davenport.
-/

section Group

variable [Group α] (e : α) (x : Finset α × Finset α)

/-- An **e-transform**. Turns `(s, t)` into `(s ∩ s • e, t ∪ e⁻¹ • t)`. This reduces the
product of the two sets. -/
@[to_additive (attr := simps)
"An **e-transform**. Turns `(s, t)` into `(s ∩ s +ᵥ e, t ∪ -e +ᵥ t)`. This reduces the sum of
the two sets."]
def mulETransformLeft : Finset α × Finset α :=
  -- `op e` acts on the right of `x.1`, since `α` is only a `Group` here, not commutative.
  (x.1 ∩ op e • x.1, x.2 ∪ e⁻¹ • x.2)

/-- An **e-transform**. Turns `(s, t)` into `(s ∪ s • e, t ∩ e⁻¹ • t)`. This reduces the
product of the two sets. -/
@[to_additive (attr := simps)
"An **e-transform**. Turns `(s, t)` into `(s ∪ s +ᵥ e, t ∩ -e +ᵥ t)`. This reduces the sum of
the two sets."]
def mulETransformRight : Finset α × Finset α :=
  (x.1 ∪ op e • x.1, x.2 ∩ e⁻¹ • x.2)

-- Transforming by `1` does nothing.
@[to_additive (attr := simp)]
theorem mulETransformLeft_one : mulETransformLeft 1 x = x := by simp [mulETransformLeft]

@[to_additive (attr := simp)]
theorem mulETransformRight_one : mulETransformRight 1 x = x := by simp [mulETransformRight]

-- Both transforms shrink (or keep) the product of the two components.
@[to_additive]
theorem mulETransformLeft.fst_mul_snd_subset :
    (mulETransformLeft e x).1 * (mulETransformLeft e x).2 ⊆ x.1 * x.2 := by
  refine inter_mul_union_subset_union.trans (union_subset Subset.rfl ?_)
  rw [op_smul_finset_mul_eq_mul_smul_finset, smul_inv_smul]

@[to_additive]
theorem mulETransformRight.fst_mul_snd_subset :
    (mulETransformRight e x).1 * (mulETransformRight e x).2 ⊆ x.1 * x.2 := by
  refine union_mul_inter_subset_union.trans (union_subset Subset.rfl ?_)
  rw [op_smul_finset_mul_eq_mul_smul_finset, smul_inv_smul]

-- The first components of the two transforms together account for `x.1` twice.
@[to_additive]
theorem mulETransformLeft.card :
    (mulETransformLeft e x).1.card + (mulETransformRight e x).1.card = 2 * x.1.card :=
  (card_inter_add_card_union _ _).trans <| by rw [card_smul_finset, two_mul]

-- Likewise the second components together account for `x.2` twice.
@[to_additive]
theorem mulETransformRight.card :
    (mulETransformLeft e x).2.card + (mulETransformRight e x).2.card = 2 * x.2.card :=
  (card_union_add_card_inter _ _).trans <| by rw [card_smul_finset, two_mul]

/-- This statement is meant to be combined with `le_or_lt_of_add_le_add` and similar lemmas. -/
@[to_additive AddETransform.card
"This statement is meant to be combined with `le_or_lt_of_add_le_add` and similar lemmas."]
protected theorem MulETransform.card :
    (mulETransformLeft e x).1.card + (mulETransformLeft e x).2.card +
        ((mulETransformRight e x).1.card + (mulETransformRight e x).2.card) =
      x.1.card + x.2.card + (x.1.card + x.2.card) := by
  rw [add_add_add_comm, mulETransformLeft.card, mulETransformRight.card, ← mul_add, two_mul]

end Group

section CommGroup

variable [CommGroup α] (e : α) (x : Finset α × Finset α)

-- In a commutative group, transforming by `e⁻¹` swaps the roles of the two transforms.
@[to_additive (attr := simp)]
theorem mulETransformLeft_inv : mulETransformLeft e⁻¹ x = (mulETransformRight e x.swap).swap := by
  simp [-op_inv, op_smul_eq_smul, mulETransformLeft, mulETransformRight]

@[to_additive (attr := simp)]
theorem mulETransformRight_inv :
    mulETransformRight e⁻¹ x = (mulETransformLeft e x.swap).swap := by
  simp [-op_inv, op_smul_eq_smul, mulETransformLeft, mulETransformRight]

end CommGroup

end Finset
Combinatorics\Additive\FreimanHom.lean
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.BigOperators.Ring
import Mathlib.Algebra.CharP.Defs
import Mathlib.Algebra.Order.BigOperators.Group.Multiset
import Mathlib.Data.ZMod.Defs
import Mathlib.Data.Set.Pointwise.Basic

/-!
# Freiman homomorphisms

In this file, we define Freiman homomorphisms and isomorphism.

An `n`-Freiman homomorphism from `A` to `B` is a function `f : α → β` such that `f '' A ⊆ B` and
`f x₁ * ... * f xₙ = f y₁ * ... * f yₙ` for all `x₁, ..., xₙ, y₁, ..., yₙ ∈ A` such that
`x₁ * ... * xₙ = y₁ * ... * yₙ`. In particular, any `MulHom` is a Freiman homomorphism.

An `n`-Freiman isomorphism from `A` to `B` is a function `f : α → β` bijective between `A` and `B`
such that `f x₁ * ... * f xₙ = f y₁ * ... * f yₙ ↔ x₁ * ... * xₙ = y₁ * ... * yₙ` for all
`x₁, ..., xₙ, y₁, ..., yₙ ∈ A`. In particular, any `MulEquiv` is a Freiman isomorphism.

They are of interest in additive combinatorics.

## Main declaration

* `IsMulFreimanHom`: Predicate for a function to be a multiplicative Freiman homomorphism.
* `IsAddFreimanHom`: Predicate for a function to be an additive Freiman homomorphism.
* `IsMulFreimanIso`: Predicate for a function to be a multiplicative Freiman isomorphism.
* `IsAddFreimanIso`: Predicate for a function to be an additive Freiman isomorphism.

## Implementation notes

In the context of combinatorics, we are interested in Freiman homomorphisms over sets which are not
necessarily closed under addition/multiplication. This means we must parametrize them with a set in
an `AddMonoid`/`Monoid` instead of the `AddMonoid`/`Monoid` itself.

## References

[Yufei Zhao, *18.225: Graph Theory and Additive Combinatorics*](https://yufeizhao.com/gtac/)

## TODO

* `MonoidHomClass.isMulFreimanHom` could be relaxed to `MulHom.toFreimanHom` by proving
  `(s.map f).prod = (t.map f).prod` directly by induction instead of going through `f s.prod`.
* Affine maps are Freiman homs.
-/

open Multiset Set

open scoped Pointwise

variable {F α β γ : Type*}

section CommMonoid

variable [CommMonoid α] [CommMonoid β] [CommMonoid γ] {A A₁ A₂ : Set α} {B B₁ B₂ : Set β}
  {C : Set γ} {f f₁ f₂ : α → β} {g : β → γ} {m n : ℕ}

/-- An additive `n`-Freiman homomorphism from a set `A` to a set `B` is a map which preserves sums
of `n` elements. -/
structure IsAddFreimanHom [AddCommMonoid α] [AddCommMonoid β] (n : ℕ) (A : Set α) (B : Set β)
    (f : α → β) : Prop where
  -- `f` sends `A` into `B`.
  mapsTo : MapsTo f A B
  /-- An additive `n`-Freiman homomorphism preserves sums of `n` elements. -/
  -- The `⦃⦄` (strict implicit) binders let the multisets be inferred from the later hypotheses.
  map_sum_eq_map_sum ⦃s t : Multiset α⦄ (hsA : ∀ ⦃x⦄, x ∈ s → x ∈ A)
    (htA : ∀ ⦃x⦄, x ∈ t → x ∈ A) (hs : Multiset.card s = n) (ht : Multiset.card t = n)
    (h : s.sum = t.sum) : (s.map f).sum = (t.map f).sum

/-- An `n`-Freiman homomorphism from a set `A` to a set `B` is a map which preserves products of
`n` elements. -/
@[to_additive]
structure IsMulFreimanHom (n : ℕ) (A : Set α) (B : Set β) (f : α → β) : Prop where
  -- `f` sends `A` into `B`.
  mapsTo : MapsTo f A B
  /-- An `n`-Freiman homomorphism preserves products of `n` elements. -/
  map_prod_eq_map_prod ⦃s t : Multiset α⦄ (hsA : ∀ ⦃x⦄, x ∈ s → x ∈ A)
    (htA : ∀ ⦃x⦄, x ∈ t → x ∈ A) (hs : Multiset.card s = n) (ht : Multiset.card t = n)
    (h : s.prod = t.prod) : (s.map f).prod = (t.map f).prod

/-- An additive `n`-Freiman homomorphism from a set `A` to a set `B` is a bijective map which
preserves sums of `n` elements. -/
structure IsAddFreimanIso [AddCommMonoid α] [AddCommMonoid β] (n : ℕ) (A : Set α) (B : Set β)
    (f : α → β) : Prop where
  -- `f` is a bijection from `A` onto `B`.
  bijOn : BijOn f A B
  /-- An additive `n`-Freiman homomorphism preserves sums of `n` elements. -/
  -- Note: an iso gives an `↔`, not just the forward implication.
  map_sum_eq_map_sum ⦃s t : Multiset α⦄ (hsA : ∀ ⦃x⦄, x ∈ s → x ∈ A)
    (htA : ∀ ⦃x⦄, x ∈ t → x ∈ A) (hs : Multiset.card s = n) (ht : Multiset.card t = n) :
    (s.map f).sum = (t.map f).sum ↔ s.sum = t.sum

/-- An `n`-Freiman homomorphism from a set `A` to a set `B` is a map which preserves products of
`n` elements. -/
@[to_additive]
structure IsMulFreimanIso (n : ℕ) (A : Set α) (B : Set β) (f : α → β) : Prop where
  -- `f` is a bijection from `A` onto `B`.
  bijOn : BijOn f A B
  /-- An `n`-Freiman homomorphism preserves products of `n` elements. -/
  map_prod_eq_map_prod ⦃s t : Multiset α⦄ (hsA : ∀ ⦃x⦄, x ∈ s → x ∈ A)
    (htA : ∀ ⦃x⦄, x ∈ t → x ∈ A) (hs : Multiset.card s = n) (ht : Multiset.card t = n) :
    (s.map f).prod = (t.map f).prod ↔ s.prod = t.prod

-- A Freiman isomorphism is in particular a Freiman homomorphism.
@[to_additive]
lemma IsMulFreimanIso.isMulFreimanHom (hf : IsMulFreimanIso n A B f) :
    IsMulFreimanHom n A B f where
  mapsTo := hf.bijOn.mapsTo
  map_prod_eq_map_prod _s _t hsA htA hs ht := (hf.map_prod_eq_map_prod hsA htA hs ht).2

-- Specialization of the defining property to pairs: a `2`-Freiman hom preserves `a * b = c * d`.
@[to_additive]
lemma IsMulFreimanHom.mul_eq_mul (hf : IsMulFreimanHom 2 A B f) {a b c d : α} (ha : a ∈ A)
    (hb : b ∈ A) (hc : c ∈ A) (hd : d ∈ A) (h : a * b = c * d) : f a * f b = f c * f d := by
  simp_rw [← prod_pair] at h ⊢
  refine hf.map_prod_eq_map_prod ?_ ?_ (card_pair _ _) (card_pair _ _) h <;> simp [ha, hb, hc, hd]

-- The iso version of the previous lemma, as an `↔`.
@[to_additive]
lemma IsMulFreimanIso.mul_eq_mul (hf : IsMulFreimanIso 2 A B f) {a b c d : α} (ha : a ∈ A)
    (hb : b ∈ A) (hc : c ∈ A) (hd : d ∈ A) : f a * f b = f c * f d ↔ a * b = c * d := by
  simp_rw [← prod_pair]
  refine hf.map_prod_eq_map_prod ?_ ?_ (card_pair _ _) (card_pair _ _) <;> simp [ha, hb, hc, hd]

/-- Characterisation of `2`-Freiman homs. -/
@[to_additive "Characterisation of `2`-Freiman homs."]
lemma isMulFreimanHom_two :
    IsMulFreimanHom 2 A B f ↔ MapsTo f A B ∧
      ∀ a ∈ A, ∀ b ∈ A, ∀ c ∈ A, ∀ d ∈ A, a * b = c * d → f a * f b = f c * f d where
  mp hf := ⟨hf.mapsTo, fun a ha b hb c hc d hd ↦ hf.mul_eq_mul ha hb hc hd⟩
  mpr hf := ⟨hf.1, by aesop (add simp [Multiset.card_eq_two])⟩

-- The identity is an `n`-Freiman hom from any set into a superset.
@[to_additive]
lemma isMulFreimanHom_id (hA : A₁ ⊆ A₂) : IsMulFreimanHom n A₁ A₂ id where
  mapsTo := hA
  map_prod_eq_map_prod s t _ _ _ _ h := by simpa using h

-- The identity is an `n`-Freiman iso from any set to itself.
@[to_additive]
lemma isMulFreimanIso_id : IsMulFreimanIso n A A id where
  bijOn := bijOn_id _
  map_prod_eq_map_prod s t _ _ _ _ := by simp

-- Freiman homs compose.
@[to_additive]
lemma IsMulFreimanHom.comp (hg : IsMulFreimanHom n B C g) (hf : IsMulFreimanHom n A B f) :
    IsMulFreimanHom n A C (g ∘ f) where
  mapsTo := hg.mapsTo.comp hf.mapsTo
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    rw [← map_map, ← map_map]
    refine hg.map_prod_eq_map_prod ?_ ?_ (by rwa [card_map]) (by rwa [card_map])
      (hf.map_prod_eq_map_prod hsA htA hs ht h)
    · simpa using fun a h ↦ hf.mapsTo (hsA h)
    · simpa using fun a h ↦ hf.mapsTo (htA h)

-- Freiman isos compose.
@[to_additive]
lemma IsMulFreimanIso.comp (hg : IsMulFreimanIso n B C g) (hf : IsMulFreimanIso n A B f) :
    IsMulFreimanIso n A C (g ∘ f) where
  bijOn := hg.bijOn.comp hf.bijOn
  map_prod_eq_map_prod s t hsA htA hs ht := by
    rw [← map_map, ← map_map]
    rw [hg.map_prod_eq_map_prod _ _ (by rwa [card_map]) (by rwa [card_map]),
      hf.map_prod_eq_map_prod hsA htA hs ht]
    · simpa using fun a h ↦ hf.bijOn.mapsTo (hsA h)
    · simpa using fun a h ↦ hf.bijOn.mapsTo (htA h)

-- A Freiman hom restricts to smaller domain/codomain sets (given a `MapsTo` witness).
@[to_additive]
lemma IsMulFreimanHom.subset (hA : A₁ ⊆ A₂) (hf : IsMulFreimanHom n A₂ B₂ f)
    (hf' : MapsTo f A₁ B₁) : IsMulFreimanHom n A₁ B₁ f where
  mapsTo := hf'
  __ := hf.comp (isMulFreimanHom_id hA)

-- A Freiman hom remains one into any larger codomain set.
@[to_additive]
lemma IsMulFreimanHom.superset (hB : B₁ ⊆ B₂) (hf : IsMulFreimanHom n A B₁ f) :
    IsMulFreimanHom n A B₂ f := (isMulFreimanHom_id hB).comp hf

-- A Freiman iso restricts along a sub-bijection.
@[to_additive]
lemma IsMulFreimanIso.subset (hA : A₁ ⊆ A₂) (hf : IsMulFreimanIso n A₂ B₂ f)
    (hf' : BijOn f A₁ B₁) : IsMulFreimanIso n A₁ B₁ f where
  bijOn := hf'
  map_prod_eq_map_prod s t hsA htA hs ht := by
    refine hf.map_prod_eq_map_prod (fun a ha ↦ hA (hsA ha)) (fun a ha ↦ hA (htA ha)) hs ht

-- Constant maps into `B` are Freiman homs.
@[to_additive]
lemma isMulFreimanHom_const {b : β} (hb : b ∈ B) : IsMulFreimanHom n A B fun _ ↦ b where
  mapsTo _ _ := hb
  map_prod_eq_map_prod s t _ _ hs ht _ := by simp only [map_const', hs, prod_replicate, ht]

-- Any map is vacuously a Freiman iso between empty sets.
@[to_additive (attr := simp)]
lemma isMulFreimanIso_empty : IsMulFreimanIso n (∅ : Set α) (∅ : Set β) f where
  bijOn := bijOn_empty _
  map_prod_eq_map_prod s t hs ht := by
    simp [eq_zero_of_forall_not_mem hs, eq_zero_of_forall_not_mem ht]

-- Pointwise product of two Freiman homs into the pointwise product of the codomains.
@[to_additive]
lemma IsMulFreimanHom.mul (h₁ : IsMulFreimanHom n A B₁ f₁) (h₂ : IsMulFreimanHom n A B₂ f₂) :
    IsMulFreimanHom n A (B₁ * B₂) (f₁ * f₂) where
  -- TODO: Extract `Set.MapsTo.mul` from this proof
  mapsTo a ha := mul_mem_mul (h₁.mapsTo ha) (h₂.mapsTo ha)
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    rw [Pi.mul_def, prod_map_mul, prod_map_mul, h₁.map_prod_eq_map_prod hsA htA hs ht h,
      h₂.map_prod_eq_map_prod hsA htA hs ht h]

-- Any monoid hom is an `n`-Freiman hom (for every `n`) on any set it maps into `B`.
@[to_additive]
lemma MonoidHomClass.isMulFreimanHom [FunLike F α β] [MonoidHomClass F α β] (f : F)
    (hfAB : MapsTo f A B) : IsMulFreimanHom n A B f where
  mapsTo := hfAB
  map_prod_eq_map_prod s t _ _ _ _ h := by rw [← map_multiset_prod, h, map_multiset_prod]

-- Any mul-equiv is an `n`-Freiman iso on any set on which it is a bijection onto `B`.
@[to_additive]
lemma MulEquivClass.isMulFreimanIso [EquivLike F α β] [MulEquivClass F α β] (f : F)
    (hfAB : BijOn f A B) : IsMulFreimanIso n A B f where
  bijOn := hfAB
  map_prod_eq_map_prod s t _ _ _ _ := by
    rw [← map_multiset_prod, ← map_multiset_prod, EquivLike.apply_eq_iff_eq]

end CommMonoid

section CancelCommMonoid
-- Cancellativity of the codomain is what lets an `n`-Freiman hom restrict to `m ≤ n`:
-- the padding by `replicate (n - m) a` below is cancelled on the right.
variable [CommMonoid α] [CancelCommMonoid β] {A : Set α} {B : Set β} {f : α → β} {m n : ℕ}

@[to_additive]
lemma IsMulFreimanHom.mono (hmn : m ≤ n) (hf : IsMulFreimanHom n A B f) :
    IsMulFreimanHom m A B f where
  mapsTo := hf.mapsTo
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    obtain rfl | hm := m.eq_zero_or_pos
    · rw [card_eq_zero] at hs ht
      rw [hs, ht]
    simp only [← hs, card_pos_iff_exists_mem] at hm
    obtain ⟨a, ha⟩ := hm
    -- Pad both multisets with `n - m` copies of some `a ∈ s` to reach cardinality `n`.
    suffices ((s + replicate (n - m) a).map f).prod = ((t + replicate (n - m) a).map f).prod by
      simp_rw [Multiset.map_add, prod_add] at this
      exact mul_right_cancel this
    replace ha := hsA ha
    refine hf.map_prod_eq_map_prod (fun a ha ↦ ?_) (fun a ha ↦ ?_) ?_ ?_ ?_
    · rw [Multiset.mem_add] at ha
      obtain ha | ha := ha
      · exact hsA ha
      · rwa [eq_of_mem_replicate ha]
    · rw [Multiset.mem_add] at ha
      obtain ha | ha := ha
      · exact htA ha
      · rwa [eq_of_mem_replicate ha]
    · rw [_root_.map_add, card_replicate, hs, Nat.add_sub_cancel' hmn]
    · rw [_root_.map_add, card_replicate, ht, Nat.add_sub_cancel' hmn]
    · rw [prod_add, prod_add, h]

end CancelCommMonoid

section CancelCommMonoid
-- For the iso version both sides need cancellation, hence `CancelCommMonoid α` as well.
variable [CancelCommMonoid α] [CancelCommMonoid β] {A : Set α} {B : Set β} {f : α → β} {m n : ℕ}

@[to_additive]
lemma IsMulFreimanIso.mono {hmn : m ≤ n} (hf : IsMulFreimanIso n A B f) :
    IsMulFreimanIso m A B f where
  bijOn := hf.bijOn
  map_prod_eq_map_prod s t hsA htA hs ht := by
    obtain rfl | hm := m.eq_zero_or_pos
    · rw [card_eq_zero] at hs ht
      simp [hs, ht]
    simp only [← hs, card_pos_iff_exists_mem] at hm
    obtain ⟨a, ha⟩ := hm
    -- Same padding trick as in `IsMulFreimanHom.mono`, but for the `↔`.
    suffices ((s + replicate (n - m) a).map f).prod = ((t + replicate (n - m) a).map f).prod ↔
        (s + replicate (n - m) a).prod = (t + replicate (n - m) a).prod by
      simpa only [Multiset.map_add, prod_add, mul_right_cancel_iff] using this
    replace ha := hsA ha
    refine hf.map_prod_eq_map_prod (fun a ha ↦ ?_) (fun a ha ↦ ?_) ?_ ?_
    · rw [Multiset.mem_add] at ha
      obtain ha | ha := ha
      · exact hsA ha
      · rwa [eq_of_mem_replicate ha]
    · rw [Multiset.mem_add] at ha
      obtain ha | ha := ha
      · exact htA ha
      · rwa [eq_of_mem_replicate ha]
    · rw [_root_.map_add, card_replicate, hs, Nat.add_sub_cancel' hmn]
    · rw [_root_.map_add, card_replicate, ht, Nat.add_sub_cancel' hmn]

end CancelCommMonoid

section DivisionCommMonoid
variable [CommMonoid α] [DivisionCommMonoid β] {A : Set α} {B : Set β} {f : α → β} {m n : ℕ}

-- The pointwise inverse of a Freiman hom is a Freiman hom into the pointwise inverse of `B`.
@[to_additive]
lemma IsMulFreimanHom.inv (hf : IsMulFreimanHom n A B f) : IsMulFreimanHom n A B⁻¹ f⁻¹ where
  -- TODO: Extract `Set.MapsTo.inv` from this proof
  mapsTo a ha := inv_mem_inv.2 (hf.mapsTo ha)
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    rw [Pi.inv_def, prod_map_inv, prod_map_inv, hf.map_prod_eq_map_prod hsA htA hs ht h]

-- The pointwise quotient of two Freiman homs.
@[to_additive]
lemma IsMulFreimanHom.div {β : Type*} [DivisionCommMonoid β] {B₁ B₂ : Set β} {f₁ f₂ : α → β}
    (h₁ : IsMulFreimanHom n A B₁ f₁) (h₂ : IsMulFreimanHom n A B₂ f₂) :
    IsMulFreimanHom n A (B₁ / B₂) (f₁ / f₂) where
  -- TODO: Extract `Set.MapsTo.div` from this proof
  mapsTo a ha := div_mem_div (h₁.mapsTo ha) (h₂.mapsTo ha)
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    rw [Pi.div_def, prod_map_div, prod_map_div, h₁.map_prod_eq_map_prod hsA htA hs ht h,
      h₂.map_prod_eq_map_prod hsA htA hs ht h]

end DivisionCommMonoid

section Prod
variable {α₁ α₂ β₁ β₂ : Type*} [CommMonoid α₁] [CommMonoid α₂] [CommMonoid β₁] [CommMonoid β₂]
  {A₁ : Set α₁} {A₂ : Set α₂} {B₁ : Set β₁} {B₂ : Set β₂} {f₁ : α₁ → β₁} {f₂ : α₂ → β₂} {n : ℕ}

-- The product of two Freiman homs is a Freiman hom on the product sets.
@[to_additive]
lemma IsMulFreimanHom.prod (h₁ : IsMulFreimanHom n A₁ B₁ f₁) (h₂ : IsMulFreimanHom n A₂ B₂ f₂) :
    IsMulFreimanHom n (A₁ ×ˢ A₂) (B₁ ×ˢ B₂) (Prod.map f₁ f₂) where
  mapsTo := h₁.mapsTo.prodMap h₂.mapsTo
  map_prod_eq_map_prod s t hsA htA hs ht h := by
    simp only [mem_prod, forall_and, Prod.forall] at hsA htA
    simp only [Prod.ext_iff, fst_prod, snd_prod, map_map, Function.comp_apply, Prod.map_fst,
      Prod.map_snd] at h ⊢
    rw [← Function.comp_def, ← map_map, ← map_map, ← Function.comp_def f₂, ← map_map, ← map_map]
    exact ⟨h₁.map_prod_eq_map_prod (by simpa using hsA.1) (by simpa using htA.1) (by simpa)
      (by simpa) h.1, h₂.map_prod_eq_map_prod (by simpa [@forall_swap α₁] using hsA.2)
      (by simpa [@forall_swap α₁] using htA.2) (by simpa) (by simpa) h.2⟩

-- The product of two Freiman isos is a Freiman iso on the product sets.
@[to_additive]
lemma IsMulFreimanIso.prod (h₁ : IsMulFreimanIso n A₁ B₁ f₁) (h₂ : IsMulFreimanIso n A₂ B₂ f₂) :
    IsMulFreimanIso n (A₁ ×ˢ A₂) (B₁ ×ˢ B₂) (Prod.map f₁ f₂) where
  bijOn := h₁.bijOn.prodMap h₂.bijOn
  map_prod_eq_map_prod s t hsA htA hs ht := by
    simp only [mem_prod, forall_and, Prod.forall] at hsA htA
    simp only [Prod.ext_iff, fst_prod, map_map, Function.comp_apply, Prod.map_fst, snd_prod,
      Prod.map_snd]
    rw [← Function.comp_def, ← map_map, ← map_map, ← Function.comp_def f₂, ← map_map, ← map_map,
      h₁.map_prod_eq_map_prod (by simpa using hsA.1) (by simpa using htA.1) (by simpa) (by simpa),
      h₂.map_prod_eq_map_prod (by simpa [@forall_swap α₁] using hsA.2)
        (by simpa [@forall_swap α₁] using htA.2) (by simpa) (by simpa)]

end Prod

namespace Fin
variable {k m n : ℕ}

-- If `m ≠ 0` and `m * k ≤ n` then `k` fits strictly below `n + 1`, i.e. inside `Fin (n + 1)`.
private lemma aux (hm : m ≠ 0) (hkmn : m * k ≤ n) : k < (n + 1) :=
  Nat.lt_succ_iff.2 $ le_trans (Nat.le_mul_of_pos_left _ hm.bot_lt) hkmn

/-- **No wrap-around principle**.

The first `k + 1` elements of `Fin (n + 1)` are `m`-Freiman isomorphic to the first `k + 1`
elements of `ℕ` assuming there is no wrap-around. -/
lemma isAddFreimanIso_Iic (hm : m ≠ 0) (hkmn : m * k ≤ n) :
    IsAddFreimanIso m (Iic (k : Fin (n + 1))) (Iic k) val where
  bijOn.left := by simp [MapsTo, Fin.le_iff_val_le_val, Nat.mod_eq_of_lt, aux hm hkmn]
  bijOn.right.left := val_injective.injOn
  bijOn.right.right x (hx : x ≤ _) :=
    ⟨x, by simpa [le_iff_val_le_val, -val_fin_le, Nat.mod_eq_of_lt, aux hm hkmn, hx.trans_lt]⟩
  map_sum_eq_map_sum s t hsA htA hs ht := by
    -- Sums of `m` values each `≤ k` stay below `n + 1`, so `ℕ`-casting is injective on them.
    have (u : Multiset (Fin (n + 1))) : Nat.castRingHom _ (u.map val).sum = u.sum := by simp
    rw [← this, ← this]
    have {u : Multiset (Fin (n + 1))} (huk : ∀ x ∈ u, x ≤ k) (hu : card u = m) :
        (u.map val).sum < (n + 1) := Nat.lt_succ_iff.2 $ hkmn.trans' $ by
      rw [← hu, ← card_map]
      refine sum_le_card_nsmul (u.map val) k ?_
      simpa [le_iff_val_le_val, -val_fin_le, Nat.mod_eq_of_lt, aux hm hkmn] using huk
    exact ⟨congr_arg _, CharP.natCast_injOn_Iio _ (n + 1) (this hsA hs) (this htA ht)⟩

/-- **No wrap-around principle**.

The first `k` elements of `Fin (n + 1)` are `m`-Freiman isomorphic to the first `k` elements of
`ℕ` assuming there is no wrap-around. -/
lemma isAddFreimanIso_Iio (hm : m ≠ 0) (hkmn : m * k ≤ n) :
    IsAddFreimanIso m (Iio (k : Fin (n + 1))) (Iio k) val := by
  obtain _ | k := k
  · simp [← bot_eq_zero]; simp [← _root_.bot_eq_zero, -Nat.bot_eq_zero, -bot_eq_zero']
  -- Reduce `Iio (k + 1)` to `Iic k` and apply the closed-interval version.
  have hkmn' : m * k ≤ n := (Nat.mul_le_mul_left _ k.le_succ).trans hkmn
  convert isAddFreimanIso_Iic hm hkmn' using 1 <;> ext x
  · simp [lt_iff_val_lt_val, le_iff_val_le_val, -val_fin_le, -val_fin_lt, Nat.mod_eq_of_lt,
      aux hm hkmn']
    simp_rw [← Nat.cast_add_one]
    rw [Fin.val_cast_of_lt (aux hm hkmn), Nat.lt_succ_iff]
  · simp [Nat.lt_succ_iff]

end Fin
Combinatorics\Additive\PluenneckeRuzsa.lean
/-
Copyright (c) 2022 Yaël Dillies, George Shakan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, George Shakan
-/
import Mathlib.Algebra.Order.Ring.Basic
import Mathlib.Combinatorics.Enumerative.DoubleCounting
import Mathlib.Data.Finset.Pointwise
import Mathlib.Tactic.GCongr

/-!
# The Plünnecke-Ruzsa inequality

This file proves Ruzsa's triangle inequality, the Plünnecke-Petridis lemma, and the
Plünnecke-Ruzsa inequality.

## Main declarations

* `Finset.ruzsa_triangle_inequality_sub_sub_sub`: The Ruzsa triangle inequality, difference
  version.
* `Finset.ruzsa_triangle_inequality_add_add_add`: The Ruzsa triangle inequality, sum version.
* `Finset.pluennecke_petridis_inequality_add`: The Plünnecke-Petridis inequality.
* `Finset.pluennecke_ruzsa_inequality_nsmul_sub_nsmul_add`: The Plünnecke-Ruzsa inequality.

## References

* [Giorgis Petridis, *The Plünnecke-Ruzsa inequality: an overview*][petridis2014]
* [Terrence Tao, Van Vu, *Additive Combinatorics][tao-vu]
-/

open Nat

open scoped Pointwise

namespace Finset

variable {α : Type*} [CommGroup α] [DecidableEq α] {A B C : Finset α}

/-- **Ruzsa's triangle inequality**. Division version. -/
@[to_additive "**Ruzsa's triangle inequality**. Subtraction version."]
theorem ruzsa_triangle_inequality_div_div_div (A B C : Finset α) :
    (A / C).card * B.card ≤ (A / B).card * (B / C).card := by
  -- Double counting: each `a / c ∈ A / C` has at least `|B|` representations as a product of an
  -- element of `A / B` and an element of `B / C`, via `a / c = (a / b) * (b / c)`.
  rw [← card_product (A / B), ← mul_one ((A / B) ×ˢ (B / C)).card]
  refine card_mul_le_card_mul (fun b ac ↦ ac.1 * ac.2 = b) (fun x hx ↦ ?_) fun x _ ↦
    card_le_one_iff.2 fun hu hv ↦ ((mem_bipartiteBelow _).1 hu).2.symm.trans ?_
  · obtain ⟨a, ha, c, hc, rfl⟩ := mem_div.1 hx
    refine card_le_card_of_injOn (fun b ↦ (a / b, b / c)) (fun b hb ↦ ?_) fun b₁ _ b₂ _ h ↦ ?_
    · rw [mem_bipartiteAbove]
      exact ⟨mk_mem_product (div_mem_div ha hb) (div_mem_div hb hc), div_mul_div_cancel' _ _ _⟩
    · exact div_right_injective (Prod.ext_iff.1 h).1
  · exact ((mem_bipartiteBelow _).1 hv).2

/-- **Ruzsa's triangle inequality**. Div-mul-mul version. -/
@[to_additive "**Ruzsa's triangle inequality**. Sub-add-add version."]
theorem ruzsa_triangle_inequality_div_mul_mul (A B C : Finset α) :
    (A / C).card * B.card ≤ (A * B).card * (B * C).card := by
  -- Deduced from the division version by replacing `B` with `B⁻¹` (inversion preserves `card`).
  rw [← div_inv_eq_mul, ← card_inv B, ← card_inv (B * C), mul_inv, ← div_eq_mul_inv]
  exact ruzsa_triangle_inequality_div_div_div _ _ _

/-- **Ruzsa's triangle inequality**. Mul-div-mul version. -/
@[to_additive "**Ruzsa's triangle inequality**. Add-sub-add version."]
theorem ruzsa_triangle_inequality_mul_div_mul (A B C : Finset α) :
    (A * C).card * B.card ≤ (A / B).card * (B * C).card := by
  rw [← div_inv_eq_mul, ← div_inv_eq_mul B]
  exact ruzsa_triangle_inequality_div_div_div _ _ _

/-- **Ruzsa's triangle inequality**. Mul-mul-div version. -/
@[to_additive "**Ruzsa's triangle inequality**. Add-add-sub version."]
theorem ruzsa_triangle_inequality_mul_mul_div (A B C : Finset α) :
    (A * C).card * B.card ≤ (A * B).card * (B / C).card := by
  rw [← div_inv_eq_mul, div_eq_mul_inv B]
  exact ruzsa_triangle_inequality_div_mul_mul _ _ _

-- **Plünnecke-Petridis inequality** (multiplicative): if `A` minimises the ratio
-- `|A' * B| / |A'|` among its subsets `A'` (hypothesis `hA`, stated multiplied out),
-- then `|A * B * C| * |A| ≤ |A * B| * |A * C|` for every `C`. Proof by induction on `C`.
@[to_additive]
theorem pluennecke_petridis_inequality_mul (C : Finset α)
    (hA : ∀ A' ⊆ A, (A * B).card * A'.card ≤ (A' * B).card * A.card) :
    (A * B * C).card * A.card ≤ (A * B).card * (A * C).card := by
  induction' C using Finset.induction_on with x C _ ih
  · simp
  set A' := A ∩ (A * C / {x}) with hA'
  set C' := insert x C with hC'
  have h₀ : A' * {x} = A * {x} ∩ (A * C) := by
    rw [hA', inter_mul_singleton, (isUnit_singleton x).div_mul_cancel]
  -- Adding `x` to `C` adds at most the part of `A * B * {x}` not already accounted for by `A'`.
  have h₁ : A * B * C' = A * B * C ∪ (A * B * {x}) \ (A' * B * {x}) := by
    rw [hC', insert_eq, union_comm, mul_union]
    refine (sup_sdiff_eq_sup ?_).symm
    rw [mul_right_comm, mul_right_comm A, h₀]
    exact mul_subset_mul_right inter_subset_right
  have h₂ : A' * B * {x} ⊆ A * B * {x} :=
    mul_subset_mul_right (mul_subset_mul_right inter_subset_left)
  have h₃ : (A * B * C').card ≤ (A * B * C).card + (A * B).card - (A' * B).card := by
    rw [h₁]
    refine (card_union_le _ _).trans_eq ?_
    rw [card_sdiff h₂, ← add_tsub_assoc_of_le (card_le_card h₂), card_mul_singleton,
      card_mul_singleton]
  refine (mul_le_mul_right' h₃ _).trans ?_
  rw [tsub_mul, add_mul]
  -- Combine the induction hypothesis with the minimality of `A` applied to `A' ⊆ A`.
  refine (tsub_le_tsub (add_le_add_right ih _) <| hA _ inter_subset_left).trans_eq ?_
  rw [← mul_add, ← mul_tsub, ← hA', hC', insert_eq, mul_union, ← card_mul_singleton A x, ←
    card_mul_singleton A' x, add_comm (card _), h₀,
    eq_tsub_of_add_eq (card_union_add_card_inter _ _)]

/-! ### Sum triangle inequality -/

-- Auxiliary lemma for Ruzsa's triangle sum inequality, and the Plünnecke-Ruzsa inequality.
@[to_additive] private theorem mul_aux (hA : A.Nonempty) (hAB : A ⊆ B) (h : ∀ A' ∈ B.powerset.erase ∅, ((A * C).card : ℚ≥0) / ↑A.card ≤ (A' * C).card / ↑A'.card) : ∀ A' ⊆ A, (A * C).card * A'.card ≤ (A' * C).card * A.card := by rintro A' hAA' obtain rfl | hA' := A'.eq_empty_or_nonempty · simp have hA₀ : (0 : ℚ≥0) < A.card := cast_pos.2 hA.card_pos have hA₀' : (0 : ℚ≥0) < A'.card := cast_pos.2 hA'.card_pos exact mod_cast (div_le_div_iff hA₀ hA₀').1 (h _ <| mem_erase_of_ne_of_mem hA'.ne_empty <| mem_powerset.2 <| hAA'.trans hAB) /-- **Ruzsa's triangle inequality**. Multiplication version. -/ @[to_additive "**Ruzsa's triangle inequality**. Addition version."] theorem ruzsa_triangle_inequality_mul_mul_mul (A B C : Finset α) : (A * C).card * B.card ≤ (A * B).card * (B * C).card := by obtain rfl | hB := B.eq_empty_or_nonempty · simp have hB' : B ∈ B.powerset.erase ∅ := mem_erase_of_ne_of_mem hB.ne_empty (mem_powerset_self _) obtain ⟨U, hU, hUA⟩ := exists_min_image (B.powerset.erase ∅) (fun U ↦ (U * A).card / U.card : _ → ℚ≥0) ⟨B, hB'⟩ rw [mem_erase, mem_powerset, ← nonempty_iff_ne_empty] at hU refine cast_le.1 (?_ : (_ : ℚ≥0) ≤ _) push_cast refine (le_div_iff <| cast_pos.2 hB.card_pos).1 ?_ rw [mul_div_right_comm, mul_comm _ B] refine (Nat.cast_le.2 <| card_le_card_mul_left _ hU.1).trans ?_ refine le_trans ?_ (mul_le_mul (hUA _ hB') (cast_le.2 <| card_le_card <| mul_subset_mul_right hU.2) (zero_le _) (zero_le _)) rw [← mul_div_right_comm, ← mul_assoc] refine (le_div_iff <| cast_pos.2 hU.1.card_pos).2 ?_ exact mod_cast pluennecke_petridis_inequality_mul C (mul_aux hU.1 hU.2 hUA) /-- **Ruzsa's triangle inequality**. Mul-div-div version. -/ @[to_additive "**Ruzsa's triangle inequality**. 
Add-sub-sub version."] theorem ruzsa_triangle_inequality_mul_div_div (A B C : Finset α) : (A * C).card * B.card ≤ (A / B).card * (B / C).card := by rw [div_eq_mul_inv, ← card_inv B, ← card_inv (B / C), inv_div', div_inv_eq_mul] exact ruzsa_triangle_inequality_mul_mul_mul _ _ _ /-- **Ruzsa's triangle inequality**. Div-mul-div version. -/ @[to_additive "**Ruzsa's triangle inequality**. Sub-add-sub version."] theorem ruzsa_triangle_inequality_div_mul_div (A B C : Finset α) : (A / C).card * B.card ≤ (A * B).card * (B / C).card := by rw [div_eq_mul_inv, div_eq_mul_inv] exact ruzsa_triangle_inequality_mul_mul_mul _ _ _ /-- **Ruzsa's triangle inequality**. Div-div-mul version. -/ @[to_additive "**Ruzsa's triangle inequality**. Sub-sub-add version."] theorem card_div_mul_le_card_div_mul_card_mul (A B C : Finset α) : (A / C).card * B.card ≤ (A / B).card * (B * C).card := by rw [← div_inv_eq_mul, div_eq_mul_inv] exact ruzsa_triangle_inequality_mul_div_div _ _ _ -- Auxiliary lemma towards the Plünnecke-Ruzsa inequality @[to_additive] private lemma card_mul_pow_le (hAB : ∀ A' ⊆ A, (A * B).card * A'.card ≤ (A' * B).card * A.card) (n : ℕ) : (A * B ^ n).card ≤ ((A * B).card / A.card : ℚ≥0) ^ n * A.card := by obtain rfl | hA := A.eq_empty_or_nonempty · simp induction' n with n ih · simp rw [_root_.pow_succ', ← mul_assoc, _root_.pow_succ', @mul_assoc ℚ≥0, ← mul_div_right_comm, le_div_iff, ← cast_mul] swap · exact cast_pos.2 hA.card_pos refine (Nat.cast_le.2 <| pluennecke_petridis_inequality_mul _ hAB).trans ?_ rw [cast_mul] gcongr /-- The **Plünnecke-Ruzsa inequality**. Multiplication version. Note that this is genuinely harder than the division version because we cannot use a double counting argument. -/ @[to_additive "The **Plünnecke-Ruzsa inequality**. Addition version. 
Note that this is genuinely harder than the subtraction version because we cannot use a double counting argument."] theorem pluennecke_ruzsa_inequality_pow_div_pow_mul (hA : A.Nonempty) (B : Finset α) (m n : ℕ) : ((B ^ m / B ^ n).card) ≤ ((A * B).card / A.card : ℚ≥0) ^ (m + n) * A.card := by have hA' : A ∈ A.powerset.erase ∅ := mem_erase_of_ne_of_mem hA.ne_empty (mem_powerset_self _) obtain ⟨C, hC, hCA⟩ := exists_min_image (A.powerset.erase ∅) (fun C ↦ (C * B).card / C.card : _ → ℚ≥0) ⟨A, hA'⟩ rw [mem_erase, mem_powerset, ← nonempty_iff_ne_empty] at hC refine (_root_.mul_le_mul_right <| cast_pos.2 hC.1.card_pos).1 ?_ norm_cast refine (Nat.cast_le.2 <| ruzsa_triangle_inequality_div_mul_mul _ _ _).trans ?_ push_cast rw [mul_comm _ C] refine (mul_le_mul (card_mul_pow_le (mul_aux hC.1 hC.2 hCA) _) (card_mul_pow_le (mul_aux hC.1 hC.2 hCA) _) (zero_le _) (zero_le _)).trans ?_ rw [mul_mul_mul_comm, ← pow_add, ← mul_assoc] gcongr ((?_ ^ _) * Nat.cast ?_) * _ · exact hCA _ hA' · exact card_le_card hC.2 /-- The **Plünnecke-Ruzsa inequality**. Division version. -/ @[to_additive "The **Plünnecke-Ruzsa inequality**. Subtraction version."] theorem pluennecke_ruzsa_inequality_pow_div_pow_div (hA : A.Nonempty) (B : Finset α) (m n : ℕ) : (B ^ m / B ^ n).card ≤ ((A / B).card / A.card : ℚ≥0) ^ (m + n) * A.card := by rw [← card_inv, inv_div', ← inv_pow, ← inv_pow, div_eq_mul_inv A] exact pluennecke_ruzsa_inequality_pow_div_pow_mul hA _ _ _ /-- Special case of the **Plünnecke-Ruzsa inequality**. Multiplication version. -/ @[to_additive "Special case of the **Plünnecke-Ruzsa inequality**. Addition version."] theorem pluennecke_ruzsa_inequality_pow_mul (hA : A.Nonempty) (B : Finset α) (n : ℕ) : (B ^ n).card ≤ ((A * B).card / A.card : ℚ≥0) ^ n * A.card := by simpa only [_root_.pow_zero, div_one] using pluennecke_ruzsa_inequality_pow_div_pow_mul hA _ _ 0 /-- Special case of the **Plünnecke-Ruzsa inequality**. Division version. 
-/ @[to_additive "Special case of the **Plünnecke-Ruzsa inequality**. Subtraction version."] theorem pluennecke_ruzsa_inequality_pow_div (hA : A.Nonempty) (B : Finset α) (n : ℕ) : (B ^ n).card ≤ ((A / B).card / A.card : ℚ≥0) ^ n * A.card := by simpa only [_root_.pow_zero, div_one] using pluennecke_ruzsa_inequality_pow_div_pow_div hA _ _ 0 end Finset
Combinatorics\Additive\RuzsaCovering.lean
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Data.Finset.Pointwise
import Mathlib.SetTheory.Cardinal.Finite

/-!
# Ruzsa's covering lemma

This file proves the Ruzsa covering lemma. This says that, for `s`, `t` finsets, we can cover `s`
with at most `(s + t).card / t.card` copies of `t - t`.

## TODO

Merge this file with other prerequisites to Freiman's theorem once we have them.
-/

open Pointwise

namespace Finset

variable {α : Type*} [DecidableEq α] [CommGroup α] (s : Finset α) {t : Finset α}

/-- **Ruzsa's covering lemma**. -/
@[to_additive "**Ruzsa's covering lemma**"]
theorem exists_subset_mul_div (ht : t.Nonempty) :
    ∃ u : Finset α, u.card * t.card ≤ (s * t).card ∧ s ⊆ u * t / t := by
  haveI : ∀ u, Decidable ((u : Set α).PairwiseDisjoint (· • t)) := fun u ↦ Classical.dec _
  -- `C`: subsets `u ⊆ s` whose translates `a • t` (for `a ∈ u`) are pairwise disjoint.
  set C := s.powerset.filter fun u ↦ u.toSet.PairwiseDisjoint (· • t)
  -- Take `u` maximal with this property; disjointness gives `u.card * t.card ≤ (s * t).card`.
  obtain ⟨u, hu, hCmax⟩ := C.exists_maximal (filter_nonempty_iff.2
    ⟨∅, empty_mem_powerset _, by rw [coe_empty]; exact Set.pairwiseDisjoint_empty⟩)
  rw [mem_filter, mem_powerset] at hu
  refine ⟨u, (card_mul_iff.2 <| pairwiseDisjoint_smul_iff.1 hu.2).ge.trans
    (card_le_card <| mul_subset_mul_right hu.1), fun a ha ↦ ?_⟩
  rw [mul_div_assoc]
  by_cases hau : a ∈ u
  · exact subset_mul_left _ ht.one_mem_div hau
  -- If `a ∉ u`, maximality forces `a • t` to meet some `b • t` with `b ∈ u`,
  -- and that intersection point witnesses `a ∈ u * (t / t)`.
  by_cases H : ∀ b ∈ u, Disjoint (a • t) (b • t)
  · refine (hCmax _ ?_ <| ssubset_insert hau).elim
    rw [mem_filter, mem_powerset, insert_subset_iff, coe_insert]
    exact ⟨⟨ha, hu.1⟩, hu.2.insert fun _ hb _ ↦ H _ hb⟩
  push_neg at H
  simp_rw [not_disjoint_iff, ← inv_smul_mem_iff] at H
  obtain ⟨b, hb, c, hc₁, hc₂⟩ := H
  refine mem_mul.2 ⟨b, hb, a / b, ?_, by simp⟩
  exact mem_div.2 ⟨_, hc₂, _, hc₁, by simp [inv_mul_eq_div]⟩

end Finset

namespace Set

variable {α : Type*} [CommGroup α] {s t : Set α}

/-- **Ruzsa's covering lemma** for sets. See also `Finset.exists_subset_mul_div`. -/
@[to_additive "**Ruzsa's covering lemma**. Version for sets. For finsets, see `Finset.exists_subset_add_sub`."]
lemma exists_subset_mul_div (hs : s.Finite) (ht' : t.Finite) (ht : t.Nonempty) :
    ∃ u : Set α, Nat.card u * Nat.card t ≤ Nat.card (s * t) ∧ s ⊆ u * t / t ∧ u.Finite := by
  -- Transfer to the finset statement via `lift`, then push the coercions back.
  lift s to Finset α using hs
  lift t to Finset α using ht'
  classical
  obtain ⟨u, hu, hsut⟩ := Finset.exists_subset_mul_div s ht
  refine ⟨u, ?_⟩
  -- `norm_cast` would find these automatically, but breaks `to_additive` when it does so
  rw [← Finset.coe_mul, ← Finset.coe_mul, ← Finset.coe_div]
  norm_cast
  simp [*]

end Set
Combinatorics\Additive\AP\Three\Behrend.lean
/-
Copyright (c) 2022 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import Mathlib.Analysis.InnerProductSpace.PiL2
import Mathlib.Combinatorics.Additive.AP.Three.Defs
import Mathlib.Combinatorics.Pigeonhole
import Mathlib.Data.Complex.ExponentialBounds

/-!
# Behrend's bound on Roth numbers

This file proves Behrend's lower bound on Roth numbers. This says that we can find a subset of
`{1, ..., n}` of size `n / exp (O (sqrt (log n)))` which does not contain arithmetic progressions
of length `3`.

The idea is that the sphere (in the `n` dimensional Euclidean space) doesn't contain arithmetic
progressions (literally) because the corresponding ball is strictly convex. Thus we can take
integer points on that sphere and map them onto `ℕ` in a way that preserves arithmetic
progressions (`Behrend.map`).

## Main declarations

* `Behrend.sphere`: The intersection of the Euclidean sphere with the positive integer quadrant.
  This is the set that we will map on `ℕ`.
* `Behrend.map`: Given a natural number `d`, `Behrend.map d : ℕⁿ → ℕ` reads off the coordinates
  as digits in base `d`.
* `Behrend.card_sphere_le_rothNumberNat`: Implicit lower bound on Roth numbers in terms of
  `Behrend.sphere`.
* `Behrend.roth_lower_bound`: Behrend's explicit lower bound on Roth numbers.

## References

* [Bryan Gillespie, *Behrend’s Construction*]
  (http://www.epsilonsmall.com/resources/behrends-construction/behrend.pdf)
* Behrend, F. A., "On sets of integers which contain no three terms in arithmetical progression"
* [Wikipedia, *Salem-Spencer set*](https://en.wikipedia.org/wiki/Salem–Spencer_set)

## Tags

3AP-free, Salem-Spencer, Behrend construction, arithmetic progression, sphere, strictly convex
-/

open Nat hiding log
open Finset Metric Real
open scoped Pointwise

/-- The frontier of a closed strictly convex set only contains trivial arithmetic progressions.
The idea is that an arithmetic progression is contained on a line and the frontier of a strictly
convex set does not contain lines. -/
lemma threeAPFree_frontier {𝕜 E : Type*} [LinearOrderedField 𝕜] [TopologicalSpace E]
    [AddCommMonoid E] [Module 𝕜 E] {s : Set E} (hs₀ : IsClosed s) (hs₁ : StrictConvex 𝕜 s) :
    ThreeAPFree (frontier s) := by
  intro a ha b hb c hc habc
  -- `b` is the midpoint of `a` and `c`, so strict convexity forces `a = c`.
  obtain rfl : (1 / 2 : 𝕜) • a + (1 / 2 : 𝕜) • c = b := by
    rwa [← smul_add, one_div, inv_smul_eq_iff₀ (show (2 : 𝕜) ≠ 0 by norm_num), two_smul]
  have := hs₁.eq (hs₀.frontier_subset ha) (hs₀.frontier_subset hc) one_half_pos one_half_pos
    (add_halves _) hb.2
  simp [this, ← add_smul]
  ring_nf
  simp

lemma threeAPFree_sphere {E : Type*} [NormedAddCommGroup E] [NormedSpace ℝ E]
    [StrictConvexSpace ℝ E] (x : E) (r : ℝ) : ThreeAPFree (sphere x r) := by
  obtain rfl | hr := eq_or_ne r 0
  · rw [sphere_zero]
    exact threeAPFree_singleton _
  · convert threeAPFree_frontier isClosed_ball (strictConvex_closedBall ℝ x r)
    exact (frontier_closedBall _ hr).symm

namespace Behrend

variable {α β : Type*} {n d k N : ℕ} {x : Fin n → ℕ}

/-!
### Turning the sphere into 3AP-free set

We define `Behrend.sphere`, the intersection of the $L^2$ sphere with the positive quadrant of
integer points. Because the $L^2$ closed ball is strictly convex, the $L^2$ sphere and
`Behrend.sphere` are 3AP-free (`threeAPFree_sphere`). Then we can turn this set in `Fin n → ℕ`
into a set in `ℕ` using `Behrend.map`, which preserves `ThreeAPFree` because it is an additive
monoid homomorphism.
-/

/-- The box `{0, ..., d - 1}^n` as a `Finset`. -/
def box (n d : ℕ) : Finset (Fin n → ℕ) :=
  Fintype.piFinset fun _ => range d

theorem mem_box : x ∈ box n d ↔ ∀ i, x i < d := by
  simp only [box, Fintype.mem_piFinset, mem_range]

@[simp]
theorem card_box : (box n d).card = d ^ n := by simp [box]

@[simp]
theorem box_zero : box (n + 1) 0 = ∅ := by simp [box]

/-- The intersection of the sphere of radius `√k` with the integer points in the positive
quadrant. -/
def sphere (n d k : ℕ) : Finset (Fin n → ℕ) :=
  (box n d).filter fun x => ∑ i, x i ^ 2 = k

theorem sphere_zero_subset : sphere n d 0 ⊆ 0 := fun x => by
  simp [sphere, Function.funext_iff]

@[simp]
theorem sphere_zero_right (n k : ℕ) : sphere (n + 1) 0 k = ∅ := by simp [sphere]

theorem sphere_subset_box : sphere n d k ⊆ box n d :=
  filter_subset _ _

theorem norm_of_mem_sphere {x : Fin n → ℕ} (hx : x ∈ sphere n d k) :
    ‖(WithLp.equiv 2 _).symm ((↑) ∘ x : Fin n → ℝ)‖ = √↑k := by
  rw [EuclideanSpace.norm_eq]
  dsimp
  simp_rw [abs_cast, ← cast_pow, ← cast_sum, (mem_filter.1 hx).2]

theorem sphere_subset_preimage_metric_sphere :
    (sphere n d k : Set (Fin n → ℕ)) ⊆
      (fun x : Fin n → ℕ => (WithLp.equiv 2 _).symm ((↑) ∘ x : Fin n → ℝ)) ⁻¹'
        Metric.sphere (0 : PiLp 2 fun _ : Fin n => ℝ) (√↑k) :=
  fun x hx => by rw [Set.mem_preimage, mem_sphere_zero_iff_norm, norm_of_mem_sphere hx]

/-- The map that appears in Behrend's bound on Roth numbers. -/
@[simps]
def map (d : ℕ) : (Fin n → ℕ) →+ ℕ where
  toFun a := ∑ i, a i * d ^ (i : ℕ)
  map_zero' := by simp_rw [Pi.zero_apply, zero_mul, sum_const_zero]
  map_add' a b := by simp_rw [Pi.add_apply, add_mul, sum_add_distrib]

-- @[simp] -- Porting note (#10618): simp can prove this
theorem map_zero (d : ℕ) (a : Fin 0 → ℕ) : map d a = 0 := by simp [map]

theorem map_succ (a : Fin (n + 1) → ℕ) :
    map d a = a 0 + (∑ x : Fin n, a x.succ * d ^ (x : ℕ)) * d := by
  simp [map, Fin.sum_univ_succ, _root_.pow_succ, ← mul_assoc, ← sum_mul]

theorem map_succ' (a : Fin (n + 1) → ℕ) : map d a = a 0 + map d (a ∘ Fin.succ) * d :=
  map_succ _

theorem map_monotone (d : ℕ) : Monotone (map d : (Fin n → ℕ) → ℕ) := fun x y h => by
  dsimp; exact sum_le_sum fun i _ => Nat.mul_le_mul_right _ <| h i

theorem map_mod (a : Fin n.succ → ℕ) : map d a % d = a 0 % d := by
  rw [map_succ, Nat.add_mul_mod_self_right]

theorem map_eq_iff {x₁ x₂ : Fin n.succ → ℕ} (hx₁ : ∀ i, x₁ i < d) (hx₂ : ∀ i, x₂ i < d) :
    map d x₁ = map d x₂ ↔ x₁ 0 = x₂ 0 ∧ map d (x₁ ∘ Fin.succ) = map d (x₂ ∘ Fin.succ) := by
  refine ⟨fun h => ?_, fun h => by rw [map_succ', map_succ', h.1, h.2]⟩
  -- Recover the lowest digit by reducing mod `d`, then cancel it.
  have : x₁ 0 = x₂ 0 := by
    rw [← mod_eq_of_lt (hx₁ _), ← map_mod, ← mod_eq_of_lt (hx₂ _), ← map_mod, h]
  rw [map_succ, map_succ, this, add_right_inj, mul_eq_mul_right_iff] at h
  exact ⟨this, h.resolve_right (pos_of_gt (hx₁ 0)).ne'⟩

theorem map_injOn : {x : Fin n → ℕ | ∀ i, x i < d}.InjOn (map d) := by
  intro x₁ hx₁ x₂ hx₂ h
  induction' n with n ih
  · simp [eq_iff_true_of_subsingleton]
  rw [forall_const] at ih
  ext i
  have x := (map_eq_iff hx₁ hx₂).1 h
  refine Fin.cases x.1 (congr_fun <| ih (fun _ => ?_) (fun _ => ?_) x.2) i
  · exact hx₁ _
  · exact hx₂ _

theorem map_le_of_mem_box (hx : x ∈ box n d) :
    map (2 * d - 1) x ≤ ∑ i : Fin n, (d - 1) * (2 * d - 1) ^ (i : ℕ) :=
  map_monotone (2 * d - 1) fun _ => Nat.le_sub_one_of_lt <| mem_box.1 hx _

nonrec theorem threeAPFree_sphere : ThreeAPFree (sphere n d k : Set (Fin n → ℕ)) := by
  -- Embed into Euclidean space coordinate-wise and use the strict-convexity result.
  set f : (Fin n → ℕ) →+ EuclideanSpace ℝ (Fin n) :=
    { toFun := fun f => ((↑) : ℕ → ℝ) ∘ f
      map_zero' := funext fun _ => cast_zero
      map_add' := fun _ _ => funext fun _ => cast_add _ _ }
  refine ThreeAPFree.of_image (AddMonoidHomClass.isAddFreimanHom f (Set.mapsTo_image _ _))
    cast_injective.comp_left.injOn (Set.subset_univ _) ?_
  refine (threeAPFree_sphere 0 (√↑k)).mono (Set.image_subset_iff.2 fun x => ?_)
  rw [Set.mem_preimage, mem_sphere_zero_iff_norm]
  exact norm_of_mem_sphere

theorem threeAPFree_image_sphere :
    ThreeAPFree ((sphere n d k).image (map (2 * d - 1)) : Set ℕ) := by
  rw [coe_image]
  -- Base `2 * d - 1` is large enough that `map` is injective on `sphere + sphere`.
  apply ThreeAPFree.image' (α := Fin n → ℕ) (β := ℕ) (s := sphere n d k) (map (2 * d - 1))
    (map_injOn.mono _) threeAPFree_sphere
  · rw [Set.add_subset_iff]
    rintro a ha b hb i
    have hai := mem_box.1 (sphere_subset_box ha) i
    have hbi := mem_box.1 (sphere_subset_box hb) i
    rw [lt_tsub_iff_right, ← succ_le_iff, two_mul]
    exact (add_add_add_comm _ _ 1 1).trans_le (_root_.add_le_add hai hbi)
  · exact x

theorem sum_sq_le_of_mem_box (hx : x ∈ box n d) : ∑ i : Fin n, x i ^ 2 ≤ n * (d - 1) ^ 2 := by
  rw [mem_box] at hx
  have : ∀ i, x i ^ 2 ≤ (d - 1) ^ 2 := fun i =>
    Nat.pow_le_pow_left (Nat.le_sub_one_of_lt (hx i)) _
  exact (sum_le_card_nsmul univ _ _ fun i _ => this i).trans (by rw [card_fin, smul_eq_mul])

theorem sum_eq : (∑ i : Fin n, d * (2 * d + 1) ^ (i : ℕ)) = ((2 * d + 1) ^ n - 1) / 2 := by
  refine (Nat.div_eq_of_eq_mul_left zero_lt_two ?_).symm
  rw [← sum_range fun i => d * (2 * d + 1) ^ (i : ℕ), ← mul_sum, mul_right_comm, mul_comm d,
    ← geom_sum_mul_add, add_tsub_cancel_right, mul_comm]

theorem sum_lt : (∑ i : Fin n, d * (2 * d + 1) ^ (i : ℕ)) < (2 * d + 1) ^ n :=
  sum_eq.trans_lt <| (Nat.div_le_self _ 2).trans_lt <| pred_lt (pow_pos (succ_pos _) _).ne'

theorem card_sphere_le_rothNumberNat (n d k : ℕ) :
    (sphere n d k).card ≤ rothNumberNat ((2 * d - 1) ^ n) := by
  cases n
  · dsimp; refine (card_le_univ _).trans_eq ?_; rfl
  cases d
  · simp
  apply threeAPFree_image_sphere.le_rothNumberNat _ _ (card_image_of_injOn _)
  · intro; assumption
  · simp only [subset_iff, mem_image, and_imp, forall_exists_index, mem_range,
      forall_apply_eq_imp_iff₂, sphere, mem_filter]
    rintro _ x hx _ rfl
    exact (map_le_of_mem_box hx).trans_lt sum_lt
  apply map_injOn.mono fun x => ?_
  · intro; assumption
  simp only [mem_coe, sphere, mem_filter, mem_box, and_imp, two_mul]
  exact fun h _ i => (h i).trans_le le_self_add

/-!
### Optimization

Now that we know how to turn the integer points of any sphere into a 3AP-free set, we find a
sphere containing many integer points by the pigeonhole principle. This gives us an implicit
bound that we then optimize by tweaking the parameters. The (almost) optimal parameters are
`Behrend.nValue` and `Behrend.dValue`.
-/

theorem exists_large_sphere_aux (n d : ℕ) :
    ∃ k ∈ range (n * (d - 1) ^ 2 + 1),
      (↑(d ^ n) / ((n * (d - 1) ^ 2 :) + 1) : ℝ) ≤ (sphere n d k).card := by
  -- Pigeonhole: the `d ^ n` box points distribute over `n * (d - 1) ^ 2 + 1` possible radii.
  refine exists_le_card_fiber_of_nsmul_le_card_of_maps_to (fun x hx => ?_) nonempty_range_succ ?_
  · rw [mem_range, Nat.lt_succ_iff]
    exact sum_sq_le_of_mem_box hx
  · rw [card_range, _root_.nsmul_eq_mul, mul_div_assoc', cast_add_one, mul_div_cancel_left₀,
      card_box]
    exact (cast_add_one_pos _).ne'

theorem exists_large_sphere (n d : ℕ) :
    ∃ k, ((d ^ n :) / (n * d ^ 2 :) : ℝ) ≤ (sphere n d k).card := by
  obtain ⟨k, -, hk⟩ := exists_large_sphere_aux n d
  refine ⟨k, ?_⟩
  obtain rfl | hn := n.eq_zero_or_pos
  · simp
  obtain rfl | hd := d.eq_zero_or_pos
  · simp
  refine (div_le_div_of_nonneg_left ?_ ?_ ?_).trans hk
  · exact cast_nonneg _
  · exact cast_add_one_pos _
  simp only [← le_sub_iff_add_le', cast_mul, ← mul_sub, cast_pow, cast_sub hd, sub_sq, one_pow,
    cast_one, mul_one, sub_add, sub_sub_self]
  apply one_le_mul_of_one_le_of_one_le
  · rwa [one_le_cast]
  rw [_root_.le_sub_iff_add_le]
  norm_num
  exact one_le_cast.2 hd

theorem bound_aux' (n d : ℕ) :
    ((d ^ n :) / (n * d ^ 2 :) : ℝ) ≤ rothNumberNat ((2 * d - 1) ^ n) :=
  let ⟨_, h⟩ := exists_large_sphere n d
  h.trans <| cast_le.2 <| card_sphere_le_rothNumberNat _ _ _

theorem bound_aux (hd : d ≠ 0) (hn : 2 ≤ n) :
    (d ^ (n - 2 :) / n : ℝ) ≤ rothNumberNat ((2 * d - 1) ^ n) := by
  convert bound_aux' n d using 1
  rw [cast_mul, cast_pow, mul_comm, ← div_div, pow_sub₀ _ _ hn, ← div_eq_mul_inv, cast_pow]
  rwa [cast_ne_zero]

open scoped Filter Topology

open Real

section NumericalBounds

-- Numeric estimates below rely on the decimal bounds `log_two_lt_d9` / `log_two_gt_d9` /
-- `exp_one_gt_d9` from `Mathlib.Data.Complex.ExponentialBounds`.
theorem log_two_mul_two_le_sqrt_log_eight : log 2 * 2 ≤ √(log 8) := by
  have : (8 : ℝ) = 2 ^ ((3 : ℕ) : ℝ) := by rw [rpow_natCast]; norm_num
  rw [this, log_rpow zero_lt_two (3 : ℕ)]
  apply le_sqrt_of_sq_le
  rw [mul_pow, sq (log 2), mul_assoc, mul_comm]
  refine mul_le_mul_of_nonneg_right ?_ (log_nonneg one_le_two)
  rw [← le_div_iff]
  on_goal 1 => apply log_two_lt_d9.le.trans
  all_goals norm_num1

theorem two_div_one_sub_two_div_e_le_eight : 2 / (1 - 2 / exp 1) ≤ 8 := by
  rw [div_le_iff, mul_sub, mul_one, mul_div_assoc', le_sub_comm, div_le_iff (exp_pos _)]
  · have : 16 < 6 * (2.7182818283 : ℝ) := by norm_num
    linarith [exp_one_gt_d9]
  rw [sub_pos, div_lt_one] <;> exact exp_one_gt_d9.trans' (by norm_num)

theorem le_sqrt_log (hN : 4096 ≤ N) : log (2 / (1 - 2 / exp 1)) * (69 / 50) ≤ √(log ↑N) := by
  have : (12 : ℕ) * log 2 ≤ log N := by
    rw [← log_rpow zero_lt_two, rpow_natCast]
    exact log_le_log (by positivity) (mod_cast hN)
  refine (mul_le_mul_of_nonneg_right (log_le_log ?_ two_div_one_sub_two_div_e_le_eight) <| by
    norm_num1).trans ?_
  · refine div_pos zero_lt_two ?_
    rw [sub_pos, div_lt_one (exp_pos _)]
    exact exp_one_gt_d9.trans_le' (by norm_num1)
  have l8 : log 8 = (3 : ℕ) * log 2 := by
    rw [← log_rpow zero_lt_two, rpow_natCast]
    norm_num
  rw [l8]
  apply le_sqrt_of_sq_le (le_trans _ this)
  rw [mul_right_comm, mul_pow, sq (log 2), ← mul_assoc]
  apply mul_le_mul_of_nonneg_right _ (log_nonneg one_le_two)
  rw [← le_div_iff']
  · exact log_two_lt_d9.le.trans (by norm_num1)
  exact sq_pos_of_ne_zero (by norm_num1)

theorem exp_neg_two_mul_le {x : ℝ} (hx : 0 < x) : exp (-2 * x) < exp (2 - ⌈x⌉₊) / ⌈x⌉₊ := by
  have h₁ := ceil_lt_add_one hx.le
  have h₂ : 1 - x ≤ 2 - ⌈x⌉₊ := by linarith
  calc
    _ ≤ exp (1 - x) / (x + 1) := ?_
    _ ≤ exp (2 - ⌈x⌉₊) / (x + 1) := by gcongr
    _ < _ := by gcongr
  rw [le_div_iff (add_pos hx zero_lt_one), ← le_div_iff' (exp_pos _), ← exp_sub, neg_mul,
    sub_neg_eq_add, two_mul, sub_add_add_cancel, add_comm _ x]
  exact le_trans (le_add_of_nonneg_right zero_le_one) (add_one_le_exp _)

theorem div_lt_floor {x : ℝ} (hx : 2 / (1 - 2 / exp 1) ≤ x) : x / exp 1 < (⌊x / 2⌋₊ : ℝ) := by
  apply lt_of_le_of_lt _ (sub_one_lt_floor _)
  have : 0 < 1 - 2 / exp 1 := by
    rw [sub_pos, div_lt_one (exp_pos _)]
    exact lt_of_le_of_lt (by norm_num) exp_one_gt_d9
  rwa [le_sub_comm, div_eq_mul_one_div x, div_eq_mul_one_div x, ← mul_sub, div_sub',
    ← div_eq_mul_one_div, mul_div_assoc', one_le_div, ← div_le_iff this]
  · exact zero_lt_two
  · exact two_ne_zero

theorem ceil_lt_mul {x : ℝ} (hx : 50 / 19 ≤ x) : (⌈x⌉₊ : ℝ) < 1.38 * x := by
  refine (ceil_lt_add_one <| hx.trans' <| by norm_num).trans_le ?_
  rw [← le_sub_iff_add_le', ← sub_one_mul]
  have : (1.38 : ℝ) = 69 / 50 := by norm_num
  rwa [this, show (69 / 50 - 1 : ℝ) = (50 / 19)⁻¹ by norm_num1, ← div_eq_inv_mul, one_le_div]
  norm_num1

end NumericalBounds

/-- The (almost) optimal value of `n` in `Behrend.bound_aux`. -/
noncomputable def nValue (N : ℕ) : ℕ :=
  ⌈√(log N)⌉₊

/-- The (almost) optimal value of `d` in `Behrend.bound_aux`. -/
noncomputable def dValue (N : ℕ) : ℕ :=
  ⌊(N : ℝ) ^ (nValue N : ℝ)⁻¹ / 2⌋₊

theorem nValue_pos (hN : 2 ≤ N) : 0 < nValue N :=
  ceil_pos.2 <| Real.sqrt_pos.2 <| log_pos <| one_lt_cast.2 <| hN

theorem three_le_nValue (hN : 64 ≤ N) : 3 ≤ nValue N := by
  rw [nValue, ← lt_iff_add_one_le, lt_ceil, cast_two]
  apply lt_sqrt_of_sq_lt
  have : (2 : ℝ) ^ ((6 : ℕ) : ℝ) ≤ N := by
    rw [rpow_natCast]
    exact (cast_le.2 hN).trans' (by norm_num1)
  apply lt_of_lt_of_le _ (log_le_log (rpow_pos_of_pos zero_lt_two _) this)
  rw [log_rpow zero_lt_two, ← div_lt_iff']
  · exact log_two_gt_d9.trans_le' (by norm_num1)
  · norm_num1

theorem dValue_pos (hN₃ : 8 ≤ N) : 0 < dValue N := by
  have hN₀ : 0 < (N : ℝ) := cast_pos.2 (succ_pos'.trans_le hN₃)
  rw [dValue, floor_pos, ← log_le_log_iff zero_lt_one, log_one, log_div _ two_ne_zero,
    log_rpow hN₀, inv_mul_eq_div, sub_nonneg, le_div_iff]
  · have : (nValue N : ℝ) ≤ 2 * √(log N) := by
      apply (ceil_lt_add_one <| sqrt_nonneg _).le.trans
      rw [two_mul, add_le_add_iff_left]
      apply le_sqrt_of_sq_le
      rw [one_pow, le_log_iff_exp_le hN₀]
      exact (exp_one_lt_d9.le.trans <| by norm_num).trans (cast_le.2 hN₃)
    apply (mul_le_mul_of_nonneg_left this <| log_nonneg one_le_two).trans _
    rw [← mul_assoc, ← le_div_iff (Real.sqrt_pos.2 <| log_pos <| one_lt_cast.2 _), div_sqrt]
    · apply log_two_mul_two_le_sqrt_log_eight.trans
      apply Real.sqrt_le_sqrt
      exact log_le_log (by norm_num) (mod_cast hN₃)
    exact hN₃.trans_lt' (by norm_num)
  · exact cast_pos.2 (nValue_pos <| hN₃.trans' <| by norm_num)
  · exact (rpow_pos_of_pos hN₀ _).ne'
  · exact div_pos (rpow_pos_of_pos hN₀ _) zero_lt_two

theorem le_N (hN : 2 ≤ N) : (2 * dValue N - 1) ^ nValue N ≤ N := by
  have : (2 * dValue N - 1) ^ nValue N ≤ (2 * dValue N) ^ nValue N :=
    Nat.pow_le_pow_left (Nat.sub_le _ _) _
  apply this.trans
  suffices ((2 * dValue N) ^ nValue N : ℝ) ≤ N from mod_cast this
  suffices i : (2 * dValue N : ℝ) ≤ (N : ℝ) ^ (nValue N : ℝ)⁻¹ by
    rw [← rpow_natCast]
    apply (rpow_le_rpow (mul_nonneg zero_le_two (cast_nonneg _)) i (cast_nonneg _)).trans
    rw [← rpow_mul (cast_nonneg _), inv_mul_cancel, rpow_one]
    rw [cast_ne_zero]
    apply (nValue_pos hN).ne'
  rw [← le_div_iff']
  · exact floor_le (div_nonneg (rpow_nonneg (cast_nonneg _) _) zero_le_two)
  apply zero_lt_two

theorem bound (hN : 4096 ≤ N) : (N : ℝ) ^ (nValue N : ℝ)⁻¹ / exp 1 < dValue N := by
  apply div_lt_floor _
  rw [← log_le_log_iff, log_rpow, mul_comm, ← div_eq_mul_inv]
  · apply le_trans _ (div_le_div_of_nonneg_left _ _ (ceil_lt_mul _).le)
    · rw [mul_comm, ← div_div, div_sqrt, le_div_iff]
      · norm_num; exact le_sqrt_log hN
      · norm_num1
    · apply log_nonneg
      rw [one_le_cast]
      exact hN.trans' (by norm_num1)
    · rw [cast_pos, lt_ceil, cast_zero, Real.sqrt_pos]
      refine log_pos ?_
      rw [one_lt_cast]
      exact hN.trans_lt' (by norm_num1)
    apply le_sqrt_of_sq_le
    have : (12 : ℕ) * log 2 ≤ log N := by
      rw [← log_rpow zero_lt_two, rpow_natCast]
      exact log_le_log (by positivity) (mod_cast hN)
    refine le_trans ?_ this
    rw [← div_le_iff']
    · exact log_two_gt_d9.le.trans' (by norm_num1)
    · norm_num1
  · rw [cast_pos]
    exact hN.trans_lt' (by norm_num1)
  · refine div_pos zero_lt_two ?_
    rw [sub_pos, div_lt_one (exp_pos _)]
    exact lt_of_le_of_lt (by norm_num1) exp_one_gt_d9
  positivity

theorem roth_lower_bound_explicit (hN : 4096 ≤ N) :
    (N : ℝ) * exp (-4 * √(log N)) < rothNumberNat N := by
  let n := nValue N
  have hn : 0 < (n : ℝ) := cast_pos.2 (nValue_pos <| hN.trans' <| by norm_num1)
  have hd : 0 < dValue N := dValue_pos (hN.trans' <| by norm_num1)
  have hN₀ : 0 < (N : ℝ) := cast_pos.2 (hN.trans' <| by norm_num1)
  have hn₂ : 2 < n := three_le_nValue <| hN.trans' <| by norm_num1
  have : (2 * dValue N - 1) ^ n ≤ N := le_N (hN.trans' <| by norm_num1)
  -- Chain the sphere bound through the (almost) optimal parameters `nValue N`, `dValue N`.
  calc
    _ ≤ (N ^ (nValue N : ℝ)⁻¹ / rexp 1 : ℝ) ^ (n - 2) / n := ?_
    _ < _ := by gcongr; exacts [(tsub_pos_of_lt hn₂).ne', bound hN]
    _ ≤ rothNumberNat ((2 * dValue N - 1) ^ n) := bound_aux hd.ne' hn₂.le
    _ ≤ rothNumberNat N := mod_cast rothNumberNat.mono this
  rw [← rpow_natCast, div_rpow (rpow_nonneg hN₀.le _) (exp_pos _).le, ← rpow_mul hN₀.le,
    inv_mul_eq_div, cast_sub hn₂.le, cast_two, same_sub_div hn.ne', exp_one_rpow, div_div,
    rpow_sub hN₀, rpow_one, div_div, div_eq_mul_inv]
  refine mul_le_mul_of_nonneg_left ?_ (cast_nonneg _)
  rw [mul_inv, mul_inv, ← exp_neg, ← rpow_neg (cast_nonneg _), neg_sub, ← div_eq_mul_inv]
  have : exp (-4 * √(log N)) = exp (-2 * √(log N)) * exp (-2 * √(log N)) := by
    rw [← exp_add, ← add_mul]
    norm_num
  rw [this]
  refine mul_le_mul ?_ (exp_neg_two_mul_le <| Real.sqrt_pos.2 <| log_pos ?_).le (exp_pos _).le <|
    rpow_nonneg (cast_nonneg _) _
  · rw [← le_log_iff_exp_le (rpow_pos_of_pos hN₀ _), log_rpow hN₀, ← le_div_iff, mul_div_assoc,
      div_sqrt, neg_mul, neg_le_neg_iff, div_mul_eq_mul_div, div_le_iff hn]
    · exact mul_le_mul_of_nonneg_left (le_ceil _) zero_le_two
    refine Real.sqrt_pos.2 (log_pos ?_)
    rw [one_lt_cast]
    exact hN.trans_lt' (by norm_num1)
  · rw [one_lt_cast]
    exact hN.trans_lt' (by norm_num1)

theorem exp_four_lt : exp 4 < 64 := by
  rw [show (64 : ℝ) = 2 ^ ((6 : ℕ) : ℝ) by rw [rpow_natCast]; norm_num1,
    ← lt_log_iff_exp_lt (rpow_pos_of_pos zero_lt_two _), log_rpow zero_lt_two, ← div_lt_iff']
  · exact log_two_gt_d9.trans_le' (by norm_num1)
  · norm_num

theorem four_zero_nine_six_lt_exp_sixteen : 4096 < exp 16 := by
  rw [← log_lt_iff_lt_exp (show (0 : ℝ) < 4096 by norm_num), show (4096 : ℝ) = 2 ^ 12 by norm_cast,
    ← rpow_natCast, log_rpow zero_lt_two, cast_ofNat]
  have : 12 * (0.6931471808 : ℝ) < 16 := by norm_num
  linarith [log_two_lt_d9]

theorem lower_bound_le_one' (hN : 2 ≤ N) (hN' : N ≤ 4096) :
    (N : ℝ) * exp (-4 * √(log N)) ≤ 1 := by
  rw [← log_le_log_iff (mul_pos (cast_pos.2 (zero_lt_two.trans_le hN)) (exp_pos _)) zero_lt_one,
    log_one, log_mul (cast_pos.2 (zero_lt_two.trans_le hN)).ne' (exp_pos _).ne', log_exp,
    neg_mul, ← sub_eq_add_neg, sub_nonpos, ← div_le_iff (Real.sqrt_pos.2 <| log_pos <|
    one_lt_cast.2 <| one_lt_two.trans_le hN), div_sqrt, sqrt_le_left zero_le_four,
    log_le_iff_le_exp (cast_pos.2 (zero_lt_two.trans_le hN))]
  norm_num1
  apply le_trans _ four_zero_nine_six_lt_exp_sixteen.le
  exact mod_cast hN'

theorem lower_bound_le_one (hN : 1 ≤ N) (hN' : N ≤ 4096) :
    (N : ℝ) * exp (-4 * √(log N)) ≤ 1 := by
  obtain rfl | hN := hN.eq_or_lt
  · norm_num
  · exact lower_bound_le_one' hN hN'

theorem roth_lower_bound : (N : ℝ) * exp (-4 * √(log N)) ≤ rothNumberNat N := by
  -- Three regimes: `N = 0` is trivial, large `N` uses the explicit bound,
  -- small `N` because the claimed lower bound is then at most `1`.
  obtain rfl | hN := Nat.eq_zero_or_pos N
  · norm_num
  obtain h₁ | h₁ := le_or_lt 4096 N
  · exact (roth_lower_bound_explicit h₁).le
  · apply (lower_bound_le_one hN h₁.le).trans
    simpa using rothNumberNat.monotone hN

end Behrend
Combinatorics\Additive\AP\Three\Defs.lean
/-
Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import Mathlib.Algebra.Order.Interval.Finset
import Mathlib.Combinatorics.Additive.FreimanHom
import Mathlib.Data.Set.Pointwise.SMul
import Mathlib.Order.Interval.Finset.Fin

/-!
# Sets without arithmetic progressions of length three and Roth numbers

This file defines sets without arithmetic progressions of length three, aka 3AP-free sets, and
the Roth number of a set.

The corresponding notion, sets without geometric progressions of length three, are called
3GP-free sets.

The Roth number of a finset is the size of its biggest 3AP-free subset. This is a more general
definition than the one often found in mathematical literature, where the `n`-th Roth number is
the size of the biggest 3AP-free subset of `{0, ..., n - 1}`.

## Main declarations

* `ThreeGPFree`: Predicate for a set to be 3GP-free.
* `ThreeAPFree`: Predicate for a set to be 3AP-free.
* `mulRothNumber`: The multiplicative Roth number of a finset.
* `addRothNumber`: The additive Roth number of a finset.
* `rothNumberNat`: The Roth number of a natural, namely `addRothNumber (Finset.range n)`.

## TODO

* Can `threeAPFree_iff_eq_right` be made more general?
* Generalize `ThreeGPFree.image` to Freiman homs

## References

* [Wikipedia, *Salem-Spencer set*](https://en.wikipedia.org/wiki/Salem–Spencer_set)

## Tags

3AP-free, Salem-Spencer, Roth, arithmetic progression, average, three-free
-/

open Finset Function Nat
open scoped Pointwise

variable {F α β 𝕜 E : Type*}

section ThreeAPFree

open Set

section Monoid

variable [Monoid α] [Monoid β] (s t : Set α)

/-- A set is **3GP-free** if it does not contain any non-trivial geometric progression of length
three. -/
@[to_additive "A set is **3AP-free** if it does not contain any non-trivial arithmetic progression of length three. This is also sometimes called a **non averaging set** or **Salem-Spencer set**."]
def ThreeGPFree : Prop :=
  ∀ ⦃a⦄, a ∈ s → ∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → a * c = b * b → a = b

/-- Whether a given finset is 3GP-free is decidable. -/
@[to_additive "Whether a given finset is 3AP-free is decidable."]
instance ThreeGPFree.instDecidable [DecidableEq α] {s : Finset α} :
    Decidable (ThreeGPFree (s : Set α)) :=
  decidable_of_iff (∀ a ∈ s, ∀ b ∈ s, ∀ c ∈ s, a * c = b * b → a = b) Iff.rfl

variable {s t}

@[to_additive]
theorem ThreeGPFree.mono (h : t ⊆ s) (hs : ThreeGPFree s) : ThreeGPFree t :=
  fun _ ha _ hb _ hc ↦ hs (h ha) (h hb) (h hc)

@[to_additive (attr := simp)]
theorem threeGPFree_empty : ThreeGPFree (∅ : Set α) := fun _ _ _ ha => ha.elim

@[to_additive]
theorem Set.Subsingleton.threeGPFree (hs : s.Subsingleton) : ThreeGPFree s :=
  fun _ ha _ hb _ _ _ ↦ hs ha hb

@[to_additive (attr := simp)]
theorem threeGPFree_singleton (a : α) : ThreeGPFree ({a} : Set α) :=
  subsingleton_singleton.threeGPFree

@[to_additive ThreeAPFree.prod]
theorem ThreeGPFree.prod {t : Set β} (hs : ThreeGPFree s) (ht : ThreeGPFree t) :
    ThreeGPFree (s ×ˢ t) := fun _ ha _ hb _ hc h ↦
  Prod.ext (hs ha.1 hb.1 hc.1 (Prod.ext_iff.1 h).1) (ht ha.2 hb.2 hc.2 (Prod.ext_iff.1 h).2)

@[to_additive]
theorem threeGPFree_pi {ι : Type*} {α : ι → Type*} [∀ i, Monoid (α i)] {s : ∀ i, Set (α i)}
    (hs : ∀ i, ThreeGPFree (s i)) : ThreeGPFree ((univ : Set ι).pi s) :=
  fun _ ha _ hb _ hc h ↦
    funext fun i => hs i (ha i trivial) (hb i trivial) (hc i trivial) <| congr_fun h i

end Monoid

section CommMonoid

variable [CommMonoid α] [CommMonoid β] {s A : Set α} {t B : Set β} {f : α → β} {a : α}

/-- Arithmetic progressions of length three are preserved under `2`-Freiman homomorphisms. -/
@[to_additive "Arithmetic progressions of length three are preserved under `2`-Freiman homomorphisms."]
lemma ThreeGPFree.of_image (hf : IsMulFreimanHom 2 s t f) (hf' : s.InjOn f) (hAs : A ⊆ s)
    (hA : ThreeGPFree (f '' A)) : ThreeGPFree A :=
  fun _ ha _ hb _ hc habc ↦ hf' (hAs ha) (hAs hb) <|
    hA (mem_image_of_mem _ ha) (mem_image_of_mem _ hb) (mem_image_of_mem _ hc) <|
      hf.mul_eq_mul (hAs ha) (hAs hc) (hAs hb) (hAs hb) habc

/-- Arithmetic progressions of length three are preserved under `2`-Freiman isomorphisms. -/
@[to_additive "Arithmetic progressions of length three are preserved under `2`-Freiman isomorphisms."]
lemma threeGPFree_image (hf : IsMulFreimanIso 2 s t f) (hAs : A ⊆ s) :
    ThreeGPFree (f '' A) ↔ ThreeGPFree A := by
  rw [ThreeGPFree, ThreeGPFree]
  have := (hf.bijOn.injOn.mono hAs).bijOn_image (f := f)
  simp (config := { contextual := true }) only
    [((hf.bijOn.injOn.mono hAs).bijOn_image (f := f)).forall,
      hf.mul_eq_mul (hAs _) (hAs _) (hAs _) (hAs _), this.injOn.eq_iff]

@[to_additive]
alias ⟨_, ThreeGPFree.image⟩ := threeGPFree_image

/-- Arithmetic progressions of length three are preserved under `2`-Freiman homomorphisms. -/
@[to_additive]
lemma IsMulFreimanHom.threeGPFree (hf : IsMulFreimanHom 2 s t f) (hf' : s.InjOn f)
    (ht : ThreeGPFree t) : ThreeGPFree s :=
  fun _ ha _ hb _ hc habc ↦ hf' ha hb <|
    ht (hf.mapsTo ha) (hf.mapsTo hb) (hf.mapsTo hc) <| hf.mul_eq_mul ha hc hb hb habc

/-- Arithmetic progressions of length three are preserved under `2`-Freiman isomorphisms. -/
@[to_additive]
lemma IsMulFreimanIso.threeGPFree_congr (hf : IsMulFreimanIso 2 s t f) :
    ThreeGPFree s ↔ ThreeGPFree t where
  mpr := hf.isMulFreimanHom.threeGPFree hf.bijOn.injOn
  mp hs a hfa b hfb c hfc habc := by
    obtain ⟨a, ha, rfl⟩ := hf.bijOn.surjOn hfa
    obtain ⟨b, hb, rfl⟩ := hf.bijOn.surjOn hfb
    obtain ⟨c, hc, rfl⟩ := hf.bijOn.surjOn hfc
    exact congr_arg f $ hs ha hb hc $ (hf.mul_eq_mul ha hc hb hb).1 habc

@[to_additive]
theorem ThreeGPFree.image' [FunLike F α β] [MulHomClass F α β] (f : F) (hf : (s * s).InjOn f)
    (h : ThreeGPFree s) : ThreeGPFree (f '' s) := by
  rintro _ ⟨a, ha, rfl⟩ _ ⟨b, hb, rfl⟩ _ ⟨c, hc, rfl⟩ habc
  rw [h ha hb hc (hf (mul_mem_mul ha hc) (mul_mem_mul hb hb) <| by rwa [map_mul, map_mul])]

end CommMonoid

section CancelCommMonoid

variable [CancelCommMonoid α] {s : Set α} {a : α}

lemma ThreeGPFree.eq_right (hs : ThreeGPFree s) :
    ∀ ⦃a⦄, a ∈ s → ∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → a * c = b * b → b = c := by
  rintro a ha b hb c hc habc
  obtain rfl := hs ha hb hc habc
  simpa using habc.symm

@[to_additive]
lemma threeGPFree_insert :
    ThreeGPFree (insert a s) ↔ ThreeGPFree s ∧
      (∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → a * c = b * b → a = b) ∧
        ∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → b * c = a * a → b = a := by
  refine ⟨fun hs ↦ ⟨hs.mono (subset_insert _ _),
    fun b hb c hc ↦ hs (Or.inl rfl) (Or.inr hb) (Or.inr hc),
    fun b hb c hc ↦ hs (Or.inr hb) (Or.inl rfl) (Or.inr hc)⟩, ?_⟩
  rintro ⟨hs, ha, ha'⟩ b hb c hc d hd h
  rw [mem_insert_iff] at hb hc hd
  -- Case on whether each of `b`, `c`, `d` is the new element `a` or lies in `s`.
  obtain rfl | hb := hb <;> obtain rfl | hc := hc
  · rfl
  all_goals obtain rfl | hd := hd
  · exact (ha' hc hc h.symm).symm
  · exact ha hc hd h
  · exact mul_right_cancel h
  · exact ha' hb hd h
  · obtain rfl := ha hc hb ((mul_comm _ _).trans h)
    exact ha' hb hc h
  · exact hs hb hc hd h

@[to_additive]
theorem ThreeGPFree.smul_set (hs : ThreeGPFree s) : ThreeGPFree (a • s) := by
  rintro _ ⟨b, hb, rfl⟩ _ ⟨c, hc, rfl⟩ _ ⟨d, hd, rfl⟩ h
  exact congr_arg (a • ·) $ hs hb hc hd $ by simpa [mul_mul_mul_comm _ _ a] using h

-- NOTE(review): the next declaration is truncated at this chunk boundary;
-- its statement and proof continue beyond this view and are kept verbatim.
@[to_additive]
lemma threeGPFree_smul_set :
ThreeGPFree (a • s) ↔ ThreeGPFree s where mp hs b hb c hc d hd h := mul_left_cancel (hs (mem_image_of_mem _ hb) (mem_image_of_mem _ hc) (mem_image_of_mem _ hd) <| by rw [mul_mul_mul_comm, smul_eq_mul, smul_eq_mul, mul_mul_mul_comm, h]) mpr := ThreeGPFree.smul_set end CancelCommMonoid section OrderedCancelCommMonoid variable [OrderedCancelCommMonoid α] {s : Set α} {a : α} @[to_additive] theorem threeGPFree_insert_of_lt (hs : ∀ i ∈ s, i < a) : ThreeGPFree (insert a s) ↔ ThreeGPFree s ∧ ∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → a * c = b * b → a = b := by refine threeGPFree_insert.trans ?_ rw [← and_assoc] exact and_iff_left fun b hb c hc h => ((mul_lt_mul_of_lt_of_lt (hs _ hb) (hs _ hc)).ne h).elim end OrderedCancelCommMonoid section CancelCommMonoidWithZero variable [CancelCommMonoidWithZero α] [NoZeroDivisors α] {s : Set α} {a : α} lemma ThreeGPFree.smul_set₀ (hs : ThreeGPFree s) (ha : a ≠ 0) : ThreeGPFree (a • s) := by rintro _ ⟨b, hb, rfl⟩ _ ⟨c, hc, rfl⟩ _ ⟨d, hd, rfl⟩ h exact congr_arg (a • ·) $ hs hb hc hd $ by simpa [mul_mul_mul_comm _ _ a, ha] using h theorem threeGPFree_smul_set₀ (ha : a ≠ 0) : ThreeGPFree (a • s) ↔ ThreeGPFree s := ⟨fun hs b hb c hc d hd h ↦ mul_left_cancel₀ ha (hs (Set.mem_image_of_mem _ hb) (Set.mem_image_of_mem _ hc) (Set.mem_image_of_mem _ hd) <| by rw [smul_eq_mul, smul_eq_mul, mul_mul_mul_comm, h, mul_mul_mul_comm]), fun hs => hs.smul_set₀ ha⟩ end CancelCommMonoidWithZero section Nat theorem threeAPFree_iff_eq_right {s : Set ℕ} : ThreeAPFree s ↔ ∀ ⦃a⦄, a ∈ s → ∀ ⦃b⦄, b ∈ s → ∀ ⦃c⦄, c ∈ s → a + c = b + b → a = c := by refine forall₄_congr fun a _ha b hb => forall₃_congr fun c hc habc => ⟨?_, ?_⟩ · rintro rfl exact (add_left_cancel habc).symm · rintro rfl simp_rw [← two_mul] at habc exact mul_left_cancel₀ two_ne_zero habc end Nat end ThreeAPFree open Finset section RothNumber variable [DecidableEq α] section Monoid variable [Monoid α] [DecidableEq β] [Monoid β] (s t : Finset α) /-- The multiplicative Roth number of a finset is the cardinality of 
its biggest 3GP-free subset. -/ @[to_additive "The additive Roth number of a finset is the cardinality of its biggest 3AP-free subset. The usual Roth number corresponds to `addRothNumber (Finset.range n)`, see `rothNumberNat`."] def mulRothNumber : Finset α →o ℕ := ⟨fun s ↦ Nat.findGreatest (fun m ↦ ∃ t ⊆ s, t.card = m ∧ ThreeGPFree (t : Set α)) s.card, by rintro t u htu refine Nat.findGreatest_mono (fun m => ?_) (card_le_card htu) rintro ⟨v, hvt, hv⟩ exact ⟨v, hvt.trans htu, hv⟩⟩ @[to_additive] theorem mulRothNumber_le : mulRothNumber s ≤ s.card := Nat.findGreatest_le s.card @[to_additive] theorem mulRothNumber_spec : ∃ t ⊆ s, t.card = mulRothNumber s ∧ ThreeGPFree (t : Set α) := Nat.findGreatest_spec (P := fun m ↦ ∃ t ⊆ s, t.card = m ∧ ThreeGPFree (t : Set α)) (Nat.zero_le _) ⟨∅, empty_subset _, card_empty, by norm_cast; exact threeGPFree_empty⟩ variable {s t} {n : ℕ} @[to_additive] theorem ThreeGPFree.le_mulRothNumber (hs : ThreeGPFree (s : Set α)) (h : s ⊆ t) : s.card ≤ mulRothNumber t := le_findGreatest (card_le_card h) ⟨s, h, rfl, hs⟩ @[to_additive] theorem ThreeGPFree.mulRothNumber_eq (hs : ThreeGPFree (s : Set α)) : mulRothNumber s = s.card := (mulRothNumber_le _).antisymm <| hs.le_mulRothNumber <| Subset.refl _ @[to_additive (attr := simp)] theorem mulRothNumber_empty : mulRothNumber (∅ : Finset α) = 0 := Nat.eq_zero_of_le_zero <| (mulRothNumber_le _).trans card_empty.le @[to_additive (attr := simp)] theorem mulRothNumber_singleton (a : α) : mulRothNumber ({a} : Finset α) = 1 := by refine ThreeGPFree.mulRothNumber_eq ?_ rw [coe_singleton] exact threeGPFree_singleton a @[to_additive] theorem mulRothNumber_union_le (s t : Finset α) : mulRothNumber (s ∪ t) ≤ mulRothNumber s + mulRothNumber t := let ⟨u, hus, hcard, hu⟩ := mulRothNumber_spec (s ∪ t) calc mulRothNumber (s ∪ t) = u.card := hcard.symm _ = (u ∩ s ∪ u ∩ t).card := by rw [← inter_union_distrib_left, inter_eq_left.2 hus] _ ≤ (u ∩ s).card + (u ∩ t).card := card_union_le _ _ _ ≤ mulRothNumber s + 
mulRothNumber t := _root_.add_le_add ((hu.mono inter_subset_left).le_mulRothNumber inter_subset_right) ((hu.mono inter_subset_left).le_mulRothNumber inter_subset_right) @[to_additive] theorem le_mulRothNumber_product (s : Finset α) (t : Finset β) : mulRothNumber s * mulRothNumber t ≤ mulRothNumber (s ×ˢ t) := by obtain ⟨u, hus, hucard, hu⟩ := mulRothNumber_spec s obtain ⟨v, hvt, hvcard, hv⟩ := mulRothNumber_spec t rw [← hucard, ← hvcard, ← card_product] refine ThreeGPFree.le_mulRothNumber ?_ (product_subset_product hus hvt) rw [coe_product] exact hu.prod hv @[to_additive] theorem mulRothNumber_lt_of_forall_not_threeGPFree (h : ∀ t ∈ powersetCard n s, ¬ThreeGPFree ((t : Finset α) : Set α)) : mulRothNumber s < n := by obtain ⟨t, hts, hcard, ht⟩ := mulRothNumber_spec s rw [← hcard, ← not_le] intro hn obtain ⟨u, hut, rfl⟩ := exists_subset_card_eq hn exact h _ (mem_powersetCard.2 ⟨hut.trans hts, rfl⟩) (ht.mono hut) end Monoid section CommMonoid variable [CommMonoid α] [CommMonoid β] [DecidableEq β] {A : Finset α} {B : Finset β} {f : α → β} /-- Arithmetic progressions can be pushed forward along bijective 2-Freiman homs. -/ @[to_additive "Arithmetic progressions can be pushed forward along bijective 2-Freiman homs."] lemma IsMulFreimanHom.mulRothNumber_mono (hf : IsMulFreimanHom 2 A B f) (hf' : Set.BijOn f A B) : mulRothNumber B ≤ mulRothNumber A := by obtain ⟨s, hsB, hcard, hs⟩ := mulRothNumber_spec B have hsA : invFunOn f A '' s ⊆ A := (hf'.surjOn.mapsTo_invFunOn.mono (coe_subset.2 hsB) Subset.rfl).image_subset have hfsA : Set.SurjOn f A s := hf'.surjOn.mono Subset.rfl (coe_subset.2 hsB) rw [← hcard, ← s.card_image_of_injOn ((invFunOn_injOn_image f _).mono hfsA)] refine ThreeGPFree.le_mulRothNumber ?_ (mod_cast hsA) rw [coe_image] simpa using (hf.subset hsA hfsA.bijOn_subset.mapsTo).threeGPFree (hf'.injOn.mono hsA) hs /-- Arithmetic progressions are preserved under 2-Freiman isos. 
-/ @[to_additive "Arithmetic progressions are preserved under 2-Freiman isos."] lemma IsMulFreimanIso.mulRothNumber_congr (hf : IsMulFreimanIso 2 A B f) : mulRothNumber A = mulRothNumber B := by refine le_antisymm ?_ (hf.isMulFreimanHom.mulRothNumber_mono hf.bijOn) obtain ⟨s, hsA, hcard, hs⟩ := mulRothNumber_spec A rw [← coe_subset] at hsA have hfs : Set.InjOn f s := hf.bijOn.injOn.mono hsA have := (hf.subset hsA hfs.bijOn_image).threeGPFree_congr.1 hs rw [← coe_image] at this rw [← hcard, ← Finset.card_image_of_injOn hfs] refine this.le_mulRothNumber ?_ rw [← coe_subset, coe_image] exact (hf.bijOn.mapsTo.mono hsA Subset.rfl).image_subset end CommMonoid section CancelCommMonoid variable [CancelCommMonoid α] (s : Finset α) (a : α) @[to_additive (attr := simp)] theorem mulRothNumber_map_mul_left : mulRothNumber (s.map <| mulLeftEmbedding a) = mulRothNumber s := by refine le_antisymm ?_ ?_ · obtain ⟨u, hus, hcard, hu⟩ := mulRothNumber_spec (s.map <| mulLeftEmbedding a) rw [subset_map_iff] at hus obtain ⟨u, hus, rfl⟩ := hus rw [coe_map] at hu rw [← hcard, card_map] exact (threeGPFree_smul_set.1 hu).le_mulRothNumber hus · obtain ⟨u, hus, hcard, hu⟩ := mulRothNumber_spec s have h : ThreeGPFree (u.map <| mulLeftEmbedding a : Set α) := by rw [coe_map]; exact hu.smul_set convert h.le_mulRothNumber (map_subset_map.2 hus) using 1 rw [card_map, hcard] @[to_additive (attr := simp)] theorem mulRothNumber_map_mul_right : mulRothNumber (s.map <| mulRightEmbedding a) = mulRothNumber s := by rw [← mulLeftEmbedding_eq_mulRightEmbedding, mulRothNumber_map_mul_left s a] end CancelCommMonoid end RothNumber section rothNumberNat variable {s : Finset ℕ} {k n : ℕ} /-- The Roth number of a natural `N` is the largest integer `m` for which there is a subset of `range N` of size `m` with no arithmetic progression of length 3. 
Trivially, `rothNumberNat N ≤ N`, but Roth's theorem (proved in 1953) shows that `rothNumberNat N = o(N)` and the construction by Behrend gives a lower bound of the form `N * exp(-C sqrt(log(N))) ≤ rothNumberNat N`. A significant refinement of Roth's theorem by Bloom and Sisask announced in 2020 gives `rothNumberNat N = O(N / (log N)^(1+c))` for an absolute constant `c`. -/ def rothNumberNat : ℕ →o ℕ := ⟨fun n => addRothNumber (range n), addRothNumber.mono.comp range_mono⟩ theorem rothNumberNat_def (n : ℕ) : rothNumberNat n = addRothNumber (range n) := rfl theorem rothNumberNat_le (N : ℕ) : rothNumberNat N ≤ N := (addRothNumber_le _).trans (card_range _).le theorem rothNumberNat_spec (n : ℕ) : ∃ t ⊆ range n, t.card = rothNumberNat n ∧ ThreeAPFree (t : Set ℕ) := addRothNumber_spec _ /-- A verbose specialization of `threeAPFree.le_addRothNumber`, sometimes convenient in practice. -/ theorem ThreeAPFree.le_rothNumberNat (s : Finset ℕ) (hs : ThreeAPFree (s : Set ℕ)) (hsn : ∀ x ∈ s, x < n) (hsk : s.card = k) : k ≤ rothNumberNat n := hsk.ge.trans <| hs.le_addRothNumber fun x hx => mem_range.2 <| hsn x hx /-- The Roth number is a subadditive function. Note that by Fekete's lemma this shows that the limit `rothNumberNat N / N` exists, but Roth's theorem gives the stronger result that this limit is actually `0`. 
-/ theorem rothNumberNat_add_le (M N : ℕ) : rothNumberNat (M + N) ≤ rothNumberNat M + rothNumberNat N := by simp_rw [rothNumberNat_def] rw [range_add_eq_union, ← addRothNumber_map_add_left (range N) M] exact addRothNumber_union_le _ _ @[simp] theorem rothNumberNat_zero : rothNumberNat 0 = 0 := rfl theorem addRothNumber_Ico (a b : ℕ) : addRothNumber (Ico a b) = rothNumberNat (b - a) := by obtain h | h := le_total b a · rw [tsub_eq_zero_of_le h, Ico_eq_empty_of_le h, rothNumberNat_zero, addRothNumber_empty] convert addRothNumber_map_add_left _ a rw [range_eq_Ico, map_eq_image] convert (image_add_left_Ico 0 (b - a) _).symm exact (add_tsub_cancel_of_le h).symm lemma Fin.addRothNumber_eq_rothNumberNat (hkn : 2 * k ≤ n) : addRothNumber (Iio k : Finset (Fin n.succ)) = rothNumberNat k := IsAddFreimanIso.addRothNumber_congr $ mod_cast isAddFreimanIso_Iio two_ne_zero hkn lemma Fin.addRothNumber_le_rothNumberNat (k n : ℕ) (hkn : k ≤ n) : addRothNumber (Iio k : Finset (Fin n.succ)) ≤ rothNumberNat k := by suffices h : Set.BijOn (Nat.cast : ℕ → Fin n.succ) (range k) (Iio k : Finset (Fin n.succ)) by exact (AddMonoidHomClass.isAddFreimanHom (Nat.castRingHom _) h.mapsTo).addRothNumber_mono h refine ⟨?_, (CharP.natCast_injOn_Iio _ n.succ).mono (by simp; omega), ?_⟩ · simpa using fun x ↦ natCast_strictMono hkn simp only [Set.SurjOn, coe_Iio, Set.subset_def, Set.mem_Iio, Set.mem_image, lt_iff_val_lt_val, val_cast_of_lt, Nat.lt_succ_iff.2 hkn, coe_range] exact fun x hx ↦ ⟨x, hx, by simp⟩ end rothNumberNat
-- File: Combinatorics\Additive\Corner\Defs.lean
/- Copyright (c) 2024 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Combinatorics.Additive.FreimanHom

/-!
# Corners

This file defines corners, namely triples of the form `(x, y), (x, y + d), (x + d, y)`, and the
property of being corner-free.

## References

* [Yaël Dillies, Bhavik Mehta, *Formalising Szemerédi’s Regularity Lemma in Lean*][srl_itp]
* [Wikipedia, *Corners theorem*](https://en.wikipedia.org/wiki/Corners_theorem)
-/

open Set

variable {G H : Type*}

section AddCommMonoid

variable [AddCommMonoid G] [AddCommMonoid H] {A B : Set (G × G)} {s : Set G} {t : Set H}
  {f : G → H} {a b c x₁ y₁ x₂ y₂ : G}

/-- A **corner** of a set `A` in an abelian group is a triple of points of the form
`(x, y), (x + d, y), (x, y + d)`. It is **nontrivial** if `d ≠ 0`.

Here we define it as triples `(x₁, y₁), (x₂, y₁), (x₁, y₂)` where `x₁ + y₂ = x₂ + y₁` in order
for the definition to make sense in commutative monoids, the motivating example being `ℕ`. -/
-- `@[mk_iff]` generates `isCorner_iff`, used below to unfold the structure in proofs.
@[mk_iff]
structure IsCorner (A : Set (G × G)) (x₁ y₁ x₂ y₂ : G) : Prop where
  fst_fst_mem : (x₁, y₁) ∈ A
  fst_snd_mem : (x₁, y₂) ∈ A
  snd_fst_mem : (x₂, y₁) ∈ A
  -- Subtraction-free encoding of `x₂ - x₁ = y₂ - y₁` (the common difference `d`).
  add_eq_add : x₁ + y₂ = x₂ + y₁

/-- A **corner-free set** in an abelian group is a set containing no non-trivial corner. -/
def IsCornerFree (A : Set (G × G)) : Prop :=
  ∀ ⦃x₁ y₁ x₂ y₂⦄, IsCorner A x₁ y₁ x₂ y₂ → x₁ = x₂

/-- A convenient restatement of corner-freeness in terms of an ambient product set. -/
lemma isCornerFree_iff (hAs : A ⊆ s ×ˢ s) :
    IsCornerFree A ↔ ∀ ⦃x₁⦄, x₁ ∈ s → ∀ ⦃y₁⦄, y₁ ∈ s → ∀ ⦃x₂⦄, x₂ ∈ s → ∀ ⦃y₂⦄, y₂ ∈ s →
      IsCorner A x₁ y₁ x₂ y₂ → x₁ = x₂ where
  mp hA _x₁ _ _y₁ _ _x₂ _ _y₂ _ hxy := hA hxy
  -- The membership side conditions are recovered from `hAs` and the three corner points.
  mpr hA _x₁ _y₁ _x₂ _y₂ hxy := hA (hAs hxy.fst_fst_mem).1 (hAs hxy.fst_fst_mem).2
    (hAs hxy.snd_fst_mem).1 (hAs hxy.fst_snd_mem).2 hxy

lemma IsCorner.mono (hAB : A ⊆ B) (hA : IsCorner A x₁ y₁ x₂ y₂) : IsCorner B x₁ y₁ x₂ y₂ where
  fst_fst_mem := hAB hA.fst_fst_mem
  fst_snd_mem := hAB hA.fst_snd_mem
  snd_fst_mem := hAB hA.snd_fst_mem
  add_eq_add := hA.add_eq_add

lemma IsCornerFree.mono (hAB : A ⊆ B) (hB : IsCornerFree B) : IsCornerFree A :=
  fun _x₁ _y₁ _x₂ _y₂ hxyd ↦ hB $ hxyd.mono hAB

@[simp] lemma not_isCorner_empty : ¬ IsCorner ∅ x₁ y₁ x₂ y₂ := by simp [isCorner_iff]

@[simp] lemma Set.Subsingleton.isCornerFree (hA : A.Subsingleton) : IsCornerFree A :=
  fun _x₁ _y₁ _x₂ _y₂ hxyd ↦ by simpa using hA hxyd.fst_fst_mem hxyd.snd_fst_mem

lemma isCornerFree_empty : IsCornerFree (∅ : Set (G × G)) := subsingleton_empty.isCornerFree

lemma isCornerFree_singleton (x : G × G) : IsCornerFree {x} := subsingleton_singleton.isCornerFree

/-- Corners are preserved under `2`-Freiman homomorphisms. -/
lemma IsCorner.image (hf : IsAddFreimanHom 2 s t f) (hAs : (A : Set (G × G)) ⊆ s ×ˢ s)
    (hA : IsCorner A x₁ y₁ x₂ y₂) :
    IsCorner (Prod.map f f '' A) (f x₁) (f y₁) (f x₂) (f y₂) := by
  obtain ⟨hx₁y₁, hx₁y₂, hx₂y₁, hxy⟩ := hA
  exact ⟨mem_image_of_mem _ hx₁y₁, mem_image_of_mem _ hx₁y₂, mem_image_of_mem _ hx₂y₁,
    -- A 2-Freiman hom preserves the two-term sum relation `x₁ + y₂ = x₂ + y₁`.
    hf.add_eq_add (hAs hx₁y₁).1 (hAs hx₁y₂).2 (hAs hx₂y₁).1 (hAs hx₁y₁).2 hxy⟩

/-- Corners are preserved under `2`-Freiman homomorphisms. -/
lemma IsCornerFree.of_image (hf : IsAddFreimanHom 2 s t f) (hf' : s.InjOn f)
    (hAs : (A : Set (G × G)) ⊆ s ×ˢ s) (hA : IsCornerFree (Prod.map f f '' A)) :
    IsCornerFree A :=
  fun _x₁ _y₁ _x₂ _y₂ hxy ↦
    hf' (hAs hxy.fst_fst_mem).1 (hAs hxy.snd_fst_mem).1 $ hA $ hxy.image hf hAs

lemma isCorner_image (hf : IsAddFreimanIso 2 s t f) (hAs : A ⊆ s ×ˢ s) (hx₁ : x₁ ∈ s)
    (hy₁ : y₁ ∈ s) (hx₂ : x₂ ∈ s) (hy₂ : y₂ ∈ s) :
    IsCorner (Prod.map f f '' A) (f x₁) (f y₁) (f x₂) (f y₂) ↔ IsCorner A x₁ y₁ x₂ y₂ := by
  have hf' := hf.bijOn.injOn.prodMap hf.bijOn.injOn
  rw [isCorner_iff, isCorner_iff]
  -- `congr!` splits the iff into one goal per structure field.
  congr!
  · exact hf'.mem_image_iff hAs (mk_mem_prod hx₁ hy₁)
  · exact hf'.mem_image_iff hAs (mk_mem_prod hx₁ hy₂)
  · exact hf'.mem_image_iff hAs (mk_mem_prod hx₂ hy₁)
  · exact hf.add_eq_add hx₁ hy₂ hx₂ hy₁

lemma isCornerFree_image (hf : IsAddFreimanIso 2 s t f) (hAs : A ⊆ s ×ˢ s) :
    IsCornerFree (Prod.map f f '' A) ↔ IsCornerFree A := by
  have : Prod.map f f '' A ⊆ t ×ˢ t :=
    ((hf.bijOn.mapsTo.prodMap hf.bijOn.mapsTo).mono hAs Subset.rfl).image_subset
  rw [isCornerFree_iff hAs, isCornerFree_iff this]
  simp (config := { contextual := true }) only [hf.bijOn.forall, isCorner_image hf hAs,
    hf.bijOn.injOn.eq_iff]

alias ⟨IsCorner.of_image, _⟩ := isCorner_image

alias ⟨_, IsCornerFree.image⟩ := isCornerFree_image

end AddCommMonoid
-- File: Combinatorics\Additive\Corner\Roth.lean
/- Copyright (c) 2022 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import Mathlib.Combinatorics.Additive.AP.Three.Defs
import Mathlib.Combinatorics.Additive.Corner.Defs
import Mathlib.Combinatorics.SimpleGraph.Triangle.Removal
import Mathlib.Combinatorics.SimpleGraph.Triangle.Tripartite

/-!
# The corners theorem and Roth's theorem

This file proves the corners theorem and Roth's theorem on arithmetic progressions of length
three.

## References

* [Yaël Dillies, Bhavik Mehta, *Formalising Szemerédi’s Regularity Lemma in Lean*][srl_itp]
* [Wikipedia, *Corners theorem*](https://en.wikipedia.org/wiki/Corners_theorem)
-/

open Finset SimpleGraph TripartiteFromTriangles
open Function hiding graph
open Fintype (card)

variable {G : Type*} [AddCommGroup G] {A B : Finset (G × G)} {a b c d x y : G} {n : ℕ} {ε : ℝ}

namespace Corners

/-- The triangle indices for the proof of the corners theorem construction. -/
-- Each point `(a, b)` of `A` becomes the triple `(a, b, a + b)`; triangles in the associated
-- tripartite graph then correspond to corners of `A`.
private def triangleIndices (A : Finset (G × G)) : Finset (G × G × G) :=
  A.map ⟨fun (a, b) ↦ (a, b, a + b), by rintro ⟨x₁, x₂⟩ ⟨y₁, y₂⟩ ⟨⟩; rfl⟩

@[simp] private lemma mk_mem_triangleIndices :
    (a, b, c) ∈ triangleIndices A ↔ (a, b) ∈ A ∧ c = a + b := by
  simp only [triangleIndices, Prod.ext_iff, mem_map, Embedding.coeFn_mk, exists_prop,
    Prod.exists, eq_comm]
  refine ⟨?_, fun h ↦ ⟨_, _, h.1, rfl, rfl, h.2⟩⟩
  rintro ⟨_, _, h₁, rfl, rfl, h₂⟩
  exact ⟨h₁, h₂⟩

@[simp] private lemma card_triangleIndices : (triangleIndices A).card = A.card := card_map _

private instance triangleIndices.instExplicitDisjoint :
    ExplicitDisjoint (triangleIndices A) := by
  constructor
  all_goals
    simp only [mk_mem_triangleIndices, Prod.mk.inj_iff, exists_prop, forall_exists_index,
      and_imp]
    rintro a b _ a' - rfl - h'
    -- NOTE(review): `Fin.val_eq_val` in this simp set looks out of place for a general group
    -- `G` — confirm it is actually needed.
    simp [Fin.val_eq_val, *] at * <;> assumption

-- Corner-freeness of `A` means the only triangles in the tripartite graph are the explicit
-- ones, i.e. there are no "accidental" triangles.
private lemma noAccidental (hs : IsCornerFree (A : Set (G × G))) :
    NoAccidental (triangleIndices A) where
  eq_or_eq_or_eq a a' b b' c c' ha hb hc := by
    simp only [mk_mem_triangleIndices] at ha hb hc
    exact .inl $ hs ⟨hc.1, hb.1, ha.1, hb.2.symm.trans ha.2⟩

private lemma farFromTriangleFree_graph [Fintype G] [DecidableEq G]
    (hε : ε * card G ^ 2 ≤ A.card) :
    (graph <| triangleIndices A).FarFromTriangleFree (ε / 9) := by
  refine farFromTriangleFree _ ?_
  simp_rw [card_triangleIndices, mul_comm_div, Nat.cast_pow, Nat.cast_add]
  ring_nf
  simpa only [mul_comm] using hε

end Corners

variable [Fintype G]

open Corners

/-- An explicit form for the constant in the corners theorem.

Note that this depends on `SzemerediRegularity.bound`, which is a tower-type exponential. This
means `cornersTheoremBound` is in practice absolutely tiny. -/
noncomputable def cornersTheoremBound (ε : ℝ) : ℕ := ⌊(triangleRemovalBound (ε / 9) * 27)⁻¹⌋₊ + 1

/-- The **corners theorem** for finite abelian groups.

The maximum density of a corner-free set in `G × G` goes to zero as `|G|` tends to infinity. -/
theorem corners_theorem (ε : ℝ) (hε : 0 < ε) (hG : cornersTheoremBound ε ≤ card G)
    (A : Finset (G × G)) (hAε : ε * card G ^ 2 ≤ A.card) :
    ¬ IsCornerFree (A : Set (G × G)) := by
  rintro hA
  rw [cornersTheoremBound, Nat.add_one_le_iff] at hG
  -- `ε ≤ 1` since `A` lives inside `G × G`, which has `card G ^ 2` elements.
  have hε₁ : ε ≤ 1 := by
    have := hAε.trans (Nat.cast_le.2 A.card_le_univ)
    simp only [sq, Nat.cast_mul, Fintype.card_prod, Fintype.card_fin] at this
    rwa [mul_le_iff_le_one_left] at this
    positivity
  have := noAccidental hA
  rw [Nat.floor_lt' (by positivity), inv_pos_lt_iff_one_lt_mul'] at hG
  swap
  · have : ε / 9 ≤ 1 := by linarith
    positivity
  refine hG.not_le (le_of_mul_le_mul_right ?_ (by positivity : (0 : ℝ) < card G ^ 2))
  classical
  -- Triangle removal: a graph far from triangle-free has many triangles; here triangles
  -- correspond to corners, contradicting corner-freeness.
  have h₁ := (farFromTriangleFree_graph hAε).le_card_cliqueFinset
  rw [card_triangles, card_triangleIndices] at h₁
  convert h₁.trans (Nat.cast_le.2 $ card_le_univ _) using 1 <;> simp <;> ring

/-- The **corners theorem** for `ℕ`.

The maximum density of a corner-free set in `{1, ..., n} × {1, ..., n}` goes to zero as `n`
tends to infinity. -/
theorem corners_theorem_nat (hε : 0 < ε) (hn : cornersTheoremBound (ε / 9) ≤ n)
    (A : Finset (ℕ × ℕ)) (hAn : A ⊆ range n ×ˢ range n) (hAε : ε * n ^ 2 ≤ A.card) :
    ¬ IsCornerFree (A : Set (ℕ × ℕ)) := by
  rintro hA
  rw [← coe_subset, coe_product] at hAn
  -- Transfer the problem to `Fin (2 * n).succ`, which is large enough that casting `range n`
  -- is 2-Freiman faithful (no wrap-around in two-term sums).
  have : A = Prod.map Fin.val Fin.val ''
      (Prod.map Nat.cast Nat.cast '' A : Set (Fin (2 * n).succ × Fin (2 * n).succ)) := by
    rw [Set.image_image, Set.image_congr, Set.image_id]
    simp only [mem_coe, Nat.succ_eq_add_one, Prod.map_apply, Fin.val_natCast, id_eq,
      Prod.forall, Prod.mk.injEq, Nat.mod_succ_eq_iff_lt]
    rintro a b hab
    have := hAn hab
    simp at this
    omega
  rw [this] at hA
  have := Fin.isAddFreimanIso_Iio two_ne_zero (le_refl (2 * n))
  have := hA.of_image this.isAddFreimanHom Fin.val_injective.injOn $ by
    refine Set.image_subset_iff.2 $ hAn.trans fun x hx ↦ ?_
    simp only [coe_range, Set.mem_prod, Set.mem_Iio] at hx
    exact ⟨Fin.natCast_strictMono (by omega) hx.1, Fin.natCast_strictMono (by omega) hx.2⟩
  rw [← coe_image] at this
  refine corners_theorem (ε / 9) (by positivity) (by simp; omega) _ ?_ this
  calc
    _ = ε / 9 * (2 * n + 1) ^ 2 := by simp
    _ ≤ ε / 9 * (2 * n + n) ^ 2 := by gcongr; simp; unfold cornersTheoremBound at hn; omega
    _ = ε * n ^ 2 := by ring
    _ ≤ A.card := hAε
    _ = _ := by
      rw [card_image_of_injOn]
      have : Set.InjOn Nat.cast (range n) :=
        (CharP.natCast_injOn_Iio (Fin (2 * n).succ) (2 * n).succ).mono (by simp; omega)
      exact (this.prodMap this).mono hAn

/-- **Roth's theorem** for finite abelian groups.

The maximum density of a 3AP-free set in `G` goes to zero as `|G|` tends to infinity. -/
theorem roth_3ap_theorem (ε : ℝ) (hε : 0 < ε) (hG : cornersTheoremBound ε ≤ card G)
    (A : Finset G) (hAε : ε * card G ≤ A.card) : ¬ ThreeAPFree (A : Set G) := by
  rintro hA
  classical
  -- Standard reduction: `B = {(x, y) | y - x ∈ A}` is corner-free iff `A` is 3AP-free,
  -- and has density `|A| / |G|` in `G × G`.
  let B : Finset (G × G) := univ.filter fun (x, y) ↦ y - x ∈ A
  have : ε * card G ^ 2 ≤ B.card := by
    calc
      _ = card G * (ε * card G) := by ring
      _ ≤ card G * A.card := by gcongr
      _ = B.card := ?_
    norm_cast
    rw [← card_univ, ← card_product]
    exact card_equiv ((Equiv.refl _).prodShear fun a ↦ Equiv.addLeft a) (by simp [B])
  obtain ⟨x₁, y₁, x₂, y₂, hx₁y₁, hx₁y₂, hx₂y₁, hxy, hx₁x₂⟩ :
      ∃ x₁ y₁ x₂ y₂, y₁ - x₁ ∈ A ∧ y₂ - x₁ ∈ A ∧ y₁ - x₂ ∈ A ∧ x₁ + y₂ = x₂ + y₁ ∧ x₁ ≠ x₂ := by
    simpa [IsCornerFree, isCorner_iff, B, -exists_and_left, -exists_and_right]
      using corners_theorem ε hε hG B this
  have := hA hx₂y₁ hx₁y₁ hx₁y₂ $ by
    -- TODO: This really ought to just be `by linear_combination h`
    rw [sub_add_sub_comm, add_comm, add_sub_add_comm, add_right_cancel_iff,
      sub_eq_sub_iff_add_eq_add, add_comm, hxy, add_comm]
  exact hx₁x₂ $ by simpa using this.symm

/-- **Roth's theorem** for `ℕ`.

The maximum density of a 3AP-free set in `{1, ..., n}` goes to zero as `n` tends to infinity. -/
theorem roth_3ap_theorem_nat (ε : ℝ) (hε : 0 < ε) (hG : cornersTheoremBound (ε / 3) ≤ n)
    (A : Finset ℕ) (hAn : A ⊆ range n) (hAε : ε * n ≤ A.card) : ¬ ThreeAPFree (A : Set ℕ) := by
  rintro hA
  rw [← coe_subset, coe_range] at hAn
  -- Same `Fin (2 * n).succ` transfer as in `corners_theorem_nat`.
  have : A = Fin.val '' (Nat.cast '' A : Set (Fin (2 * n).succ)) := by
    rw [Set.image_image, Set.image_congr, Set.image_id]
    simp only [mem_coe, Nat.succ_eq_add_one, Fin.val_natCast, id_eq, Nat.mod_succ_eq_iff_lt]
    rintro a ha
    have := hAn ha
    simp at this
    omega
  rw [this] at hA
  have := Fin.isAddFreimanIso_Iio two_ne_zero (le_refl (2 * n))
  have := hA.of_image this.isAddFreimanHom Fin.val_injective.injOn $
    Set.image_subset_iff.2 $ hAn.trans fun x hx ↦
      Fin.natCast_strictMono (by omega) $ by simpa only [coe_range, Set.mem_Iio] using hx
  rw [← coe_image] at this
  refine roth_3ap_theorem (ε / 3) (by positivity) (by simp; omega) _ ?_ this
  calc
    _ = ε / 3 * (2 * n + 1) := by simp
    _ ≤ ε / 3 * (2 * n + n) := by gcongr; simp; unfold cornersTheoremBound at hG; omega
    _ = ε * n := by ring
    _ ≤ A.card := hAε
    _ = _ := by
      rw [card_image_of_injOn]
      exact (CharP.natCast_injOn_Iio (Fin (2 * n).succ) (2 * n).succ).mono $ hAn.trans $ by
        simp; omega

open Asymptotics Filter

/-- **Roth's theorem** for `ℕ` as an asymptotic statement.

The maximum density of a 3AP-free set in `{1, ..., n}` goes to zero as `n` tends to
infinity. -/
theorem rothNumberNat_isLittleO_id :
    IsLittleO atTop (fun N ↦ (rothNumberNat N : ℝ)) (fun N ↦ (N : ℝ)) := by
  simp only [isLittleO_iff, eventually_atTop, RCLike.norm_natCast]
  refine fun ε hε ↦ ⟨cornersTheoremBound (ε / 3), fun n hn ↦ ?_⟩
  obtain ⟨A, hs₁, hs₂, hs₃⟩ := rothNumberNat_spec n
  rw [← hs₂, ← not_lt]
  exact fun hδn ↦ roth_3ap_theorem_nat ε hε hn _ hs₁ hδn.le hs₃
-- File: Combinatorics\Derangements\Basic.lean
/- Copyright (c) 2021 Henry Swanson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Henry Swanson -/ import Mathlib.Dynamics.FixedPoints.Basic import Mathlib.GroupTheory.Perm.Option import Mathlib.Logic.Equiv.Defs import Mathlib.Logic.Equiv.Option /-! # Derangements on types In this file we define `derangements α`, the set of derangements on a type `α`. We also define some equivalences involving various subtypes of `Perm α` and `derangements α`: * `derangementsOptionEquivSigmaAtMostOneFixedPoint`: An equivalence between `derangements (Option α)` and the sigma-type `Σ a : α, {f : Perm α // fixed_points f ⊆ a}`. * `derangementsRecursionEquiv`: An equivalence between `derangements (Option α)` and the sigma-type `Σ a : α, (derangements (({a}ᶜ : Set α) : Type*) ⊕ derangements α)` which is later used to inductively count the number of derangements. In order to prove the above, we also prove some results about the effect of `Equiv.removeNone` on derangements: `RemoveNone.fiber_none` and `RemoveNone.fiber_some`. -/ open Equiv Function /-- A permutation is a derangement if it has no fixed points. -/ def derangements (α : Type*) : Set (Perm α) := { f : Perm α | ∀ x : α, f x ≠ x } variable {α β : Type*} theorem mem_derangements_iff_fixedPoints_eq_empty {f : Perm α} : f ∈ derangements α ↔ fixedPoints f = ∅ := Set.eq_empty_iff_forall_not_mem.symm /-- If `α` is equivalent to `β`, then `derangements α` is equivalent to `derangements β`. -/ def Equiv.derangementsCongr (e : α ≃ β) : derangements α ≃ derangements β := e.permCongr.subtypeEquiv fun {f} => e.forall_congr <| by intro b; simp only [ne_eq, permCongr_apply, symm_apply_apply, EmbeddingLike.apply_eq_iff_eq] namespace derangements /-- Derangements on a subtype are equivalent to permutations on the original type where points are fixed iff they are not in the subtype. 
-/ protected def subtypeEquiv (p : α → Prop) [DecidablePred p] : derangements (Subtype p) ≃ { f : Perm α // ∀ a, ¬p a ↔ a ∈ fixedPoints f } := calc derangements (Subtype p) ≃ { f : { f : Perm α // ∀ a, ¬p a → a ∈ fixedPoints f } // ∀ a, a ∈ fixedPoints f → ¬p a } := by refine (Perm.subtypeEquivSubtypePerm p).subtypeEquiv fun f => ⟨fun hf a hfa ha => ?_, ?_⟩ · refine hf ⟨a, ha⟩ (Subtype.ext ?_) simp_rw [mem_fixedPoints, IsFixedPt, Perm.subtypeEquivSubtypePerm, Equiv.coe_fn_mk, Perm.ofSubtype_apply_of_mem _ ha] at hfa assumption rintro hf ⟨a, ha⟩ hfa refine hf _ ?_ ha simp only [Perm.subtypeEquivSubtypePerm_apply_coe, mem_fixedPoints] dsimp [IsFixedPt] simp_rw [Perm.ofSubtype_apply_of_mem _ ha, hfa] _ ≃ { f : Perm α // ∃ _h : ∀ a, ¬p a → a ∈ fixedPoints f, ∀ a, a ∈ fixedPoints f → ¬p a } := subtypeSubtypeEquivSubtypeExists _ _ _ ≃ { f : Perm α // ∀ a, ¬p a ↔ a ∈ fixedPoints f } := subtypeEquivRight fun f => by simp_rw [exists_prop, ← forall_and, ← iff_iff_implies_and_implies] universe u /-- The set of permutations that fix either `a` or nothing is equivalent to the sum of: - derangements on `α` - derangements on `α` minus `a`. -/ def atMostOneFixedPointEquivSum_derangements [DecidableEq α] (a : α) : { f : Perm α // fixedPoints f ⊆ {a} } ≃ (derangements ({a}ᶜ : Set α)) ⊕ (derangements α) := calc { f : Perm α // fixedPoints f ⊆ {a} } ≃ { f : { f : Perm α // fixedPoints f ⊆ {a} } // a ∈ fixedPoints f } ⊕ { f : { f : Perm α // fixedPoints f ⊆ {a} } // a ∉ fixedPoints f } := (Equiv.sumCompl _).symm _ ≃ { f : Perm α // fixedPoints f ⊆ {a} ∧ a ∈ fixedPoints f } ⊕ { f : Perm α // fixedPoints f ⊆ {a} ∧ a ∉ fixedPoints f } := by -- Porting note: `subtypeSubtypeEquivSubtypeInter` no longer works with placeholder `_`s. 
refine Equiv.sumCongr ?_ ?_ · exact subtypeSubtypeEquivSubtypeInter (fun x : Perm α => fixedPoints x ⊆ {a}) (a ∈ fixedPoints ·) · exact subtypeSubtypeEquivSubtypeInter (fun x : Perm α => fixedPoints x ⊆ {a}) (¬a ∈ fixedPoints ·) _ ≃ { f : Perm α // fixedPoints f = {a} } ⊕ { f : Perm α // fixedPoints f = ∅ } := by refine Equiv.sumCongr (subtypeEquivRight fun f => ?_) (subtypeEquivRight fun f => ?_) · rw [Set.eq_singleton_iff_unique_mem, and_comm] rfl · rw [Set.eq_empty_iff_forall_not_mem] exact ⟨fun h x hx => h.2 (h.1 hx ▸ hx), fun h => ⟨fun x hx => (h _ hx).elim, h _⟩⟩ _ ≃ derangements ({a}ᶜ : Set α) ⊕ derangements α := by -- Porting note: was `subtypeEquiv _` but now needs the placeholder to be provided explicitly refine Equiv.sumCongr ((derangements.subtypeEquiv (· ∈ ({a}ᶜ : Set α))).trans <| subtypeEquivRight fun x => ?_).symm (subtypeEquivRight fun f => mem_derangements_iff_fixedPoints_eq_empty.symm) rw [eq_comm, Set.ext_iff] simp_rw [Set.mem_compl_iff, Classical.not_not] namespace Equiv variable [DecidableEq α] /-- The set of permutations `f` such that the preimage of `(a, f)` under `Equiv.Perm.decomposeOption` is a derangement. -/ def RemoveNone.fiber (a : Option α) : Set (Perm α) := { f : Perm α | (a, f) ∈ Equiv.Perm.decomposeOption '' derangements (Option α) } theorem RemoveNone.mem_fiber (a : Option α) (f : Perm α) : f ∈ RemoveNone.fiber a ↔ ∃ F : Perm (Option α), F ∈ derangements (Option α) ∧ F none = a ∧ removeNone F = f := by simp [RemoveNone.fiber, derangements] theorem RemoveNone.fiber_none : RemoveNone.fiber (@none α) = ∅ := by rw [Set.eq_empty_iff_forall_not_mem] intro f hyp rw [RemoveNone.mem_fiber] at hyp rcases hyp with ⟨F, F_derangement, F_none, _⟩ exact F_derangement none F_none /-- For any `a : α`, the fiber over `some a` is the set of permutations where `a` is the only possible fixed point. 
-/
theorem RemoveNone.fiber_some (a : α) :
    RemoveNone.fiber (some a) = { f : Perm α | fixedPoints f ⊆ {a} } := by
  ext f
  constructor
  -- Forward direction: a permutation in the fiber over `some a` fixes at most `a`.
  · rw [RemoveNone.mem_fiber]
    rintro ⟨F, F_derangement, F_none, rfl⟩ x x_fixed
    rw [mem_fixedPoints_iff] at x_fixed
    apply_fun some at x_fixed
    -- Split on whether `F (some x)` is `none` or `some y`.
    cases' Fx : F (some x) with y
    · rwa [removeNone_none F Fx, F_none, Option.some_inj, eq_comm] at x_fixed
    -- If `F (some x) = some y`, then `some x` would be a fixed point of the
    -- derangement `F`, a contradiction.
    · exfalso
      rw [removeNone_some F ⟨y, Fx⟩] at x_fixed
      exact F_derangement _ x_fixed
  -- Backward direction: lift `f` to a derangement of `Option α` sending `none` to `some a`.
  · intro h_opfp
    use Equiv.Perm.decomposeOption.symm (some a, f)
    constructor
    · intro x
      apply_fun fun x => Equiv.swap none (some a) x
      simp only [Perm.decomposeOption_symm_apply, swap_apply_self, Perm.coe_mul]
      cases' x with x
      · simp
      simp only [comp, optionCongr_apply, Option.map_some', swap_apply_self]
      by_cases x_vs_a : x = a
      · rw [x_vs_a, swap_apply_right]
        apply Option.some_ne_none
      -- `some x` is distinct from both swapped values, so the swap leaves it alone.
      have ne_1 : some x ≠ none := Option.some_ne_none _
      have ne_2 : some x ≠ some a := (Option.some_injective α).ne_iff.mpr x_vs_a
      rw [swap_apply_of_ne_of_ne ne_1 ne_2, (Option.some_injective α).ne_iff]
      intro contra
      exact x_vs_a (h_opfp contra)
    · rw [apply_symm_apply]

end Equiv

section Option

variable [DecidableEq α]

/-- The set of derangements on `Option α` is equivalent to the union over `a : α`
of "permutations with `a` the only possible fixed point". -/
def derangementsOptionEquivSigmaAtMostOneFixedPoint :
    derangements (Option α) ≃ Σa : α, { f : Perm α | fixedPoints f ⊆ {a} } := by
  -- The fiber over `none` is empty: a derangement cannot fix `none`.
  have fiber_none_is_false : Equiv.RemoveNone.fiber (@none α) → False := by
    rw [Equiv.RemoveNone.fiber_none]
    exact IsEmpty.false
  calc
    derangements (Option α) ≃ Equiv.Perm.decomposeOption '' derangements (Option α) :=
      Equiv.image _ _
    _ ≃ Σa : Option α, ↥(Equiv.RemoveNone.fiber a) := setProdEquivSigma _
    _ ≃ Σa : α, ↥(Equiv.RemoveNone.fiber (some a)) := sigmaOptionEquivOfSome _ fiber_none_is_false
    _ ≃ Σa : α, { f : Perm α | fixedPoints f ⊆ {a} } := by
      simp_rw [Equiv.RemoveNone.fiber_some]
      rfl

/-- The set of derangements on `Option α` is equivalent to the union over all `a : α` of
"derangements on `α` ⊕ derangements on `{a}ᶜ`". -/
def derangementsRecursionEquiv :
    derangements (Option α) ≃ Σa : α, derangements (({a}ᶜ : Set α) : Type _) ⊕ derangements α :=
  derangementsOptionEquivSigmaAtMostOneFixedPoint.trans
    (sigmaCongrRight atMostOneFixedPointEquivSum_derangements)

end Option

end derangements
Combinatorics\Derangements\Exponential.lean
/-
Copyright (c) 2021 Henry Swanson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Henry Swanson, Patrick Massot
-/
import Mathlib.Analysis.SpecialFunctions.Exponential
import Mathlib.Combinatorics.Derangements.Finite
import Mathlib.Order.Filter.Basic

/-!
# Derangement exponential series

This file proves that the probability of a permutation on n elements being a derangement is 1/e.
The specific lemma is `numDerangements_tendsto_inv_e`.
-/

open Filter NormedSpace

open scoped Topology

/-- The proportion of derangements among all permutations of `n` elements,
`numDerangements n / n!`, tends to `1/e = exp (-1)` as `n → ∞`. -/
theorem numDerangements_tendsto_inv_e :
    Tendsto (fun n => (numDerangements n : ℝ) / n.factorial) atTop (𝓝 (Real.exp (-1))) := by
  -- we show that d(n)/n! is the partial sum of exp(-1), but offset by 1.
  -- this isn't entirely obvious, since we have to ensure that asc_factorial and
  -- factorial interact in the right way, e.g., that k ≤ n always
  let s : ℕ → ℝ := fun n => ∑ k ∈ Finset.range n, (-1 : ℝ) ^ k / k.factorial
  suffices ∀ n : ℕ, (numDerangements n : ℝ) / n.factorial = s (n + 1) by
    simp_rw [this]
    -- shift the function by 1, and then use the fact that the partial sums
    -- converge to the infinite sum
    rw [tendsto_add_atTop_iff_nat (f := fun n => ∑ k ∈ Finset.range n,
      (-1 : ℝ) ^ k / k.factorial) 1]
    apply HasSum.tendsto_sum_nat
    -- there's no specific lemma for ℝ that ∑ x^k/k! sums to exp(x), but it's
    -- true in more general fields, so use that lemma
    rw [Real.exp_eq_exp_ℝ]
    exact expSeries_div_hasSum_exp ℝ (-1 : ℝ)
  -- it remains to identify `d(n)/n!` with the partial sum `s (n + 1)`,
  -- starting from the closed form `numDerangements_sum`.
  intro n
  rw [← Int.cast_natCast, numDerangements_sum]
  push_cast
  rw [Finset.sum_div]
  -- get down to individual terms
  refine Finset.sum_congr (refl _) ?_
  intro k hk
  have h_le : k ≤ n := Finset.mem_range_succ_iff.mp hk
  rw [Nat.ascFactorial_eq_div, add_tsub_cancel_of_le h_le]
  push_cast [Nat.factorial_dvd_factorial h_le]
  field_simp [Nat.factorial_ne_zero]
  ring
Combinatorics\Derangements\Finite.lean
/- Copyright (c) 2021 Henry Swanson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Henry Swanson -/ import Mathlib.Algebra.BigOperators.Ring import Mathlib.Combinatorics.Derangements.Basic import Mathlib.Data.Fintype.BigOperators import Mathlib.Tactic.Ring /-! # Derangements on fintypes This file contains lemmas that describe the cardinality of `derangements α` when `α` is a fintype. # Main definitions * `card_derangements_invariant`: A lemma stating that the number of derangements on a type `α` depends only on the cardinality of `α`. * `numDerangements n`: The number of derangements on an n-element set, defined in a computation- friendly way. * `card_derangements_eq_numDerangements`: Proof that `numDerangements` really does compute the number of derangements. * `numDerangements_sum`: A lemma giving an expression for `numDerangements n` in terms of factorials. -/ open derangements Equiv Fintype variable {α : Type*} [DecidableEq α] [Fintype α] instance : DecidablePred (derangements α) := fun _ => Fintype.decidableForallFintype -- Porting note: used to use the tactic delta_instance instance : Fintype (derangements α) := Subtype.fintype (fun (_ : Perm α) => ∀ (x_1 : α), ¬_ = x_1) theorem card_derangements_invariant {α β : Type*} [Fintype α] [DecidableEq α] [Fintype β] [DecidableEq β] (h : card α = card β) : card (derangements α) = card (derangements β) := Fintype.card_congr (Equiv.derangementsCongr <| equivOfCardEq h) theorem card_derangements_fin_add_two (n : ℕ) : card (derangements (Fin (n + 2))) = (n + 1) * card (derangements (Fin n)) + (n + 1) * card (derangements (Fin (n + 1))) := by -- get some basic results about the size of fin (n+1) plus or minus an element have h1 : ∀ a : Fin (n + 1), card ({a}ᶜ : Set (Fin (n + 1))) = card (Fin n) := by intro a simp only [Fintype.card_fin, Finset.card_fin, Fintype.card_ofFinset, Finset.filter_ne' _ a, Set.mem_compl_singleton_iff, Finset.card_erase_of_mem (Finset.mem_univ 
a), add_tsub_cancel_right] have h2 : card (Fin (n + 2)) = card (Option (Fin (n + 1))) := by simp only [card_fin, card_option] -- rewrite the LHS and substitute in our fintype-level equivalence simp only [card_derangements_invariant h2, card_congr (@derangementsRecursionEquiv (Fin (n + 1)) _),-- push the cardinality through the Σ and ⊕ so that we can use `card_n` card_sigma, card_sum, card_derangements_invariant (h1 _), Finset.sum_const, nsmul_eq_mul, Finset.card_fin, mul_add, Nat.cast_id] /-- The number of derangements of an `n`-element set. -/ def numDerangements : ℕ → ℕ | 0 => 1 | 1 => 0 | n + 2 => (n + 1) * (numDerangements n + numDerangements (n + 1)) @[simp] theorem numDerangements_zero : numDerangements 0 = 1 := rfl @[simp] theorem numDerangements_one : numDerangements 1 = 0 := rfl theorem numDerangements_add_two (n : ℕ) : numDerangements (n + 2) = (n + 1) * (numDerangements n + numDerangements (n + 1)) := rfl theorem numDerangements_succ (n : ℕ) : (numDerangements (n + 1) : ℤ) = (n + 1) * (numDerangements n : ℤ) - (-1) ^ n := by induction' n with n hn · rfl · simp only [numDerangements_add_two, hn, pow_succ, Int.ofNat_mul, Int.ofNat_add, Int.ofNat_succ] ring theorem card_derangements_fin_eq_numDerangements {n : ℕ} : card (derangements (Fin n)) = numDerangements n := by induction' n using Nat.strong_induction_on with n hyp rcases n with _ | _ | n -- knock out cases 0 and 1 · rfl · rfl -- now we have n ≥ 2. 
rewrite everything in terms of card_derangements, so that we can use -- `card_derangements_fin_add_two` rw [numDerangements_add_two, card_derangements_fin_add_two, mul_add, hyp, hyp] <;> omega theorem card_derangements_eq_numDerangements (α : Type*) [Fintype α] [DecidableEq α] : card (derangements α) = numDerangements (card α) := by rw [← card_derangements_invariant (card_fin _)] exact card_derangements_fin_eq_numDerangements theorem numDerangements_sum (n : ℕ) : (numDerangements n : ℤ) = ∑ k ∈ Finset.range (n + 1), (-1 : ℤ) ^ k * Nat.ascFactorial (k + 1) (n - k) := by induction' n with n hn; · rfl rw [Finset.sum_range_succ, numDerangements_succ, hn, Finset.mul_sum, tsub_self, Nat.ascFactorial_zero, Int.ofNat_one, mul_one, pow_succ', neg_one_mul, sub_eq_add_neg, add_left_inj, Finset.sum_congr rfl] -- show that (n + 1) * (-1)^x * asc_fac x (n - x) = (-1)^x * asc_fac x (n.succ - x) intro x hx have h_le : x ≤ n := Finset.mem_range_succ_iff.mp hx rw [Nat.succ_sub h_le, Nat.ascFactorial_succ, add_right_comm, add_tsub_cancel_of_le h_le, Int.ofNat_mul, Int.ofNat_add, mul_left_comm, Nat.cast_one]
Combinatorics\Enumerative\Catalan.lean
/-
Copyright (c) 2022 Julian Kuelshammer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Julian Kuelshammer
-/
import Mathlib.Algebra.BigOperators.Fin
import Mathlib.Algebra.BigOperators.NatAntidiagonal
import Mathlib.Algebra.CharZero.Lemmas
import Mathlib.Data.Finset.NatAntidiagonal
import Mathlib.Data.Nat.Choose.Central
import Mathlib.Data.Tree.Basic
import Mathlib.Tactic.FieldSimp
import Mathlib.Tactic.GCongr
import Mathlib.Tactic.Positivity

/-!
# Catalan numbers

The Catalan numbers (http://oeis.org/A000108) are probably the most ubiquitous sequence of integers
in mathematics. They enumerate several important objects like binary trees, Dyck paths, and
triangulations of convex polygons.

## Main definitions

* `catalan n`: the `n`th Catalan number, defined recursively as
  `catalan (n + 1) = ∑ i : Fin n.succ, catalan i * catalan (n - i)`.

## Main results

* `catalan_eq_centralBinom_div`: The explicit formula for the Catalan number using the central
  binomial coefficient, `catalan n = Nat.centralBinom n / (n + 1)`.

* `treesOfNumNodesEq_card_eq_catalan`: The number of binary trees with `n` internal nodes is
  `catalan n`

## Implementation details

The proof of `catalan_eq_centralBinom_div` follows https://math.stackexchange.com/questions/3304415

## TODO

* Prove that the Catalan numbers enumerate many interesting objects.
* Provide the many variants of Catalan numbers, e.g. associated to complex reflection groups,
  Fuss-Catalan, etc.
-/ open Finset open Finset.antidiagonal (fst_le snd_le) /-- The recursive definition of the sequence of Catalan numbers: `catalan (n + 1) = ∑ i : Fin n.succ, catalan i * catalan (n - i)` -/ def catalan : ℕ → ℕ | 0 => 1 | n + 1 => ∑ i : Fin n.succ, catalan i * catalan (n - i) @[simp] theorem catalan_zero : catalan 0 = 1 := by rw [catalan] theorem catalan_succ (n : ℕ) : catalan (n + 1) = ∑ i : Fin n.succ, catalan i * catalan (n - i) := by rw [catalan] theorem catalan_succ' (n : ℕ) : catalan (n + 1) = ∑ ij ∈ antidiagonal n, catalan ij.1 * catalan ij.2 := by rw [catalan_succ, Nat.sum_antidiagonal_eq_sum_range_succ (fun x y => catalan x * catalan y) n, sum_range] @[simp] theorem catalan_one : catalan 1 = 1 := by simp [catalan_succ] /-- A helper sequence that can be used to prove the equality of the recursive and the explicit definition using a telescoping sum argument. -/ private def gosperCatalan (n j : ℕ) : ℚ := Nat.centralBinom j * Nat.centralBinom (n - j) * (2 * j - n) / (2 * n * (n + 1)) private theorem gosper_trick {n i : ℕ} (h : i ≤ n) : gosperCatalan (n + 1) (i + 1) - gosperCatalan (n + 1) i = Nat.centralBinom i / (i + 1) * Nat.centralBinom (n - i) / (n - i + 1) := by have l₁ : (i : ℚ) + 1 ≠ 0 := by norm_cast have l₂ : (n : ℚ) - i + 1 ≠ 0 := by norm_cast have h₁ := (mul_div_cancel_left₀ (↑(Nat.centralBinom (i + 1))) l₁).symm have h₂ := (mul_div_cancel_left₀ (↑(Nat.centralBinom (n - i + 1))) l₂).symm have h₃ : ((i : ℚ) + 1) * (i + 1).centralBinom = 2 * (2 * i + 1) * i.centralBinom := mod_cast Nat.succ_mul_centralBinom_succ i have h₄ : ((n : ℚ) - i + 1) * (n - i + 1).centralBinom = 2 * (2 * (n - i) + 1) * (n - i).centralBinom := mod_cast Nat.succ_mul_centralBinom_succ (n - i) simp only [gosperCatalan] push_cast rw [show n + 1 - i = n - i + 1 by rw [Nat.add_comm (n - i) 1, ← (Nat.add_sub_assoc h 1), add_comm]] rw [h₁, h₂, h₃, h₄] field_simp ring private theorem gosper_catalan_sub_eq_central_binom_div (n : ℕ) : gosperCatalan (n + 1) (n + 1) - gosperCatalan (n + 1) 0 
= Nat.centralBinom (n + 1) / (n + 2) := by have : (n : ℚ) + 1 ≠ 0 := by norm_cast have : (n : ℚ) + 1 + 1 ≠ 0 := by norm_cast have h : (n : ℚ) + 2 ≠ 0 := by norm_cast simp only [gosperCatalan, Nat.sub_zero, Nat.centralBinom_zero, Nat.sub_self] field_simp ring theorem catalan_eq_centralBinom_div (n : ℕ) : catalan n = n.centralBinom / (n + 1) := by suffices (catalan n : ℚ) = Nat.centralBinom n / (n + 1) by have h := Nat.succ_dvd_centralBinom n exact mod_cast this induction' n using Nat.case_strong_induction_on with d hd · simp · simp_rw [catalan_succ, Nat.cast_sum, Nat.cast_mul] trans (∑ i : Fin d.succ, Nat.centralBinom i / (i + 1) * (Nat.centralBinom (d - i) / (d - i + 1)) : ℚ) · congr ext1 x have m_le_d : x.val ≤ d := by apply Nat.le_of_lt_succ; apply x.2 have d_minus_x_le_d : (d - x.val) ≤ d := tsub_le_self rw [hd _ m_le_d, hd _ d_minus_x_le_d] norm_cast · trans (∑ i : Fin d.succ, (gosperCatalan (d + 1) (i + 1) - gosperCatalan (d + 1) i)) · refine sum_congr rfl fun i _ => ?_ rw [gosper_trick i.is_le, mul_div] · rw [← sum_range fun i => gosperCatalan (d + 1) (i + 1) - gosperCatalan (d + 1) i, sum_range_sub, Nat.succ_eq_add_one] rw [gosper_catalan_sub_eq_central_binom_div d] norm_cast theorem succ_mul_catalan_eq_centralBinom (n : ℕ) : (n + 1) * catalan n = n.centralBinom := (Nat.eq_mul_of_div_eq_right n.succ_dvd_centralBinom (catalan_eq_centralBinom_div n).symm).symm theorem catalan_two : catalan 2 = 2 := by norm_num [catalan_eq_centralBinom_div, Nat.centralBinom, Nat.choose] theorem catalan_three : catalan 3 = 5 := by norm_num [catalan_eq_centralBinom_div, Nat.centralBinom, Nat.choose] namespace Tree open Tree /-- Given two finsets, find all trees that can be formed with left child in `a` and right child in `b` -/ abbrev pairwiseNode (a b : Finset (Tree Unit)) : Finset (Tree Unit) := (a ×ˢ b).map ⟨fun x => x.1 △ x.2, fun ⟨x₁, x₂⟩ ⟨y₁, y₂⟩ => fun h => by simpa using h⟩ /-- A Finset of all trees with `n` nodes. 
See `mem_treesOfNumNodesEq`. -/
def treesOfNumNodesEq : ℕ → Finset (Tree Unit)
  | 0 => {nil}
  | n + 1 =>
    (antidiagonal n).attach.biUnion fun ijh =>
      -- Porting note: `unusedHavesSuffices` linter is not happy with this. Commented out.
      -- have := Nat.lt_succ_of_le (fst_le ijh.2)
      -- have := Nat.lt_succ_of_le (snd_le ijh.2)
      pairwiseNode (treesOfNumNodesEq ijh.1.1) (treesOfNumNodesEq ijh.1.2)
  -- Porting note: Add this to satisfy the linter.
  decreasing_by
    · simp_wf; have := fst_le ijh.2; omega
    · simp_wf; have := snd_le ijh.2; omega

@[simp]
theorem treesOfNumNodesEq_zero : treesOfNumNodesEq 0 = {nil} := by rw [treesOfNumNodesEq]

/-- Unfolding lemma: the trees with `n + 1` nodes are obtained by pairing, over every split
`i + j = n`, a left subtree with `i` nodes and a right subtree with `j` nodes. -/
theorem treesOfNumNodesEq_succ (n : ℕ) :
    treesOfNumNodesEq (n + 1) =
      (antidiagonal n).biUnion fun ij =>
        pairwiseNode (treesOfNumNodesEq ij.1) (treesOfNumNodesEq ij.2) := by
  rw [treesOfNumNodesEq]
  ext
  simp

/-- Membership in `treesOfNumNodesEq n` is exactly having `n` internal nodes. -/
@[simp]
theorem mem_treesOfNumNodesEq {x : Tree Unit} {n : ℕ} :
    x ∈ treesOfNumNodesEq n ↔ x.numNodes = n := by
  induction x using Tree.unitRecOn generalizing n <;> cases n <;>
    simp [treesOfNumNodesEq_succ, *]

theorem mem_treesOfNumNodesEq_numNodes (x : Tree Unit) : x ∈ treesOfNumNodesEq x.numNodes :=
  mem_treesOfNumNodesEq.mpr rfl

@[simp, norm_cast]
theorem coe_treesOfNumNodesEq (n : ℕ) :
    ↑(treesOfNumNodesEq n) = { x : Tree Unit | x.numNodes = n } := Set.ext (by simp)

/-- The number of binary trees with `n` internal nodes is the `n`th Catalan number. -/
theorem treesOfNumNodesEq_card_eq_catalan (n : ℕ) : (treesOfNumNodesEq n).card = catalan n := by
  induction' n using Nat.case_strong_induction_on with n ih
  · simp
  rw [treesOfNumNodesEq_succ, card_biUnion, catalan_succ']
  -- The union is counted blockwise via the inductive hypothesis on each split...
  · apply sum_congr rfl
    rintro ⟨i, j⟩ H
    rw [card_map, card_product, ih _ (fst_le H), ih _ (snd_le H)]
  -- ...which is valid because distinct splits produce disjoint sets of trees.
  · simp_rw [disjoint_left]
    rintro ⟨i, j⟩ _ ⟨i', j'⟩ _
    -- Porting note: was clear * -; tidy
    intros h a
    cases' a with a l r
    · intro h; simp at h
    · intro h1 h2
      apply h
      trans (numNodes l, numNodes r)
      · simp at h1; simp [h1]
      · simp at h2; simp [h2]

end Tree
Combinatorics\Enumerative\Composition.lean
/- Copyright (c) 2020 Sébastien Gouëzel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Sébastien Gouëzel -/ import Mathlib.Algebra.BigOperators.Fin import Mathlib.Algebra.Order.BigOperators.Group.Finset import Mathlib.Data.Finset.Sort import Mathlib.Data.Set.Subsingleton /-! # Compositions A composition of a natural number `n` is a decomposition `n = i₀ + ... + i_{k-1}` of `n` into a sum of positive integers. Combinatorially, it corresponds to a decomposition of `{0, ..., n-1}` into non-empty blocks of consecutive integers, where the `iⱼ` are the lengths of the blocks. This notion is closely related to that of a partition of `n`, but in a composition of `n` the order of the `iⱼ`s matters. We implement two different structures covering these two viewpoints on compositions. The first one, made of a list of positive integers summing to `n`, is the main one and is called `Composition n`. The second one is useful for combinatorial arguments (for instance to show that the number of compositions of `n` is `2^(n-1)`). It is given by a subset of `{0, ..., n}` containing `0` and `n`, where the elements of the subset (other than `n`) correspond to the leftmost points of each block. The main API is built on `Composition n`, and we provide an equivalence between the two types. ## Main functions * `c : Composition n` is a structure, made of a list of integers which are all positive and add up to `n`. * `composition_card` states that the cardinality of `Composition n` is exactly `2^(n-1)`, which is proved by constructing an equiv with `CompositionAsSet n` (see below), which is itself in bijection with the subsets of `Fin (n-1)` (this holds even for `n = 0`, where `-` is nat subtraction). Let `c : Composition n` be a composition of `n`. Then * `c.blocks` is the list of blocks in `c`. * `c.length` is the number of blocks in the composition. 
* `c.blocksFun : Fin c.length → ℕ` is the realization of `c.blocks` as a function on `Fin c.length`. This is the main object when using compositions to understand the composition of analytic functions. * `c.sizeUpTo : ℕ → ℕ` is the sum of the size of the blocks up to `i`.; * `c.embedding i : Fin (c.blocksFun i) → Fin n` is the increasing embedding of the `i`-th block in `Fin n`; * `c.index j`, for `j : Fin n`, is the index of the block containing `j`. * `Composition.ones n` is the composition of `n` made of ones, i.e., `[1, ..., 1]`. * `Composition.single n (hn : 0 < n)` is the composition of `n` made of a single block of size `n`. Compositions can also be used to split lists. Let `l` be a list of length `n` and `c` a composition of `n`. * `l.splitWrtComposition c` is a list of lists, made of the slices of `l` corresponding to the blocks of `c`. * `join_splitWrtComposition` states that splitting a list and then joining it gives back the original list. * `joinSplitWrtComposition_join` states that joining a list of lists, and then splitting it back according to the right composition, gives back the original list of lists. We turn to the second viewpoint on compositions, that we realize as a finset of `Fin (n+1)`. `c : CompositionAsSet n` is a structure made of a finset of `Fin (n+1)` called `c.boundaries` and proofs that it contains `0` and `n`. (Taking a finset of `Fin n` containing `0` would not make sense in the edge case `n = 0`, while the previous description works in all cases). The elements of this set (other than `n`) correspond to leftmost points of blocks. Thus, there is an equiv between `Composition n` and `CompositionAsSet n`. We only construct basic API on `CompositionAsSet` (notably `c.length` and `c.blocks`) to be able to construct this equiv, called `compositionEquiv n`. 
Since there is a straightforward equiv between `CompositionAsSet n` and finsets of `{1, ..., n-1}` (obtained by removing `0` and `n` from a `CompositionAsSet` and called `compositionAsSetEquiv n`), we deduce that `CompositionAsSet n` and `Composition n` are both fintypes of cardinality `2^(n - 1)` (see `compositionAsSet_card` and `composition_card`). ## Implementation details The main motivation for this structure and its API is in the construction of the composition of formal multilinear series, and the proof that the composition of analytic functions is analytic. The representation of a composition as a list is very handy as lists are very flexible and already have a well-developed API. ## Tags Composition, partition ## References <https://en.wikipedia.org/wiki/Composition_(combinatorics)> -/ open List variable {n : ℕ} /-- A composition of `n` is a list of positive integers summing to `n`. -/ @[ext] structure Composition (n : ℕ) where /-- List of positive integers summing to `n`-/ blocks : List ℕ /-- Proof of positivity for `blocks`-/ blocks_pos : ∀ {i}, i ∈ blocks → 0 < i /-- Proof that `blocks` sums to `n`-/ blocks_sum : blocks.sum = n /-- Combinatorial viewpoint on a composition of `n`, by seeing it as non-empty blocks of consecutive integers in `{0, ..., n-1}`. We register every block by its left end-point, yielding a finset containing `0`. As this does not make sense for `n = 0`, we add `n` to this finset, and get a finset of `{0, ..., n}` containing `0` and `n`. This is the data in the structure `CompositionAsSet n`. 
-/ @[ext] structure CompositionAsSet (n : ℕ) where /-- Combinatorial viewpoint on a composition of `n` as consecutive integers `{0, ..., n-1}`-/ boundaries : Finset (Fin n.succ) /-- Proof that `0` is a member of `boundaries`-/ zero_mem : (0 : Fin n.succ) ∈ boundaries /-- Last element of the composition-/ getLast_mem : Fin.last n ∈ boundaries instance {n : ℕ} : Inhabited (CompositionAsSet n) := ⟨⟨Finset.univ, Finset.mem_univ _, Finset.mem_univ _⟩⟩ /-! ### Compositions A composition of an integer `n` is a decomposition `n = i₀ + ... + i_{k-1}` of `n` into a sum of positive integers. -/ namespace Composition variable (c : Composition n) instance (n : ℕ) : ToString (Composition n) := ⟨fun c => toString c.blocks⟩ /-- The length of a composition, i.e., the number of blocks in the composition. -/ abbrev length : ℕ := c.blocks.length theorem blocks_length : c.blocks.length = c.length := rfl /-- The blocks of a composition, seen as a function on `Fin c.length`. When composing analytic functions using compositions, this is the main player. 
-/ def blocksFun : Fin c.length → ℕ := c.blocks.get theorem ofFn_blocksFun : ofFn c.blocksFun = c.blocks := ofFn_get _ theorem sum_blocksFun : ∑ i, c.blocksFun i = n := by conv_rhs => rw [← c.blocks_sum, ← ofFn_blocksFun, sum_ofFn] theorem blocksFun_mem_blocks (i : Fin c.length) : c.blocksFun i ∈ c.blocks := get_mem _ _ _ @[simp] theorem one_le_blocks {i : ℕ} (h : i ∈ c.blocks) : 1 ≤ i := c.blocks_pos h @[simp] theorem one_le_blocks' {i : ℕ} (h : i < c.length) : 1 ≤ c.blocks[i] := c.one_le_blocks (get_mem (blocks c) i h) @[simp] theorem blocks_pos' (i : ℕ) (h : i < c.length) : 0 < c.blocks[i] := c.one_le_blocks' h theorem one_le_blocksFun (i : Fin c.length) : 1 ≤ c.blocksFun i := c.one_le_blocks (c.blocksFun_mem_blocks i) theorem length_le : c.length ≤ n := by conv_rhs => rw [← c.blocks_sum] exact length_le_sum_of_one_le _ fun i hi => c.one_le_blocks hi theorem length_pos_of_pos (h : 0 < n) : 0 < c.length := by apply length_pos_of_sum_pos convert h exact c.blocks_sum /-- The sum of the sizes of the blocks in a composition up to `i`. 
-/ def sizeUpTo (i : ℕ) : ℕ := (c.blocks.take i).sum @[simp] theorem sizeUpTo_zero : c.sizeUpTo 0 = 0 := by simp [sizeUpTo] theorem sizeUpTo_ofLength_le (i : ℕ) (h : c.length ≤ i) : c.sizeUpTo i = n := by dsimp [sizeUpTo] convert c.blocks_sum exact take_of_length_le h @[simp] theorem sizeUpTo_length : c.sizeUpTo c.length = n := c.sizeUpTo_ofLength_le c.length le_rfl theorem sizeUpTo_le (i : ℕ) : c.sizeUpTo i ≤ n := by conv_rhs => rw [← c.blocks_sum, ← sum_take_add_sum_drop _ i] exact Nat.le_add_right _ _ theorem sizeUpTo_succ {i : ℕ} (h : i < c.length) : c.sizeUpTo (i + 1) = c.sizeUpTo i + c.blocks[i] := by simp only [sizeUpTo] rw [sum_take_succ _ _ h] theorem sizeUpTo_succ' (i : Fin c.length) : c.sizeUpTo ((i : ℕ) + 1) = c.sizeUpTo i + c.blocksFun i := c.sizeUpTo_succ i.2 theorem sizeUpTo_strict_mono {i : ℕ} (h : i < c.length) : c.sizeUpTo i < c.sizeUpTo (i + 1) := by rw [c.sizeUpTo_succ h] simp theorem monotone_sizeUpTo : Monotone c.sizeUpTo := monotone_sum_take _ /-- The `i`-th boundary of a composition, i.e., the leftmost point of the `i`-th block. We include a virtual point at the right of the last block, to make for a nice equiv with `CompositionAsSet n`. -/ def boundary : Fin (c.length + 1) ↪o Fin (n + 1) := (OrderEmbedding.ofStrictMono fun i => ⟨c.sizeUpTo i, Nat.lt_succ_of_le (c.sizeUpTo_le i)⟩) <| Fin.strictMono_iff_lt_succ.2 fun ⟨_, hi⟩ => c.sizeUpTo_strict_mono hi @[simp] theorem boundary_zero : c.boundary 0 = 0 := by simp [boundary, Fin.ext_iff] @[simp] theorem boundary_last : c.boundary (Fin.last c.length) = Fin.last n := by simp [boundary, Fin.ext_iff] /-- The boundaries of a composition, i.e., the leftmost point of all the blocks. We include a virtual point at the right of the last block, to make for a nice equiv with `CompositionAsSet n`. 
-/ def boundaries : Finset (Fin (n + 1)) := Finset.univ.map c.boundary.toEmbedding theorem card_boundaries_eq_succ_length : c.boundaries.card = c.length + 1 := by simp [boundaries] /-- To `c : Composition n`, one can associate a `CompositionAsSet n` by registering the leftmost point of each block, and adding a virtual point at the right of the last block. -/ def toCompositionAsSet : CompositionAsSet n where boundaries := c.boundaries zero_mem := by simp only [boundaries, Finset.mem_univ, exists_prop_of_true, Finset.mem_map] exact ⟨0, And.intro True.intro rfl⟩ getLast_mem := by simp only [boundaries, Finset.mem_univ, exists_prop_of_true, Finset.mem_map] exact ⟨Fin.last c.length, And.intro True.intro c.boundary_last⟩ /-- The canonical increasing bijection between `Fin (c.length + 1)` and `c.boundaries` is exactly `c.boundary`. -/ theorem orderEmbOfFin_boundaries : c.boundaries.orderEmbOfFin c.card_boundaries_eq_succ_length = c.boundary := by refine (Finset.orderEmbOfFin_unique' _ ?_).symm exact fun i => (Finset.mem_map' _).2 (Finset.mem_univ _) /-- Embedding the `i`-th block of a composition (identified with `Fin (c.blocksFun i)`) into `Fin n` at the relevant position. -/ def embedding (i : Fin c.length) : Fin (c.blocksFun i) ↪o Fin n := (Fin.natAddOrderEmb <| c.sizeUpTo i).trans <| Fin.castLEOrderEmb <| calc c.sizeUpTo i + c.blocksFun i = c.sizeUpTo (i + 1) := (c.sizeUpTo_succ i.2).symm _ ≤ c.sizeUpTo c.length := monotone_sum_take _ i.2 _ = n := c.sizeUpTo_length @[simp] theorem coe_embedding (i : Fin c.length) (j : Fin (c.blocksFun i)) : (c.embedding i j : ℕ) = c.sizeUpTo i + j := rfl /-- `index_exists` asserts there is some `i` with `j < c.sizeUpTo (i+1)`. In the next definition `index` we use `Nat.find` to produce the minimal such index. 
-/ theorem index_exists {j : ℕ} (h : j < n) : ∃ i : ℕ, j < c.sizeUpTo (i + 1) ∧ i < c.length := by have n_pos : 0 < n := lt_of_le_of_lt (zero_le j) h have : 0 < c.blocks.sum := by rwa [← c.blocks_sum] at n_pos have length_pos : 0 < c.blocks.length := length_pos_of_sum_pos (blocks c) this refine ⟨c.length - 1, ?_, Nat.pred_lt (ne_of_gt length_pos)⟩ have : c.length - 1 + 1 = c.length := Nat.succ_pred_eq_of_pos length_pos simp [this, h] /-- `c.index j` is the index of the block in the composition `c` containing `j`. -/ def index (j : Fin n) : Fin c.length := ⟨Nat.find (c.index_exists j.2), (Nat.find_spec (c.index_exists j.2)).2⟩ theorem lt_sizeUpTo_index_succ (j : Fin n) : (j : ℕ) < c.sizeUpTo (c.index j).succ := (Nat.find_spec (c.index_exists j.2)).1 theorem sizeUpTo_index_le (j : Fin n) : c.sizeUpTo (c.index j) ≤ j := by by_contra H set i := c.index j push_neg at H have i_pos : (0 : ℕ) < i := by by_contra! i_pos revert H simp [nonpos_iff_eq_zero.1 i_pos, c.sizeUpTo_zero] let i₁ := (i : ℕ).pred have i₁_lt_i : i₁ < i := Nat.pred_lt (ne_of_gt i_pos) have i₁_succ : i₁ + 1 = i := Nat.succ_pred_eq_of_pos i_pos have := Nat.find_min (c.index_exists j.2) i₁_lt_i simp [lt_trans i₁_lt_i (c.index j).2, i₁_succ] at this exact Nat.lt_le_asymm H this /-- Mapping an element `j` of `Fin n` to the element in the block containing it, identified with `Fin (c.blocksFun (c.index j))` through the canonical increasing bijection. 
-/ def invEmbedding (j : Fin n) : Fin (c.blocksFun (c.index j)) := ⟨j - c.sizeUpTo (c.index j), by rw [tsub_lt_iff_right, add_comm, ← sizeUpTo_succ'] · exact lt_sizeUpTo_index_succ _ _ · exact sizeUpTo_index_le _ _⟩ @[simp] theorem coe_invEmbedding (j : Fin n) : (c.invEmbedding j : ℕ) = j - c.sizeUpTo (c.index j) := rfl theorem embedding_comp_inv (j : Fin n) : c.embedding (c.index j) (c.invEmbedding j) = j := by rw [Fin.ext_iff] apply add_tsub_cancel_of_le (c.sizeUpTo_index_le j) theorem mem_range_embedding_iff {j : Fin n} {i : Fin c.length} : j ∈ Set.range (c.embedding i) ↔ c.sizeUpTo i ≤ j ∧ (j : ℕ) < c.sizeUpTo (i : ℕ).succ := by constructor · intro h rcases Set.mem_range.2 h with ⟨k, hk⟩ rw [Fin.ext_iff] at hk dsimp at hk rw [← hk] simp [sizeUpTo_succ', k.is_lt] · intro h apply Set.mem_range.2 refine ⟨⟨j - c.sizeUpTo i, ?_⟩, ?_⟩ · rw [tsub_lt_iff_left, ← sizeUpTo_succ'] · exact h.2 · exact h.1 · rw [Fin.ext_iff] exact add_tsub_cancel_of_le h.1 /-- The embeddings of different blocks of a composition are disjoint. 
-/ theorem disjoint_range {i₁ i₂ : Fin c.length} (h : i₁ ≠ i₂) : Disjoint (Set.range (c.embedding i₁)) (Set.range (c.embedding i₂)) := by classical wlog h' : i₁ < i₂ · exact (this c h.symm (h.lt_or_lt.resolve_left h')).symm by_contra d obtain ⟨x, hx₁, hx₂⟩ : ∃ x : Fin n, x ∈ Set.range (c.embedding i₁) ∧ x ∈ Set.range (c.embedding i₂) := Set.not_disjoint_iff.1 d have A : (i₁ : ℕ).succ ≤ i₂ := Nat.succ_le_of_lt h' apply lt_irrefl (x : ℕ) calc (x : ℕ) < c.sizeUpTo (i₁ : ℕ).succ := (c.mem_range_embedding_iff.1 hx₁).2 _ ≤ c.sizeUpTo (i₂ : ℕ) := monotone_sum_take _ A _ ≤ x := (c.mem_range_embedding_iff.1 hx₂).1 theorem mem_range_embedding (j : Fin n) : j ∈ Set.range (c.embedding (c.index j)) := by have : c.embedding (c.index j) (c.invEmbedding j) ∈ Set.range (c.embedding (c.index j)) := Set.mem_range_self _ rwa [c.embedding_comp_inv j] at this theorem mem_range_embedding_iff' {j : Fin n} {i : Fin c.length} : j ∈ Set.range (c.embedding i) ↔ i = c.index j := by constructor · rw [← not_imp_not] intro h exact Set.disjoint_right.1 (c.disjoint_range h) (c.mem_range_embedding j) · intro h rw [h] exact c.mem_range_embedding j theorem index_embedding (i : Fin c.length) (j : Fin (c.blocksFun i)) : c.index (c.embedding i j) = i := by symm rw [← mem_range_embedding_iff'] apply Set.mem_range_self theorem invEmbedding_comp (i : Fin c.length) (j : Fin (c.blocksFun i)) : (c.invEmbedding (c.embedding i j) : ℕ) = j := by simp_rw [coe_invEmbedding, index_embedding, coe_embedding, add_tsub_cancel_left] /-- Equivalence between the disjoint union of the blocks (each of them seen as `Fin (c.blocksFun i)`) with `Fin n`. 
-/ def blocksFinEquiv : (Σi : Fin c.length, Fin (c.blocksFun i)) ≃ Fin n where toFun x := c.embedding x.1 x.2 invFun j := ⟨c.index j, c.invEmbedding j⟩ left_inv x := by rcases x with ⟨i, y⟩ dsimp congr; · exact c.index_embedding _ _ rw [Fin.heq_ext_iff] · exact c.invEmbedding_comp _ _ · rw [c.index_embedding] right_inv j := c.embedding_comp_inv j theorem blocksFun_congr {n₁ n₂ : ℕ} (c₁ : Composition n₁) (c₂ : Composition n₂) (i₁ : Fin c₁.length) (i₂ : Fin c₂.length) (hn : n₁ = n₂) (hc : c₁.blocks = c₂.blocks) (hi : (i₁ : ℕ) = i₂) : c₁.blocksFun i₁ = c₂.blocksFun i₂ := by cases hn rw [← Composition.ext_iff] at hc cases hc congr rwa [Fin.ext_iff] /-- Two compositions (possibly of different integers) coincide if and only if they have the same sequence of blocks. -/ theorem sigma_eq_iff_blocks_eq {c : Σn, Composition n} {c' : Σn, Composition n} : c = c' ↔ c.2.blocks = c'.2.blocks := by refine ⟨fun H => by rw [H], fun H => ?_⟩ rcases c with ⟨n, c⟩ rcases c' with ⟨n', c'⟩ have : n = n' := by rw [← c.blocks_sum, ← c'.blocks_sum, H] induction this congr ext1 exact H /-! ### The composition `Composition.ones` -/ /-- The composition made of blocks all of size `1`. 
-/
def ones (n : ℕ) : Composition n :=
  ⟨replicate n (1 : ℕ), fun {i} hi => by simp [List.eq_of_mem_replicate hi], by simp⟩

instance {n : ℕ} : Inhabited (Composition n) :=
  ⟨Composition.ones n⟩

@[simp]
theorem ones_length (n : ℕ) : (ones n).length = n :=
  List.length_replicate n 1

@[simp]
theorem ones_blocks (n : ℕ) : (ones n).blocks = replicate n (1 : ℕ) :=
  rfl

@[simp]
theorem ones_blocksFun (n : ℕ) (i : Fin (ones n).length) : (ones n).blocksFun i = 1 := by
  simp only [blocksFun, ones, get_eq_getElem, getElem_replicate]

@[simp]
theorem ones_sizeUpTo (n : ℕ) (i : ℕ) : (ones n).sizeUpTo i = min i n := by
  simp [sizeUpTo, ones_blocks, take_replicate]

@[simp]
theorem ones_embedding (i : Fin (ones n).length) (h : 0 < (ones n).blocksFun i) :
    (ones n).embedding i ⟨0, h⟩ = ⟨i, lt_of_lt_of_le i.2 (ones n).length_le⟩ := by
  ext
  simpa using i.2.le

/-- A composition is `ones n` exactly when all its blocks equal `1`. -/
theorem eq_ones_iff {c : Composition n} : c = ones n ↔ ∀ i ∈ c.blocks, i = 1 := by
  constructor
  · rintro rfl
    exact fun i => eq_of_mem_replicate
  · intro H
    ext1
    -- All blocks are `1`, so `c.blocks` is a replicate list whose length must be `n`
    -- since the blocks sum to `n`.
    have A : c.blocks = replicate c.blocks.length 1 := eq_replicate_of_mem H
    have : c.blocks.length = n := by
      conv_rhs => rw [← c.blocks_sum, A]
      simp
    rw [A, this, ones_blocks]

theorem ne_ones_iff {c : Composition n} : c ≠ ones n ↔ ∃ i ∈ c.blocks, 1 < i := by
  refine (not_congr eq_ones_iff).trans ?_
  -- Since blocks are positive, `i = 1` is equivalent to `i ≤ 1` here.
  have : ∀ j ∈ c.blocks, j = 1 ↔ j ≤ 1 := fun j hj => by
    simp [le_antisymm_iff, c.one_le_blocks hj]
  simp (config := { contextual := true }) [this]

theorem eq_ones_iff_length {c : Composition n} : c = ones n ↔ c.length = n := by
  constructor
  · rintro rfl
    exact ones_length n
  · contrapose
    intro H length_n
    apply lt_irrefl n
    -- If `c ≠ ones n`, some block exceeds `1`, so the sum of the blocks strictly
    -- exceeds the number of blocks; but both equal `n`, a contradiction.
    calc
      n = ∑ i : Fin c.length, 1 := by simp [length_n]
      _ < ∑ i : Fin c.length, c.blocksFun i := by
        { obtain ⟨i, hi, i_blocks⟩ : ∃ i ∈ c.blocks, 1 < i := ne_ones_iff.1 H
          rw [← ofFn_blocksFun, mem_ofFn c.blocksFun, Set.mem_range] at hi
          obtain ⟨j : Fin c.length, hj : c.blocksFun j = i⟩ := hi
          rw [← hj] at i_blocks
          exact Finset.sum_lt_sum (fun i _ => one_le_blocksFun c i) ⟨j,
            Finset.mem_univ _, i_blocks⟩ }
      _ = n := c.sum_blocksFun

theorem eq_ones_iff_le_length {c : Composition n} : c = ones n ↔ n ≤ c.length := by
  simp [eq_ones_iff_length, le_antisymm_iff, c.length_le]

/-! ### The composition `Composition.single` -/

/-- The composition made of a single block of size `n`. -/
def single (n : ℕ) (h : 0 < n) : Composition n :=
  ⟨[n], by simp [h], by simp⟩

@[simp]
theorem single_length {n : ℕ} (h : 0 < n) : (single n h).length = 1 :=
  rfl

@[simp]
theorem single_blocks {n : ℕ} (h : 0 < n) : (single n h).blocks = [n] :=
  rfl

@[simp]
theorem single_blocksFun {n : ℕ} (h : 0 < n) (i : Fin (single n h).length) :
    (single n h).blocksFun i = n := by simp [blocksFun, single, blocks, i.2]

@[simp]
theorem single_embedding {n : ℕ} (h : 0 < n) (i : Fin n) :
    ((single n h).embedding (0 : Fin 1)) i = i := by
  ext
  simp

/-- A composition is the single-block composition exactly when it has one block. -/
theorem eq_single_iff_length {n : ℕ} (h : 0 < n) {c : Composition n} :
    c = single n h ↔ c.length = 1 := by
  constructor
  · intro H
    rw [H]
    exact single_length h
  · intro H
    ext1
    -- A length-one blocks list summing to `n` must be `[n]`.
    have A : c.blocks.length = 1 := H ▸ c.blocks_length
    have B : c.blocks.sum = n := c.blocks_sum
    rw [eq_cons_of_length_one A] at B ⊢
    simpa [single_blocks] using B

theorem ne_single_iff {n : ℕ} (hn : 0 < n) {c : Composition n} :
    c ≠ single n hn ↔ ∀ i, c.blocksFun i < n := by
  rw [← not_iff_not]
  push_neg
  constructor
  · rintro rfl
    exact ⟨⟨0, by simp⟩, by simp⟩
  · rintro ⟨i, hi⟩
    rw [eq_single_iff_length]
    -- If one block already has size `≥ n`, then there can be no other block:
    -- an additional positive block would push the total sum above `n`.
    have : ∀ j : Fin c.length, j = i := by
      intro j
      by_contra ji
      apply lt_irrefl (∑ k, c.blocksFun k)
      calc
        ∑ k, c.blocksFun k ≤ c.blocksFun i := by simp only [c.sum_blocksFun, hi]
        _ < ∑ k, c.blocksFun k :=
          Finset.single_lt_sum ji (Finset.mem_univ _) (Finset.mem_univ _)
            (c.one_le_blocksFun j) fun _ _ _ => zero_le _
    simpa using Fintype.card_eq_one_of_forall_eq this

end Composition

/-! ### Splitting a list

Given a list of length `n` and a composition `c` of `n`, one can split `l` into `c.length` sublists
of respective lengths `c.blocksFun 0`, ..., `c.blocksFun (c.length-1)`.
This is inverse to the join operation.
-/

namespace List

variable {α : Type*}

/-- Auxiliary for `List.splitWrtComposition`: split `l` according to the list of
block sizes `ns`, peeling off one prefix per recursive step. -/
def splitWrtCompositionAux : List α → List ℕ → List (List α)
  | _, [] => []
  | l, n::ns =>
    let (l₁, l₂) := l.splitAt n
    l₁::splitWrtCompositionAux l₂ ns

/-- Given a list of length `n` and a composition `[i₁, ..., iₖ]` of `n`, split `l` into a list of
`k` lists corresponding to the blocks of the composition, of respective lengths `i₁`, ..., `iₖ`.
This makes sense mostly when `n = l.length`, but this is not necessary for the definition. -/
def splitWrtComposition (l : List α) (c : Composition n) : List (List α) :=
  splitWrtCompositionAux l c.blocks

-- Porting note: can't refer to subeqn in Lean 4 this way, and seems to definitionally simp
--attribute [local simp] splitWrtCompositionAux.equations._eqn_1

@[local simp]
theorem splitWrtCompositionAux_cons (l : List α) (n ns) :
    l.splitWrtCompositionAux (n::ns) = take n l::(drop n l).splitWrtCompositionAux ns := by
  simp [splitWrtCompositionAux]

/-- The auxiliary split produces exactly one sublist per block size. -/
theorem length_splitWrtCompositionAux (l : List α) (ns) :
    length (l.splitWrtCompositionAux ns) = ns.length := by
  -- Straightforward induction on the list of block sizes.
  induction ns generalizing l
  · simp [splitWrtCompositionAux, *]
  · simp [*]

/-- When one splits a list along a composition `c`, the number of sublists thus created is
`c.length`.
of a list `l` along a composition `c`, is the slice of `l` between the indices `c.sizeUpTo i`
and `c.sizeUpTo (i+1)`, i.e., the indices in the `i`-th block of the composition. -/
theorem getElem_splitWrtComposition' (l : List α) (c : Composition n) {i : ℕ}
    (hi : i < (l.splitWrtComposition c).length) :
    (l.splitWrtComposition c)[i] = (l.take (c.sizeUpTo (i + 1))).drop (c.sizeUpTo i) :=
  getElem_splitWrtCompositionAux _ _ hi

-- Porting note: restatement of `get_splitWrtComposition`
theorem getElem_splitWrtComposition (l : List α) (c : Composition n)
    (i : Nat) (h : i < (l.splitWrtComposition c).length) :
    (l.splitWrtComposition c)[i] = (l.take (c.sizeUpTo (i + 1))).drop (c.sizeUpTo i) :=
  getElem_splitWrtComposition' _ _ h

@[deprecated getElem_splitWrtCompositionAux (since := "2024-06-12")]
theorem get_splitWrtCompositionAux (l : List α) (ns : List ℕ) {i : ℕ} (hi) :
    (l.splitWrtCompositionAux ns).get ⟨i, hi⟩ =
      (l.take (ns.take (i + 1)).sum).drop (ns.take i).sum := by
  simp [getElem_splitWrtCompositionAux]

/-- The `i`-th sublist in the splitting of a list `l` along a composition `c`, is the slice of `l`
between the indices `c.sizeUpTo i` and `c.sizeUpTo (i+1)`, i.e., the indices in the `i`-th block of
the composition.
-/
@[deprecated getElem_splitWrtComposition' (since := "2024-06-12")]
theorem get_splitWrtComposition' (l : List α) (c : Composition n) {i : ℕ}
    (hi : i < (l.splitWrtComposition c).length) :
    (l.splitWrtComposition c).get ⟨i, hi⟩ =
      (l.take (c.sizeUpTo (i + 1))).drop (c.sizeUpTo i) := by
  simp [getElem_splitWrtComposition']

-- Porting note: restatement of `get_splitWrtComposition`
@[deprecated getElem_splitWrtComposition (since := "2024-06-12")]
theorem get_splitWrtComposition (l : List α) (c : Composition n)
    (i : Fin (l.splitWrtComposition c).length) :
    get (l.splitWrtComposition c) i = (l.take (c.sizeUpTo (i + 1))).drop (c.sizeUpTo i) := by
  simp [getElem_splitWrtComposition]

/-- Joining the auxiliary split recovers the original list, provided the block sizes
sum to the list's length. -/
theorem join_splitWrtCompositionAux {ns : List ℕ} :
    ∀ {l : List α}, ns.sum = l.length → (l.splitWrtCompositionAux ns).join = l := by
  -- Induction on the block sizes; each step reassembles `take n l` with the
  -- recursively rejoined `drop n l`.
  induction' ns with n ns IH <;> intro l h <;> simp at h
  · exact (length_eq_zero.1 h.symm).symm
  simp only [splitWrtCompositionAux_cons]; dsimp
  rw [IH]
  · simp
  · rw [length_drop, ← h, add_tsub_cancel_left]

/-- If one splits a list along a composition, and then joins the sublists, one gets back the
original list. -/
@[simp]
theorem join_splitWrtComposition (l : List α) (c : Composition l.length) :
    (l.splitWrtComposition c).join = l :=
  join_splitWrtCompositionAux c.blocks_sum

/-- If one joins a list of lists and then splits the join along the right composition, one gets
back the original list of lists. -/
@[simp]
theorem splitWrtComposition_join (L : List (List α)) (c : Composition L.join.length)
    (h : map length L = c.blocks) : splitWrtComposition (join L) c = L := by
  simp only [eq_self_iff_true, and_self_iff, eq_iff_join_eq, join_splitWrtComposition,
    map_length_splitWrtComposition, h]

end List

/-! ### Compositions as sets

Combinatorial viewpoints on compositions, seen as finite subsets of `Fin (n+1)` containing `0` and
`n`, where the points of the set (other than `n`) correspond to the leftmost points of each block.
-/

/-- Bijection between compositions of `n` and subsets of `{0, ..., n-2}`, defined by
considering the restriction of the subset to `{1, ..., n-1}` and shifting to the left by one. -/
def compositionAsSetEquiv (n : ℕ) : CompositionAsSet n ≃ Finset (Fin (n - 1)) where
  -- Forward map: keep the interior boundaries (dropping the forced `0` and `n`),
  -- shifted down by one so they live in `Fin (n - 1)`.
  toFun c :=
    { i : Fin (n - 1) |
        (⟨1 + (i : ℕ), by
              apply (add_lt_add_left i.is_lt 1).trans_le
              rw [Nat.succ_eq_add_one, add_comm]
              exact add_le_add (Nat.sub_le n 1) (le_refl 1)⟩ :
            Fin n.succ) ∈
          c.boundaries }.toFinset
  -- Inverse map: reinstate `0` and `Fin.last n` and shift the chosen points up by one.
  invFun s :=
    { boundaries :=
        { i : Fin n.succ |
            i = 0 ∨ i = Fin.last n ∨ ∃ (j : Fin (n - 1)) (_hj : j ∈ s), (i : ℕ) = j + 1 }.toFinset
      zero_mem := by simp
      getLast_mem := by simp }
  left_inv := by
    intro c
    ext i
    simp only [add_comm, Set.toFinset_setOf, Finset.mem_univ, forall_true_left, Finset.mem_filter,
      true_and, exists_prop]
    constructor
    · rintro (rfl | rfl | ⟨j, hj1, hj2⟩)
      · exact c.zero_mem
      · exact c.getLast_mem
      · convert hj1
    · simp only [or_iff_not_imp_left]
      intro i_mem i_ne_zero i_ne_last
      simp? [Fin.ext_iff] at i_ne_zero i_ne_last says
        simp only [Nat.succ_eq_add_one, Fin.ext_iff, Fin.val_zero, Fin.val_last] at i_ne_zero
          i_ne_last
      -- An interior boundary `i` satisfies `1 + (i - 1) = i`, exhibiting it as a shifted point.
      have A : (1 + (i - 1) : ℕ) = (i : ℕ) := by
        rw [add_comm]
        exact Nat.succ_pred_eq_of_pos (pos_iff_ne_zero.mpr i_ne_zero)
      refine ⟨⟨i - 1, ?_⟩, ?_, ?_⟩
      · have : (i : ℕ) < n + 1 := i.2
        simp? [Nat.lt_succ_iff_lt_or_eq, i_ne_last] at this says
          simp only [Nat.succ_eq_add_one, Nat.lt_succ_iff_lt_or_eq, i_ne_last, or_false] at this
        exact Nat.pred_lt_pred i_ne_zero this
      · convert i_mem
        simp only
        rwa [add_comm]
      · simp only
        symm
        rwa [add_comm]
  right_inv := by
    intro s
    ext i
    -- A shifted interior point `1 + i` can be neither `0` nor `n`.
    have : 1 + (i : ℕ) ≠ n := by
      apply ne_of_lt
      convert add_lt_add_left i.is_lt 1
      rw [add_comm]
      apply (Nat.succ_pred_eq_of_pos _).symm
      exact (zero_le i.val).trans_lt (i.2.trans_le (Nat.sub_le n 1))
    simp only [add_comm, Fin.ext_iff, Fin.val_zero, Fin.val_last, exists_prop,
      Set.toFinset_setOf, Finset.mem_univ, forall_true_left, Finset.mem_filter, add_eq_zero_iff,
      and_false, add_left_inj, false_or, true_and]
    erw [Set.mem_setOf_eq]
    simp [this, false_or_iff, add_right_inj, add_eq_zero_iff, one_ne_zero, false_and_iff,
      Fin.val_mk]
    constructor
    · intro h
      cases' h with n h
      · rw [add_comm] at this
        contradiction
      · cases' h with w h; cases' h with h₁ h₂
        rw [← Fin.ext_iff] at h₂
        rwa [h₂]
    · intro h
      apply Or.inr
      use i, h

instance compositionAsSetFintype (n : ℕ) : Fintype (CompositionAsSet n) :=
  Fintype.ofEquiv _ (compositionAsSetEquiv n).symm

/-- There are `2 ^ (n - 1)` compositions of `n`, seen as sets: transport the count of
subsets of `Fin (n - 1)` through `compositionAsSetEquiv`. -/
theorem compositionAsSet_card (n : ℕ) : Fintype.card (CompositionAsSet n) = 2 ^ (n - 1) := by
  have : Fintype.card (Finset (Fin (n - 1))) = 2 ^ (n - 1) := by simp
  rw [← this]
  exact Fintype.card_congr (compositionAsSetEquiv n)

namespace CompositionAsSet

variable (c : CompositionAsSet n)

theorem boundaries_nonempty : c.boundaries.Nonempty :=
  ⟨0, c.zero_mem⟩

theorem card_boundaries_pos : 0 < Finset.card c.boundaries :=
  Finset.card_pos.mpr c.boundaries_nonempty

/-- Number of blocks in a `CompositionAsSet`.
-/
def length : ℕ :=
  Finset.card c.boundaries - 1

theorem card_boundaries_eq_succ_length : c.boundaries.card = c.length + 1 :=
  (tsub_eq_iff_eq_add_of_le (Nat.succ_le_of_lt c.card_boundaries_pos)).mp rfl

theorem length_lt_card_boundaries : c.length < c.boundaries.card := by
  rw [c.card_boundaries_eq_succ_length]
  exact lt_add_one _

theorem lt_length (i : Fin c.length) : (i : ℕ) + 1 < c.boundaries.card :=
  lt_tsub_iff_right.mp i.2

theorem lt_length' (i : Fin c.length) : (i : ℕ) < c.boundaries.card :=
  lt_of_le_of_lt (Nat.le_succ i) (c.lt_length i)

/-- Canonical increasing bijection from `Fin c.boundaries.card` to `c.boundaries`. -/
def boundary : Fin c.boundaries.card ↪o Fin (n + 1) :=
  c.boundaries.orderEmbOfFin rfl

@[simp]
theorem boundary_zero : (c.boundary ⟨0, c.card_boundaries_pos⟩ : Fin (n + 1)) = 0 := by
  rw [boundary, Finset.orderEmbOfFin_zero rfl c.card_boundaries_pos]
  -- The minimum of the boundaries is `0`, since `0` is a boundary and is a bottom element.
  exact le_antisymm (Finset.min'_le _ _ c.zero_mem) (Fin.zero_le _)

@[simp]
theorem boundary_length : c.boundary ⟨c.length, c.length_lt_card_boundaries⟩ = Fin.last n := by
  convert Finset.orderEmbOfFin_last rfl c.card_boundaries_pos
  -- The maximum of the boundaries is `Fin.last n`, since it is a boundary and a top element.
  exact le_antisymm (Finset.le_max' _ _ c.getLast_mem) (Fin.le_last _)

/-- Size of the `i`-th block in a `CompositionAsSet`, seen as a function on `Fin c.length`:
the distance between two consecutive boundaries. -/
def blocksFun (i : Fin c.length) : ℕ :=
  c.boundary ⟨(i : ℕ) + 1, c.lt_length i⟩ - c.boundary ⟨i, c.lt_length' i⟩

theorem blocksFun_pos (i : Fin c.length) : 0 < c.blocksFun i :=
  -- Consecutive boundaries are distinct because `boundary` is strictly monotone.
  haveI : (⟨i, c.lt_length' i⟩ : Fin c.boundaries.card) < ⟨i + 1, c.lt_length i⟩ :=
    Nat.lt_succ_self _
  lt_tsub_iff_left.mpr ((c.boundaries.orderEmbOfFin rfl).strictMono this)

/-- List of the sizes of the blocks in a `CompositionAsSet`.
-/
def blocks (c : CompositionAsSet n) : List ℕ :=
  ofFn c.blocksFun

@[simp]
theorem blocks_length : c.blocks.length = c.length :=
  length_ofFn _

/-- The sum of the first `i` block sizes is the `i`-th boundary. -/
theorem blocks_partial_sum {i : ℕ} (h : i < c.boundaries.card) :
    (c.blocks.take i).sum = c.boundary ⟨i, h⟩ := by
  -- Induction on `i`: each step adds one more block size, i.e. the gap between
  -- two consecutive boundaries, so the sum telescopes.
  induction' i with i IH
  · simp
  have A : i < c.blocks.length := by
    rw [c.card_boundaries_eq_succ_length] at h
    simp [blocks, Nat.lt_of_succ_lt_succ h]
  have B : i < c.boundaries.card := lt_of_lt_of_le A (by simp [blocks, length, Nat.sub_le])
  rw [sum_take_succ _ _ A, IH B]
  simp [blocks, blocksFun, get_ofFn]

theorem mem_boundaries_iff_exists_blocks_sum_take_eq {j : Fin (n + 1)} :
    j ∈ c.boundaries ↔ ∃ i < c.boundaries.card, (c.blocks.take i).sum = j := by
  constructor
  · intro hj
    rcases (c.boundaries.orderIsoOfFin rfl).surjective ⟨j, hj⟩ with ⟨i, hi⟩
    rw [Subtype.ext_iff, Subtype.coe_mk] at hi
    refine ⟨i.1, i.2, ?_⟩
    dsimp at hi
    rw [← hi, c.blocks_partial_sum i.2]
    rfl
  · rintro ⟨i, hi, H⟩
    convert (c.boundaries.orderIsoOfFin rfl ⟨i, hi⟩).2
    have : c.boundary ⟨i, hi⟩ = j := by rwa [Fin.ext_iff, ← c.blocks_partial_sum hi]
    exact this.symm

theorem blocks_sum : c.blocks.sum = n := by
  -- The total sum is the last partial sum, i.e. the last boundary `Fin.last n`.
  have : c.blocks.take c.length = c.blocks := take_of_length_le (by simp [blocks])
  rw [← this, c.blocks_partial_sum c.length_lt_card_boundaries, c.boundary_length]
  rfl

/-- Associating a `Composition n` to a `CompositionAsSet n`, by registering the sizes of the
blocks as a list of positive integers. -/
def toComposition : Composition n where
  blocks := c.blocks
  blocks_pos := by simp only [blocks, forall_mem_ofFn_iff, blocksFun_pos c, forall_true_iff]
  blocks_sum := c.blocks_sum

end CompositionAsSet

/-! ### Equivalence between compositions and compositions as sets

In this section, we explain how to go back and forth between a `Composition` and a
`CompositionAsSet`, by showing that their `blocks` and `length` and `boundaries` correspond to
each other, and construct an equivalence between them called `compositionEquiv`.
-/

@[simp]
theorem Composition.toCompositionAsSet_length (c : Composition n) :
    c.toCompositionAsSet.length = c.length := by
  simp [Composition.toCompositionAsSet, CompositionAsSet.length, c.card_boundaries_eq_succ_length]

@[simp]
theorem CompositionAsSet.toComposition_length (c : CompositionAsSet n) :
    c.toComposition.length = c.length := by
  simp [CompositionAsSet.toComposition, Composition.length, Composition.blocks]

@[simp]
theorem Composition.toCompositionAsSet_blocks (c : Composition n) :
    c.toCompositionAsSet.blocks = c.blocks := by
  let d := c.toCompositionAsSet
  change d.blocks = c.blocks
  have length_eq : d.blocks.length = c.blocks.length := by simp [d, blocks_length]
  -- Two lists of the same length with equal partial sums coincide; the partial sums
  -- on both sides are the boundaries of `c`.
  suffices H : ∀ i ≤ d.blocks.length, (d.blocks.take i).sum = (c.blocks.take i).sum from
    eq_of_sum_take_eq length_eq H
  intro i hi
  have i_lt : i < d.boundaries.card := by
    -- Porting note: relied on `convert` unfolding definitions, switched to using a `simpa`
    simpa [CompositionAsSet.blocks, length_ofFn, d.card_boundaries_eq_succ_length] using
      Nat.lt_succ_iff.2 hi
  have i_lt' : i < c.boundaries.card := i_lt
  have i_lt'' : i < c.length + 1 := by rwa [c.card_boundaries_eq_succ_length] at i_lt'
  have A :
    d.boundaries.orderEmbOfFin rfl ⟨i, i_lt⟩ =
      c.boundaries.orderEmbOfFin c.card_boundaries_eq_succ_length ⟨i, i_lt''⟩ :=
    rfl
  have B : c.sizeUpTo i = c.boundary ⟨i, i_lt''⟩ := rfl
  rw [d.blocks_partial_sum i_lt, CompositionAsSet.boundary, ← Composition.sizeUpTo, B, A,
    c.orderEmbOfFin_boundaries]

@[simp]
theorem CompositionAsSet.toComposition_blocks (c : CompositionAsSet n) :
    c.toComposition.blocks = c.blocks :=
  rfl

@[simp]
theorem CompositionAsSet.toComposition_boundaries (c : CompositionAsSet n) :
    c.toComposition.boundaries = c.boundaries := by
  ext j
  simp only [c.mem_boundaries_iff_exists_blocks_sum_take_eq, Composition.boundaries,
    Finset.mem_map]
  constructor
  · rintro ⟨i, _, hi⟩
    refine ⟨i.1, ?_, ?_⟩
    · simpa [c.card_boundaries_eq_succ_length] using i.2
    · simp [Composition.boundary, Composition.sizeUpTo, ← hi]
  ·
    rintro ⟨i, i_lt, hi⟩
    refine ⟨i, by simp, ?_⟩
    rw [c.card_boundaries_eq_succ_length] at i_lt
    simp [Composition.boundary, Nat.mod_eq_of_lt i_lt, Composition.sizeUpTo, hi]

@[simp]
theorem Composition.toCompositionAsSet_boundaries (c : Composition n) :
    c.toCompositionAsSet.boundaries = c.boundaries :=
  rfl

/-- Equivalence between `Composition n` and `CompositionAsSet n`. -/
def compositionEquiv (n : ℕ) : Composition n ≃ CompositionAsSet n where
  toFun c := c.toCompositionAsSet
  invFun c := c.toComposition
  left_inv c := by
    ext1
    exact c.toCompositionAsSet_blocks
  right_inv c := by
    ext1
    exact c.toComposition_boundaries

instance compositionFintype (n : ℕ) : Fintype (Composition n) :=
  Fintype.ofEquiv _ (compositionEquiv n).symm

/-- There are `2 ^ (n - 1)` compositions of `n`: transport `compositionAsSet_card`
through the equivalence `compositionEquiv`. -/
theorem composition_card (n : ℕ) : Fintype.card (Composition n) = 2 ^ (n - 1) := by
  rw [← compositionAsSet_card n]
  exact Fintype.card_congr (compositionEquiv n)
Combinatorics\Enumerative\DoubleCounting.lean
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.Order.BigOperators.Group.Finset
import Mathlib.Data.Set.Subsingleton

/-!
# Double countings

This file gathers a few double counting arguments.

## Bipartite graphs

In a bipartite graph (considered as a relation `r : α → β → Prop`), we can bound the number of
edges between `s : Finset α` and `t : Finset β` by the minimum/maximum of edges over all `a ∈ s`
times the size of `s`. Similarly for `t`. Combining those two yields inequalities between the
sizes of `s` and `t`.

* `bipartiteBelow`: `s.bipartiteBelow r b` are the elements of `s` below `b` wrt to `r`. Its size
  is the number of edges of `b` in `s`.
* `bipartiteAbove`: `t.bipartite_Above r a` are the elements of `t` above `a` wrt to `r`. Its size
  is the number of edges of `a` in `t`.
* `card_mul_le_card_mul`, `card_mul_le_card_mul'`: Double counting the edges of a bipartite graph
  from below and from above.
* `card_mul_eq_card_mul`: Equality combination of the previous.
-/

open Finset Function Relator

variable {α β : Type*}

/-! ### Bipartite graph -/

namespace Finset

section Bipartite

variable (r : α → β → Prop) (s : Finset α) (t : Finset β) (a a' : α) (b b' : β)
  [DecidablePred (r a)] [∀ a, Decidable (r a b)] {m n : ℕ}

/-- Elements of `s` which are "below" `b` according to relation `r`. -/
def bipartiteBelow : Finset α :=
  s.filter fun a ↦ r a b

/-- Elements of `t` which are "above" `a` according to relation `r`.
-/
def bipartiteAbove : Finset β :=
  t.filter (r a)

theorem bipartiteBelow_swap : t.bipartiteBelow (swap r) a = t.bipartiteAbove r a :=
  rfl

theorem bipartiteAbove_swap : s.bipartiteAbove (swap r) b = s.bipartiteBelow r b :=
  rfl

@[simp, norm_cast]
theorem coe_bipartiteBelow : (s.bipartiteBelow r b : Set α) = { a ∈ s | r a b } :=
  coe_filter _ _

@[simp, norm_cast]
theorem coe_bipartiteAbove : (t.bipartiteAbove r a : Set β) = { b ∈ t | r a b } :=
  coe_filter _ _

variable {s t a a' b b'}

@[simp]
theorem mem_bipartiteBelow {a : α} : a ∈ s.bipartiteBelow r b ↔ a ∈ s ∧ r a b :=
  mem_filter

@[simp]
theorem mem_bipartiteAbove {b : β} : b ∈ t.bipartiteAbove r a ↔ b ∈ t ∧ r a b :=
  mem_filter

/-- Counting the edges of the bipartite graph from the `s` side or from the `t` side gives
the same total: both sides count the pairs `(a, b)` with `a ∈ s`, `b ∈ t` and `r a b`. -/
theorem sum_card_bipartiteAbove_eq_sum_card_bipartiteBelow [∀ a b, Decidable (r a b)] :
    (∑ a ∈ s, (t.bipartiteAbove r a).card) = ∑ b ∈ t, (s.bipartiteBelow r b).card := by
  -- Rewrite both sides as a double sum of indicators and swap the order of summation.
  simp_rw [card_eq_sum_ones, bipartiteAbove, bipartiteBelow, sum_filter]
  exact sum_comm

/-- Double counting argument. Considering `r` as a bipartite graph, the LHS is a lower bound on the
number of edges while the RHS is an upper bound.
-/
theorem card_mul_le_card_mul [∀ a b, Decidable (r a b)]
    (hm : ∀ a ∈ s, m ≤ (t.bipartiteAbove r a).card)
    (hn : ∀ b ∈ t, (s.bipartiteBelow r b).card ≤ n) : s.card * m ≤ t.card * n :=
  calc
    _ ≤ ∑ a ∈ s, (t.bipartiteAbove r a).card := s.card_nsmul_le_sum _ _ hm
    _ = ∑ b ∈ t, (s.bipartiteBelow r b).card :=
      sum_card_bipartiteAbove_eq_sum_card_bipartiteBelow _
    _ ≤ _ := t.sum_le_card_nsmul _ _ hn

/-- Dual of `card_mul_le_card_mul`, obtained by swapping the sides of the relation. -/
theorem card_mul_le_card_mul' [∀ a b, Decidable (r a b)]
    (hn : ∀ b ∈ t, n ≤ (s.bipartiteBelow r b).card)
    (hm : ∀ a ∈ s, (t.bipartiteAbove r a).card ≤ m) : t.card * n ≤ s.card * m :=
  card_mul_le_card_mul (swap r) hn hm

/-- When all degrees on each side are exactly `m` resp. `n`, double counting is an equality. -/
theorem card_mul_eq_card_mul [∀ a b, Decidable (r a b)]
    (hm : ∀ a ∈ s, (t.bipartiteAbove r a).card = m)
    (hn : ∀ b ∈ t, (s.bipartiteBelow r b).card = n) : s.card * m = t.card * n :=
  (card_mul_le_card_mul _ (fun a ha ↦ (hm a ha).ge) fun b hb ↦ (hn b hb).le).antisymm <|
    card_mul_le_card_mul' _ (fun a ha ↦ (hn a ha).ge) fun b hb ↦ (hm b hb).le

/-- If every `a ∈ s` has an `r`-neighbour in `t` and every `b ∈ t` has at most one
`r`-neighbour in `s`, then `s` is at most as large as `t`. -/
theorem card_le_card_of_forall_subsingleton (hs : ∀ a ∈ s, ∃ b, b ∈ t ∧ r a b)
    (ht : ∀ b ∈ t, ({ a ∈ s | r a b } : Set α).Subsingleton) : s.card ≤ t.card := by
  classical
  -- Apply double counting with degree bounds `m = n = 1`.
  rw [← mul_one s.card, ← mul_one t.card]
  exact card_mul_le_card_mul r
    (fun a h ↦ card_pos.2 (by
      rw [← coe_nonempty, coe_bipartiteAbove]
      exact hs _ h : (t.bipartiteAbove r a).Nonempty))
    (fun b h ↦ card_le_one.2 (by
      simp_rw [mem_bipartiteBelow]
      exact ht _ h))

/-- Dual of `card_le_card_of_forall_subsingleton`, obtained by swapping the relation. -/
theorem card_le_card_of_forall_subsingleton' (ht : ∀ b ∈ t, ∃ a, a ∈ s ∧ r a b)
    (hs : ∀ a ∈ s, ({ b ∈ t | r a b } : Set β).Subsingleton) : t.card ≤ s.card :=
  card_le_card_of_forall_subsingleton (swap r) ht hs

end Bipartite

end Finset

open Finset

namespace Fintype

variable [Fintype α] [Fintype β] {r : α → β → Prop}

theorem card_le_card_of_leftTotal_unique (h₁ : LeftTotal r) (h₂ : LeftUnique r) :
    Fintype.card α ≤ Fintype.card β :=
  card_le_card_of_forall_subsingleton r (by simpa using h₁) fun b _ a₁ ha₁ a₂ ha₂ ↦ h₂ ha₁.2 ha₂.2

theorem card_le_card_of_rightTotal_unique (h₁ : RightTotal r) (h₂ : RightUnique r) :
    Fintype.card β ≤ Fintype.card α :=
  card_le_card_of_forall_subsingleton' r (by simpa using h₁) fun b _ a₁ ha₁ a₂ ha₂ ↦ h₂ ha₁.2 ha₂.2

end Fintype
Combinatorics\Enumerative\Partition.lean
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import Mathlib.Combinatorics.Enumerative.Composition
import Mathlib.Tactic.ApplyFun

/-!
# Partitions

A partition of a natural number `n` is a way of writing `n` as a sum of positive integers, where
the order does not matter: two sums that differ only in the order of their summands are considered
the same partition. This notion is closely related to that of a composition of `n`, but in a
composition of `n` the order does matter.
A summand of the partition is called a part.

## Main functions

* `p : Partition n` is a structure, made of a multiset of integers which are all positive and add
  up to `n`.

## Implementation details

The main motivation for this structure and its API is to show Euler's partition theorem, and
related results.

The representation of a partition as a multiset is very handy as multisets are very flexible and
already have a well-developed API.

## TODO

Link this to Young diagrams.

## Tags

Partition

## References

<https://en.wikipedia.org/wiki/Partition_(number_theory)>
-/

open Multiset

namespace Nat

/-- A partition of `n` is a multiset of positive integers summing to `n`. -/
@[ext]
structure Partition (n : ℕ) where
  /-- positive integers summing to `n`-/
  parts : Multiset ℕ
  /-- proof that the `parts` are positive-/
  parts_pos : ∀ {i}, i ∈ parts → 0 < i
  /-- proof that the `parts` sum to `n`-/
  parts_sum : parts.sum = n
-- Porting note: chokes on `parts_pos`
--deriving DecidableEq

namespace Partition

-- TODO: This should be automatically derived, see lean4#2914
instance decidableEqPartition {n : ℕ} : DecidableEq (Partition n) :=
  fun _ _ => decidable_of_iff' _ Partition.ext_iff

/-- A composition induces a partition (just convert the list to a multiset).
-/
@[simps]
def ofComposition (n : ℕ) (c : Composition n) : Partition n where
  parts := c.blocks
  parts_pos hi := c.blocks_pos hi
  parts_sum := by rw [Multiset.sum_coe, c.blocks_sum]

/-- Every partition arises from a composition: pick any list representative of the
underlying multiset. -/
theorem ofComposition_surj {n : ℕ} : Function.Surjective (ofComposition n) := by
  rintro ⟨b, hb₁, hb₂⟩
  induction b using Quotient.inductionOn with | _ b => ?_
  exact ⟨⟨b, hb₁, by simpa using hb₂⟩, Partition.ext rfl⟩

-- The argument `n` is kept explicit here since it is useful in tactic mode proofs to generate the
-- proof obligation `l.sum = n`.
/-- Given a multiset which sums to `n`, construct a partition of `n` with the same multiset, but
without the zeros. -/
@[simps]
def ofSums (n : ℕ) (l : Multiset ℕ) (hl : l.sum = n) : Partition n where
  parts := l.filter (· ≠ 0)
  parts_pos hi := (of_mem_filter hi).bot_lt
  parts_sum := by
    -- Discarding the zero parts does not change the sum.
    have lz : (l.filter (· = 0)).sum = 0 := by simp [sum_eq_zero_iff]
    rwa [← filter_add_not (· = 0) l, sum_add, lz, zero_add] at hl

/-- A `Multiset ℕ` induces a partition on its sum. -/
@[simps!]
def ofMultiset (l : Multiset ℕ) : Partition l.sum :=
  ofSums _ l rfl

/-- An element `s` of `Sym σ n` induces a partition given by its multiplicities. -/
def ofSym {n : ℕ} {σ : Type*} (s : Sym σ n) [DecidableEq σ] : n.Partition where
  parts := s.1.dedup.map s.1.count
  parts_pos := by simp [Multiset.count_pos]
  parts_sum := by
    show ∑ a ∈ s.1.toFinset, count a s.1 = n
    rw [toFinset_sum_count_eq]
    exact s.2

variable {n : ℕ} {σ τ : Type*} [DecidableEq σ] [DecidableEq τ]

@[simp]
lemma ofSym_map (e : σ ≃ τ) (s : Sym σ n) :
    ofSym (s.map e) = ofSym s := by
  simp only [ofSym, Sym.val_eq_coe, Sym.coe_map, toFinset_val, mk.injEq]
  -- Mapping along the injective `e` preserves deduplication and multiplicities.
  rw [Multiset.dedup_map_of_injective e.injective]
  simp only [map_map, Function.comp_apply]
  congr; funext i
  rw [← Multiset.count_map_eq_count' e _ e.injective]

/-- An equivalence between `σ` and `τ` induces an equivalence between the subtypes of `Sym σ n` and
`Sym τ n` corresponding to a given partition.
-/
def ofSymShapeEquiv (μ : Partition n) (e : σ ≃ τ) :
    {x : Sym σ n // ofSym x = μ} ≃ {x : Sym τ n // ofSym x = μ} where
  toFun := fun x => ⟨Sym.equivCongr e x, by simp [ofSym_map, x.2]⟩
  invFun := fun x => ⟨Sym.equivCongr e.symm x, by simp [ofSym_map, x.2]⟩
  left_inv := by intro x; simp
  right_inv := by intro x; simp

/-- The partition of exactly one part. -/
def indiscrete (n : ℕ) : Partition n :=
  ofSums n {n} rfl

instance {n : ℕ} : Inhabited (Partition n) :=
  ⟨indiscrete n⟩

@[simp]
lemma indiscrete_parts {n : ℕ} (hn : n ≠ 0) : (indiscrete n).parts = {n} := by
  simp [indiscrete, filter_eq_self, hn]

@[simp]
lemma partition_zero_parts (p : Partition 0) : p.parts = 0 :=
  eq_zero_of_forall_not_mem fun _ h => (p.parts_pos h).ne' <| sum_eq_zero_iff.1 p.parts_sum _ h

instance UniquePartitionZero : Unique (Partition 0) where
  uniq _ := Partition.ext <| by simp

@[simp]
lemma partition_one_parts (p : Partition 1) : p.parts = {1} := by
  -- Each part is positive and bounded by the total sum `1`, so every part equals `1`,
  -- and the sum condition forces exactly one part.
  have h : p.parts = replicate (card p.parts) 1 :=
    eq_replicate_card.2 fun x hx =>
      ((le_sum_of_mem hx).trans_eq p.parts_sum).antisymm (p.parts_pos hx)
  have h' : card p.parts = 1 := by simpa using (congrArg sum h.symm).trans p.parts_sum
  rw [h, h', replicate_one]

instance UniquePartitionOne : Unique (Partition 1) where
  uniq _ := Partition.ext <| by simp

@[simp]
lemma ofSym_one (s : Sym σ 1) : ofSym s = indiscrete 1 := by
  ext; simp

/-- The number of times a positive integer `i` appears in the partition `ofSums n l hl` is the same
as the number of times it appears in the multiset `l`.
(For `i = 0`, `Partition.non_zero` combined with `Multiset.count_eq_zero_of_not_mem` gives that
this is `0` instead.)
-/
theorem count_ofSums_of_ne_zero {n : ℕ} {l : Multiset ℕ} (hl : l.sum = n) {i : ℕ} (hi : i ≠ 0) :
    (ofSums n l hl).parts.count i = l.count i :=
  count_filter_of_pos hi

theorem count_ofSums_zero {n : ℕ} {l : Multiset ℕ} (hl : l.sum = n) :
    (ofSums n l hl).parts.count 0 = 0 :=
  count_filter_of_neg fun h => h rfl

/-- Show there are finitely many partitions by considering the surjection from compositions
to partitions. -/
instance (n : ℕ) : Fintype (Partition n) :=
  Fintype.ofSurjective (ofComposition n) ofComposition_surj

/-- The finset of those partitions in which every part is odd. -/
def odds (n : ℕ) : Finset (Partition n) :=
  Finset.univ.filter fun c => ∀ i ∈ c.parts, ¬Even i

/-- The finset of those partitions in which each part is used at most once. -/
def distincts (n : ℕ) : Finset (Partition n) :=
  Finset.univ.filter fun c => c.parts.Nodup

/-- The finset of those partitions in which every part is odd and used at most once. -/
def oddDistincts (n : ℕ) : Finset (Partition n) :=
  odds n ∩ distincts n

end Partition

end Nat
Combinatorics\Hall\Basic.lean
/- Copyright (c) 2021 Alena Gusakov, Bhavik Mehta, Kyle Miller. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Alena Gusakov, Bhavik Mehta, Kyle Miller -/ import Mathlib.Combinatorics.Hall.Finite import Mathlib.CategoryTheory.CofilteredSystem import Mathlib.Data.Rel /-! # Hall's Marriage Theorem Given a list of finite subsets $X_1, X_2, \dots, X_n$ of some given set $S$, P. Hall in [Hall1935] gave a necessary and sufficient condition for there to be a list of distinct elements $x_1, x_2, \dots, x_n$ with $x_i\in X_i$ for each $i$: it is when for each $k$, the union of every $k$ of these subsets has at least $k$ elements. Rather than a list of finite subsets, one may consider indexed families `t : ι → Finset α` of finite subsets with `ι` a `Fintype`, and then the list of distinct representatives is given by an injective function `f : ι → α` such that `∀ i, f i ∈ t i`, called a *matching*. This version is formalized as `Finset.all_card_le_biUnion_card_iff_exists_injective'` in a separate module. The theorem can be generalized to remove the constraint that `ι` be a `Fintype`. As observed in [Halpern1966], one may use the constrained version of the theorem in a compactness argument to remove this constraint. The formulation of compactness we use is that inverse limits of nonempty finite sets are nonempty (`nonempty_sections_of_finite_inverse_system`), which uses the Tychonoff theorem. The core of this module is constructing the inverse system: for every finite subset `ι'` of `ι`, we can consider the matchings on the restriction of the indexed family `t` to `ι'`. ## Main statements * `Finset.all_card_le_biUnion_card_iff_exists_injective` is in terms of `t : ι → Finset α`. * `Fintype.all_card_le_rel_image_card_iff_exists_injective` is in terms of a relation `r : α → β → Prop` such that `Rel.image r {a}` is a finite set for all `a : α`. 
* `Fintype.all_card_le_filter_rel_iff_exists_injective` is in terms of a relation
  `r : α → β → Prop` on finite types, with the Hall condition given in terms of
  `Finset.univ.filter`.

## TODO

* The statement of the theorem in terms of bipartite graphs is in preparation.

## Tags

Hall's Marriage Theorem, indexed families
-/

open Finset CategoryTheory

universe u v

/-- The set of matchings for `t` when restricted to a `Finset` of `ι`. -/
def hallMatchingsOn {ι : Type u} {α : Type v} (t : ι → Finset α) (ι' : Finset ι) :=
  { f : ι' → α | Function.Injective f ∧ ∀ x, f x ∈ t x }

/-- Given a matching on a finset, construct the restriction of that matching to a subset. -/
def hallMatchingsOn.restrict {ι : Type u} {α : Type v} (t : ι → Finset α) {ι' ι'' : Finset ι}
    (h : ι' ⊆ ι'') (f : hallMatchingsOn t ι'') : hallMatchingsOn t ι' := by
  -- Precompose the matching with the inclusion `ι' ⊆ ι''`.
  refine ⟨fun i => f.val ⟨i, h i.property⟩, ?_⟩
  cases' f.property with hinj hc
  refine ⟨?_, fun i => hc ⟨i, h i.property⟩⟩
  -- Injectivity transports along the inclusion of subtypes.
  rintro ⟨i, hi⟩ ⟨j, hj⟩ hh
  simpa only [Subtype.mk_eq_mk] using hinj hh

/-- When the Hall condition is satisfied, the set of matchings on a finite set is nonempty.
This is where `Finset.all_card_le_biUnion_card_iff_existsInjective'` comes into the argument. -/
theorem hallMatchingsOn.nonempty {ι : Type u} {α : Type v} [DecidableEq α] (t : ι → Finset α)
    (h : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) (ι' : Finset ι) :
    Nonempty (hallMatchingsOn t ι') := by
  classical
    refine ⟨Classical.indefiniteDescription _ ?_⟩
    apply (all_card_le_biUnion_card_iff_existsInjective' fun i : ι' => t i).mp
    intro s'
    -- The Hall condition for the family restricted to `ι'` follows from the one for `t`.
    convert h (s'.image (↑)) using 1
    · simp only [card_image_of_injective s' Subtype.coe_injective]
    · rw [image_biUnion]

/-- This is the `hallMatchingsOn` sets assembled into a directed system.
-/
def hallMatchingsFunctor {ι : Type u} {α : Type v} (t : ι → Finset α) :
    (Finset ι)ᵒᵖ ⥤ Type max u v where
  obj ι' := hallMatchingsOn t ι'.unop
  -- A morphism `ι' ⟶ ι''` in `(Finset ι)ᵒᵖ` is an inclusion `ι''.unop ⊆ ι'.unop`,
  -- and it acts by restricting matchings.
  map {ι' ι''} g f := hallMatchingsOn.restrict t (CategoryTheory.leOfHom g.unop) f

instance hallMatchingsOn.finite {ι : Type u} {α : Type v} (t : ι → Finset α) (ι' : Finset ι) :
    Finite (hallMatchingsOn t ι') := by
  classical
    rw [hallMatchingsOn]
    -- Each matching embeds into the finite function space `ι' → ι'.biUnion t`.
    let g : hallMatchingsOn t ι' → ι' → ι'.biUnion t := by
      rintro f i
      refine ⟨f.val i, ?_⟩
      rw [mem_biUnion]
      exact ⟨i, i.property, f.property.2 i⟩
    apply Finite.of_injective g
    intro f f' h
    ext a
    rw [Function.funext_iff] at h
    simpa [g] using h a

/-- This is the version of **Hall's Marriage Theorem** in terms of indexed
families of finite sets `t : ι → Finset α`. It states that there is a
set of distinct representatives if and only if every union of `k` of the
sets has at least `k` elements.

Recall that `s.biUnion t` is the union of all the sets `t i` for `i ∈ s`.

This theorem is bootstrapped from `Finset.all_card_le_biUnion_card_iff_existsInjective'`,
which has the additional constraint that `ι` is `Finite`.
-/
theorem Finset.all_card_le_biUnion_card_iff_exists_injective {ι : Type u} {α : Type v}
    [DecidableEq α] (t : ι → Finset α) :
    (∀ s : Finset ι, s.card ≤ (s.biUnion t).card) ↔
      ∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x := by
  constructor
  · intro h
    -- Set up the inverse system of matchings on finite subsets of `ι`.
    haveI : ∀ ι' : (Finset ι)ᵒᵖ, Nonempty ((hallMatchingsFunctor t).obj ι') := fun ι' =>
      hallMatchingsOn.nonempty t h ι'.unop
    classical
      haveI : ∀ ι' : (Finset ι)ᵒᵖ, Finite ((hallMatchingsFunctor t).obj ι') := by
        intro ι'
        rw [hallMatchingsFunctor]
        infer_instance
      -- Apply the compactness argument: an inverse limit of nonempty finite sets is nonempty.
      obtain ⟨u, hu⟩ := nonempty_sections_of_finite_inverse_system (hallMatchingsFunctor t)
      -- Interpret the resulting section of the inverse limit
      refine ⟨?_, ?_, ?_⟩
      · -- Build the matching function from the section: evaluate at the singleton `{i}`.
        exact fun i =>
          (u (Opposite.op ({i} : Finset ι))).val ⟨i, by simp only [Opposite.unop_op, mem_singleton]⟩
      · -- Show that it is injective, by comparing both values inside the matching on `{i, i'}`.
        intro i i'
        have subi : ({i} : Finset ι) ⊆ {i, i'} := by simp
        have subi' : ({i'} : Finset ι) ⊆ {i, i'} := by simp
        rw [← Finset.le_iff_subset] at subi subi'
        simp only
        rw [← hu (CategoryTheory.homOfLE subi).op, ← hu (CategoryTheory.homOfLE subi').op]
        let uii' := u (Opposite.op ({i, i'} : Finset ι))
        exact fun h => Subtype.mk_eq_mk.mp (uii'.property.1 h)
      · -- Show that it maps each index to the corresponding finite set
        intro i
        apply (u (Opposite.op ({i} : Finset ι))).property.2
  · -- The reverse direction is a straightforward cardinality argument
    rintro ⟨f, hf₁, hf₂⟩ s
    rw [← Finset.card_image_of_injective s hf₁]
    apply Finset.card_le_card
    intro
    rw [Finset.mem_image, Finset.mem_biUnion]
    rintro ⟨x, hx, rfl⟩
    exact ⟨x, hx, hf₂ x⟩

/-- Given a relation such that the image of every singleton set is finite, then the image of every
finite set is finite.
-/
instance {α : Type u} {β : Type v} [DecidableEq β] (r : α → β → Prop)
    [∀ a : α, Fintype (Rel.image r {a})] (A : Finset α) : Fintype (Rel.image r A) := by
  -- The image of `A` is the finite union of the images of its singletons.
  have h : Rel.image r A = (A.biUnion fun a => (Rel.image r {a}).toFinset : Set β) := by
    ext
    -- Porting note: added `Set.mem_toFinset`
    simp [Rel.image, (Set.mem_toFinset)]
  rw [h]
  apply FinsetCoe.fintype

/-- This is a version of **Hall's Marriage Theorem** in terms of a relation
between types `α` and `β` such that the image of each `x : α` is finite
(it suffices for `β` to be finite; see
`Fintype.all_card_le_filter_rel_iff_exists_injective`). There is
a transversal of the relation (an injective function `α → β` whose graph is
a subrelation of the relation) iff every subset of
`k` terms of `α` is related to at least `k` terms of `β`.

Note: if `[Fintype β]`, then there exist instances for `[∀ (a : α), Fintype (Rel.image r {a})]`.
-/
theorem Fintype.all_card_le_rel_image_card_iff_exists_injective {α : Type u} {β : Type v}
    [DecidableEq β] (r : α → β → Prop) [∀ a : α, Fintype (Rel.image r {a})] :
    (∀ A : Finset α, A.card ≤ Fintype.card (Rel.image r A)) ↔
      ∃ f : α → β, Function.Injective f ∧ ∀ x, r x (f x) := by
  -- Reduce to the `Finset` version via the family of finite images `r'`.
  let r' a := (Rel.image r {a}).toFinset
  have h : ∀ A : Finset α, Fintype.card (Rel.image r A) = (A.biUnion r').card := by
    intro A
    rw [← Set.toFinset_card]
    apply congr_arg
    ext b
    -- Porting note: added `Set.mem_toFinset`
    simp [Rel.image, (Set.mem_toFinset)]
  -- Porting note: added `Set.mem_toFinset`
  have h' : ∀ (f : α → β) (x), r x (f x) ↔ f x ∈ r' x := by
    simp [Rel.image, (Set.mem_toFinset)]
  simp only [h, h']
  apply Finset.all_card_le_biUnion_card_iff_exists_injective

-- TODO: decidable_pred makes Yael sad. When an appropriate decidable_rel-like exists, fix it.
/-- This is a version of **Hall's Marriage Theorem** in terms of a relation to a finite type.
There is a transversal of the relation (an injective function `α → β` whose graph is a subrelation
of the relation) iff every subset of `k` terms of `α` is related to at least `k` terms of `β`.

It is like `Fintype.all_card_le_rel_image_card_iff_exists_injective` but uses `Finset.filter`
rather than `Rel.image`.
-/
theorem Fintype.all_card_le_filter_rel_iff_exists_injective {α : Type u} {β : Type v} [Fintype β]
    (r : α → β → Prop) [∀ a, DecidablePred (r a)] :
    (∀ A : Finset α, A.card ≤ (univ.filter fun b : β => ∃ a ∈ A, r a b).card) ↔
      ∃ f : α → β, Function.Injective f ∧ ∀ x, r x (f x) := by
  haveI := Classical.decEq β
  -- `r' a` is the finset of elements related to `a`.
  let r' a := univ.filter fun b => r a b
  -- Rewrite the filtered set as a `biUnion` so the `Finset` version of the theorem applies.
  have h : ∀ A : Finset α, (univ.filter fun b : β => ∃ a ∈ A, r a b) = A.biUnion r' := by
    intro A
    ext b
    simp [r']
  have h' : ∀ (f : α → β) (x), r x (f x) ↔ f x ∈ r' x := by
    simp [r']
  simp_rw [h, h']
  apply Finset.all_card_le_biUnion_card_iff_exists_injective
Combinatorics\Hall\Finite.lean
/-
Copyright (c) 2021 Alena Gusakov, Bhavik Mehta, Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alena Gusakov, Bhavik Mehta, Kyle Miller
-/
import Mathlib.Data.Fintype.Basic
import Mathlib.Data.Set.Finite

/-!
# Hall's Marriage Theorem for finite index types

This module proves the basic form of Hall's theorem.
In contrast to the theorem described in `Combinatorics.Hall.Basic`, this
version requires that the indexed family `t : ι → Finset α` have `ι` be finite.
The `Combinatorics.Hall.Basic` module applies a compactness argument to this version
to remove the `Finite` constraint on `ι`.

The modules are split like this since the generalized statement
depends on the topology and category theory libraries, but the finite
case in this module has few dependencies.

A description of this formalization is in [Gusakov2021].

## Main statements

* `Finset.all_card_le_biUnion_card_iff_existsInjective'` is Hall's theorem with
  a finite index set. This is elsewhere generalized to
  `Finset.all_card_le_biUnion_card_iff_exists_injective`.
## Tags Hall's Marriage Theorem, indexed families -/ open Finset universe u v namespace HallMarriageTheorem variable {ι : Type u} {α : Type v} [DecidableEq α] {t : ι → Finset α} section Fintype variable [Fintype ι] theorem hall_cond_of_erase {x : ι} (a : α) (ha : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.biUnion t).card) (s' : Finset { x' : ι | x' ≠ x }) : s'.card ≤ (s'.biUnion fun x' => (t x').erase a).card := by haveI := Classical.decEq ι specialize ha (s'.image fun z => z.1) rw [image_nonempty, Finset.card_image_of_injective s' Subtype.coe_injective] at ha by_cases he : s'.Nonempty · have ha' : s'.card < (s'.biUnion fun x => t x).card := by convert ha he fun h => by simpa [← h] using mem_univ x using 2 ext x simp only [mem_image, mem_biUnion, exists_prop, SetCoe.exists, exists_and_right, exists_eq_right, Subtype.coe_mk] rw [← erase_biUnion] by_cases hb : a ∈ s'.biUnion fun x => t x · rw [card_erase_of_mem hb] exact Nat.le_sub_one_of_lt ha' · rw [erase_eq_of_not_mem hb] exact Nat.le_of_lt ha' · rw [nonempty_iff_ne_empty, not_not] at he subst s' simp /-- First case of the inductive step: assuming that `∀ (s : Finset ι), s.Nonempty → s ≠ univ → s.card < (s.biUnion t).card` and that the statement of **Hall's Marriage Theorem** is true for all `ι'` of cardinality ≤ `n`, then it is true for `ι` of cardinality `n + 1`. -/ theorem hall_hard_inductive_step_A {n : ℕ} (hn : Fintype.card ι = n + 1) (ht : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) (ih : ∀ {ι' : Type u} [Fintype ι'] (t' : ι' → Finset α), Fintype.card ι' ≤ n → (∀ s' : Finset ι', s'.card ≤ (s'.biUnion t').card) → ∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x) (ha : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.biUnion t).card) : ∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x := by haveI : Nonempty ι := Fintype.card_pos_iff.mp (hn.symm ▸ Nat.succ_pos _) haveI := Classical.decEq ι -- Choose an arbitrary element `x : ι` and `y : t x`. 
let x := Classical.arbitrary ι have tx_ne : (t x).Nonempty := by rw [← Finset.card_pos] calc 0 < 1 := Nat.one_pos _ ≤ (Finset.biUnion {x} t).card := ht {x} _ = (t x).card := by rw [Finset.singleton_biUnion] choose y hy using tx_ne -- Restrict to everything except `x` and `y`. let ι' := { x' : ι | x' ≠ x } let t' : ι' → Finset α := fun x' => (t x').erase y have card_ι' : Fintype.card ι' = n := calc Fintype.card ι' = Fintype.card ι - 1 := Set.card_ne_eq _ _ = n := by rw [hn, Nat.add_succ_sub_one, add_zero] rcases ih t' card_ι'.le (hall_cond_of_erase y ha) with ⟨f', hfinj, hfr⟩ -- Extend the resulting function. refine ⟨fun z => if h : z = x then y else f' ⟨z, h⟩, ?_, ?_⟩ · rintro z₁ z₂ have key : ∀ {x}, y ≠ f' x := by intro x h simpa [t', ← h] using hfr x by_cases h₁ : z₁ = x <;> by_cases h₂ : z₂ = x <;> simp [h₁, h₂, hfinj.eq_iff, key, key.symm] · intro z simp only [ne_eq, Set.mem_setOf_eq] split_ifs with hz · rwa [hz] · specialize hfr ⟨z, hz⟩ rw [mem_erase] at hfr exact hfr.2 theorem hall_cond_of_restrict {ι : Type u} {t : ι → Finset α} {s : Finset ι} (ht : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) (s' : Finset (s : Set ι)) : s'.card ≤ (s'.biUnion fun a' => t a').card := by classical rw [← card_image_of_injective s' Subtype.coe_injective] convert ht (s'.image fun z => z.1) using 1 apply congr_arg ext y simp theorem hall_cond_of_compl {ι : Type u} {t : ι → Finset α} {s : Finset ι} (hus : s.card = (s.biUnion t).card) (ht : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) (s' : Finset (sᶜ : Set ι)) : s'.card ≤ (s'.biUnion fun x' => t x' \ s.biUnion t).card := by haveI := Classical.decEq ι have disj : Disjoint s (s'.image fun z => z.1) := by simp only [disjoint_left, not_exists, mem_image, exists_prop, SetCoe.exists, exists_and_right, exists_eq_right, Subtype.coe_mk] intro x hx hc _ exact absurd hx hc have : s'.card = (s ∪ s'.image fun z => z.1).card - s.card := by simp [disj, card_image_of_injective _ Subtype.coe_injective, Nat.add_sub_cancel_left] rw [this, hus] refine 
(Nat.sub_le_sub_right (ht _) _).trans ?_ rw [← card_sdiff] · refine (card_le_card ?_).trans le_rfl intro t simp only [mem_biUnion, mem_sdiff, not_exists, mem_image, and_imp, mem_union, exists_and_right, exists_imp] rintro x (hx | ⟨x', hx', rfl⟩) rat hs · exact False.elim <| (hs x) <| And.intro hx rat · use x', hx', rat, hs · apply biUnion_subset_biUnion_of_subset_left apply subset_union_left /-- Second case of the inductive step: assuming that `∃ (s : Finset ι), s ≠ univ → s.card = (s.biUnion t).card` and that the statement of **Hall's Marriage Theorem** is true for all `ι'` of cardinality ≤ `n`, then it is true for `ι` of cardinality `n + 1`. -/ theorem hall_hard_inductive_step_B {n : ℕ} (hn : Fintype.card ι = n + 1) (ht : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) (ih : ∀ {ι' : Type u} [Fintype ι'] (t' : ι' → Finset α), Fintype.card ι' ≤ n → (∀ s' : Finset ι', s'.card ≤ (s'.biUnion t').card) → ∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x) (s : Finset ι) (hs : s.Nonempty) (hns : s ≠ univ) (hus : s.card = (s.biUnion t).card) : ∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x := by haveI := Classical.decEq ι -- Restrict to `s` rw [Nat.add_one] at hn have card_ι'_le : Fintype.card s ≤ n := by apply Nat.le_of_lt_succ calc Fintype.card s = s.card := Fintype.card_coe _ _ < Fintype.card ι := (card_lt_iff_ne_univ _).mpr hns _ = n.succ := hn let t' : s → Finset α := fun x' => t x' rcases ih t' card_ι'_le (hall_cond_of_restrict ht) with ⟨f', hf', hsf'⟩ -- Restrict to `sᶜ` in the domain and `(s.biUnion t)ᶜ` in the codomain. 
set ι'' := (s : Set ι)ᶜ let t'' : ι'' → Finset α := fun a'' => t a'' \ s.biUnion t have card_ι''_le : Fintype.card ι'' ≤ n := by simp_rw [ι'', ← Nat.lt_succ_iff, ← hn, ← Finset.coe_compl, coe_sort_coe] rwa [Fintype.card_coe, card_compl_lt_iff_nonempty] rcases ih t'' card_ι''_le (hall_cond_of_compl hus ht) with ⟨f'', hf'', hsf''⟩ -- Put them together have f'_mem_biUnion : ∀ (x') (hx' : x' ∈ s), f' ⟨x', hx'⟩ ∈ s.biUnion t := by intro x' hx' rw [mem_biUnion] exact ⟨x', hx', hsf' _⟩ have f''_not_mem_biUnion : ∀ (x'') (hx'' : ¬x'' ∈ s), ¬f'' ⟨x'', hx''⟩ ∈ s.biUnion t := by intro x'' hx'' have h := hsf'' ⟨x'', hx''⟩ rw [mem_sdiff] at h exact h.2 have im_disj : ∀ (x' x'' : ι) (hx' : x' ∈ s) (hx'' : ¬x'' ∈ s), f' ⟨x', hx'⟩ ≠ f'' ⟨x'', hx''⟩ := by intro x x' hx' hx'' h apply f''_not_mem_biUnion x' hx'' rw [← h] apply f'_mem_biUnion x refine ⟨fun x => if h : x ∈ s then f' ⟨x, h⟩ else f'' ⟨x, h⟩, ?_, ?_⟩ · refine hf'.dite _ hf'' (@fun x x' => im_disj x x' _ _) · intro x simp only [of_eq_true] split_ifs with h · exact hsf' ⟨x, h⟩ · exact sdiff_subset (hsf'' ⟨x, h⟩) end Fintype variable [Finite ι] /-- Here we combine the two inductive steps into a full strong induction proof, completing the proof the harder direction of **Hall's Marriage Theorem**. 
-/
theorem hall_hard_inductive (ht : ∀ s : Finset ι, s.card ≤ (s.biUnion t).card) :
    ∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x := by
  cases nonempty_fintype ι
  -- Strong induction on the cardinality of the index type `ι`.
  induction' hn : Fintype.card ι using Nat.strong_induction_on with n ih generalizing ι
  rcases n with (_ | n)
  · -- Base case: `ι` is empty, so the empty matching works.
    rw [Fintype.card_eq_zero_iff] at hn
    exact ⟨isEmptyElim, isEmptyElim, isEmptyElim⟩
  · -- Package the induction hypothesis in the form the two step lemmas expect.
    have ih' :
      ∀ (ι' : Type u) [Fintype ι'] (t' : ι' → Finset α),
        Fintype.card ι' ≤ n →
          (∀ s' : Finset ι', s'.card ≤ (s'.biUnion t').card) →
            ∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x := by
      intro ι' _ _ hι' ht'
      exact ih _ (Nat.lt_succ_of_le hι') ht' _ rfl
    -- Either the Hall condition is strict on every proper nonempty subset (case A),
    -- or some proper nonempty subset attains equality (case B).
    by_cases h : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.biUnion t).card
    · refine hall_hard_inductive_step_A hn ht (@fun ι' => ih' ι') h
    · push_neg at h
      rcases h with ⟨s, sne, snu, sle⟩
      exact hall_hard_inductive_step_B hn ht (@fun ι' => ih' ι') s sne snu
        (Nat.le_antisymm (ht _) sle)

end HallMarriageTheorem

/-- This is the version of **Hall's Marriage Theorem** in terms of indexed
families of finite sets `t : ι → Finset α` with `ι` finite.
It states that there is a set of distinct representatives if and only
if every union of `k` of the sets has at least `k` elements.

See `Finset.all_card_le_biUnion_card_iff_exists_injective` for a version
where the `Finite ι` constraint is removed.
-/
theorem Finset.all_card_le_biUnion_card_iff_existsInjective' {ι α : Type*} [Finite ι]
    [DecidableEq α] (t : ι → Finset α) :
    (∀ s : Finset ι, s.card ≤ (s.biUnion t).card) ↔
      ∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x := by
  constructor
  · -- The hard direction is the strong induction above.
    exact HallMarriageTheorem.hall_hard_inductive
  · -- The easy direction is a cardinality argument: `f` maps `s` injectively into `s.biUnion t`.
    rintro ⟨f, hf₁, hf₂⟩ s
    rw [← card_image_of_injective s hf₁]
    apply card_le_card
    intro
    rw [mem_image, mem_biUnion]
    rintro ⟨x, hx, rfl⟩
    exact ⟨x, hx, hf₂ x⟩
Combinatorics\Optimization\ValuedCSP.lean
/- Copyright (c) 2023 Martin Dvorak. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Martin Dvorak -/ import Mathlib.Algebra.BigOperators.Fin import Mathlib.Algebra.Order.BigOperators.Group.Multiset import Mathlib.Data.Fin.VecNotation import Mathlib.Data.Matrix.Notation /-! # General-Valued Constraint Satisfaction Problems General-Valued CSP is a very broad class of problems in discrete optimization. General-Valued CSP subsumes Min-Cost-Hom (including 3-SAT for example) and Finite-Valued CSP. ## Main definitions * `ValuedCSP`: A VCSP template; fixes a domain, a codomain, and allowed cost functions. * `ValuedCSP.Term`: One summand in a VCSP instance; calls a concrete function from given template. * `ValuedCSP.Term.evalSolution`: An evaluation of the VCSP term for given solution. * `ValuedCSP.Instance`: An instance of a VCSP problem over given template. * `ValuedCSP.Instance.evalSolution`: An evaluation of the VCSP instance for given solution. * `ValuedCSP.Instance.IsOptimumSolution`: Is given solution a minimum of the VCSP instance? * `Function.HasMaxCutProperty`: Can given binary function express the Max-Cut problem? * `FractionalOperation`: Multiset of operations on given domain of the same arity. * `FractionalOperation.IsSymmetricFractionalPolymorphismFor`: Is given fractional operation a symmetric fractional polymorphism for given VCSP template? ## References * [D. A. Cohen, M. C. Cooper, P. Creed, P. G. Jeavons, S. Živný, *An Algebraic Theory of Complexity for Discrete Optimisation*][cohen2012] -/ /-- A template for a valued CSP problem over a domain `D` with costs in `C`. Regarding `C` we want to support `Bool`, `Nat`, `ENat`, `Int`, `Rat`, `NNRat`, `Real`, `NNReal`, `EReal`, `ENNReal`, and tuples made of any of those types. 
-/ @[nolint unusedArguments] abbrev ValuedCSP (D C : Type*) [OrderedAddCommMonoid C] := Set (Σ (n : ℕ), (Fin n → D) → C) -- Cost functions `D^n → C` for any `n` variable {D C : Type*} [OrderedAddCommMonoid C] /-- A term in a valued CSP instance over the template `Γ`. -/ structure ValuedCSP.Term (Γ : ValuedCSP D C) (ι : Type*) where /-- Arity of the function -/ n : ℕ /-- Which cost function is instantiated -/ f : (Fin n → D) → C /-- The cost function comes from the template -/ inΓ : ⟨n, f⟩ ∈ Γ /-- Which variables are plugged as arguments to the cost function -/ app : Fin n → ι /-- Evaluation of a `Γ` term `t` for given solution `x`. -/ def ValuedCSP.Term.evalSolution {Γ : ValuedCSP D C} {ι : Type*} (t : Γ.Term ι) (x : ι → D) : C := t.f (x ∘ t.app) /-- A valued CSP instance over the template `Γ` with variables indexed by `ι`. -/ abbrev ValuedCSP.Instance (Γ : ValuedCSP D C) (ι : Type*) : Type _ := Multiset (Γ.Term ι) /-- Evaluation of a `Γ` instance `I` for given solution `x`. -/ def ValuedCSP.Instance.evalSolution {Γ : ValuedCSP D C} {ι : Type*} (I : Γ.Instance ι) (x : ι → D) : C := (I.map (·.evalSolution x)).sum /-- Condition for `x` being an optimum solution (min) to given `Γ` instance `I`. -/ def ValuedCSP.Instance.IsOptimumSolution {Γ : ValuedCSP D C} {ι : Type*} (I : Γ.Instance ι) (x : ι → D) : Prop := ∀ y : ι → D, I.evalSolution x ≤ I.evalSolution y /-- Function `f` has Max-Cut property at labels `a` and `b` when `argmin f` is exactly `{ ![a, b] , ![b, a] }`. -/ def Function.HasMaxCutPropertyAt (f : (Fin 2 → D) → C) (a b : D) : Prop := f ![a, b] = f ![b, a] ∧ ∀ x y : D, f ![a, b] ≤ f ![x, y] ∧ (f ![a, b] = f ![x, y] → a = x ∧ b = y ∨ a = y ∧ b = x) /-- Function `f` has Max-Cut property at some two non-identical labels. -/ def Function.HasMaxCutProperty (f : (Fin 2 → D) → C) : Prop := ∃ a b : D, a ≠ b ∧ f.HasMaxCutPropertyAt a b /-- Fractional operation is a finite unordered collection of D^m → D possibly with duplicates. 
-/ abbrev FractionalOperation (D : Type*) (m : ℕ) : Type _ := Multiset ((Fin m → D) → D) variable {m : ℕ} /-- Arity of the "output" of the fractional operation. -/ @[simp] def FractionalOperation.size (ω : FractionalOperation D m) : ℕ := Multiset.card.toFun ω /-- Fractional operation is valid iff nonempty. -/ def FractionalOperation.IsValid (ω : FractionalOperation D m) : Prop := ω ≠ ∅ /-- Valid fractional operation contains an operation. -/ lemma FractionalOperation.IsValid.contains {ω : FractionalOperation D m} (valid : ω.IsValid) : ∃ g : (Fin m → D) → D, g ∈ ω := Multiset.exists_mem_of_ne_zero valid /-- Fractional operation applied to a transposed table of values. -/ def FractionalOperation.tt {ι : Type*} (ω : FractionalOperation D m) (x : Fin m → ι → D) : Multiset (ι → D) := ω.map (fun (g : (Fin m → D) → D) (i : ι) => g ((Function.swap x) i)) /-- Cost function admits given fractional operation, i.e., `ω` improves `f` in the `≤` sense. -/ def Function.AdmitsFractional {n : ℕ} (f : (Fin n → D) → C) (ω : FractionalOperation D m) : Prop := ∀ x : (Fin m → (Fin n → D)), m • ((ω.tt x).map f).sum ≤ ω.size • Finset.univ.sum (fun i => f (x i)) /-- Fractional operation is a fractional polymorphism for given VCSP template. -/ def FractionalOperation.IsFractionalPolymorphismFor (ω : FractionalOperation D m) (Γ : ValuedCSP D C) : Prop := ∀ f ∈ Γ, f.snd.AdmitsFractional ω /-- Fractional operation is symmetric. -/ def FractionalOperation.IsSymmetric (ω : FractionalOperation D m) : Prop := ∀ x y : (Fin m → D), List.Perm (List.ofFn x) (List.ofFn y) → ∀ g ∈ ω, g x = g y /-- Fractional operation is a symmetric fractional polymorphism for given VCSP template. 
-/ def FractionalOperation.IsSymmetricFractionalPolymorphismFor (ω : FractionalOperation D m) (Γ : ValuedCSP D C) : Prop := ω.IsFractionalPolymorphismFor Γ ∧ ω.IsSymmetric variable {C : Type*} [OrderedCancelAddCommMonoid C] lemma Function.HasMaxCutPropertyAt.rows_lt_aux {f : (Fin 2 → D) → C} {a b : D} (mcf : f.HasMaxCutPropertyAt a b) (hab : a ≠ b) {ω : FractionalOperation D 2} (symmega : ω.IsSymmetric) {r : Fin 2 → D} (rin : r ∈ (ω.tt ![![a, b], ![b, a]])) : f ![a, b] < f r := by rw [FractionalOperation.tt, Multiset.mem_map] at rin rw [show r = ![r 0, r 1] from List.ofFn_inj.mp rfl] apply lt_of_le_of_ne (mcf.right (r 0) (r 1)).left intro equ have asymm : r 0 ≠ r 1 := by rcases (mcf.right (r 0) (r 1)).right equ with ⟨ha0, hb1⟩ | ⟨ha1, hb0⟩ · rw [ha0, hb1] at hab exact hab · rw [ha1, hb0] at hab exact hab.symm apply asymm obtain ⟨o, in_omega, rfl⟩ := rin show o (fun j => ![![a, b], ![b, a]] j 0) = o (fun j => ![![a, b], ![b, a]] j 1) convert symmega ![a, b] ![b, a] (List.Perm.swap b a []) o in_omega using 2 <;> simp [Matrix.const_fin1_eq] lemma Function.HasMaxCutProperty.forbids_commutativeFractionalPolymorphism {f : (Fin 2 → D) → C} (mcf : f.HasMaxCutProperty) {ω : FractionalOperation D 2} (valid : ω.IsValid) (symmega : ω.IsSymmetric) : ¬ f.AdmitsFractional ω := by intro contr obtain ⟨a, b, hab, mcfab⟩ := mcf specialize contr ![![a, b], ![b, a]] rw [Fin.sum_univ_two', ← mcfab.left, ← two_nsmul] at contr have sharp : 2 • ((ω.tt ![![a, b], ![b, a]]).map (fun _ => f ![a, b])).sum < 2 • ((ω.tt ![![a, b], ![b, a]]).map (fun r => f r)).sum := by have half_sharp : ((ω.tt ![![a, b], ![b, a]]).map (fun _ => f ![a, b])).sum < ((ω.tt ![![a, b], ![b, a]]).map (fun r => f r)).sum := by apply Multiset.sum_lt_sum · intro r rin exact le_of_lt (mcfab.rows_lt_aux hab symmega rin) · obtain ⟨g, _⟩ := valid.contains have : (fun i => g ((Function.swap ![![a, b], ![b, a]]) i)) ∈ ω.tt ![![a, b], ![b, a]] := by simp only [FractionalOperation.tt, Multiset.mem_map] use g exact ⟨_, this, 
mcfab.rows_lt_aux hab symmega this⟩ rw [two_nsmul, two_nsmul] exact add_lt_add half_sharp half_sharp have impos : 2 • (ω.map (fun _ => f ![a, b])).sum < ω.size • 2 • f ![a, b] := by convert lt_of_lt_of_le sharp contr simp [FractionalOperation.tt, Multiset.map_map] have rhs_swap : ω.size • 2 • f ![a, b] = 2 • ω.size • f ![a, b] := nsmul_left_comm .. have distrib : (ω.map (fun _ => f ![a, b])).sum = ω.size • f ![a, b] := by simp rw [rhs_swap, distrib] at impos exact ne_of_lt impos rfl
Combinatorics\Quiver\Arborescence.lean
/- Copyright (c) 2021 David Wärn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: David Wärn -/ import Mathlib.Combinatorics.Quiver.Path import Mathlib.Combinatorics.Quiver.Subquiver import Mathlib.Order.WellFounded /-! # Arborescences A quiver `V` is an arborescence (or directed rooted tree) when we have a root vertex `root : V` such that for every `b : V` there is a unique path from `root` to `b`. ## Main definitions - `Quiver.Arborescence V`: a typeclass asserting that `V` is an arborescence - `arborescenceMk`: a convenient way of proving that a quiver is an arborescence - `RootedConnected r`: a typeclass asserting that there is at least one path from `r` to `b` for every `b`. - `geodesicSubtree r`: given `[RootedConnected r]`, this is a subquiver of `V` which contains just enough edges to include a shortest path from `r` to `b` for every `b`. - `geodesicArborescence : Arborescence (geodesicSubtree r)`: an instance saying that the geodesic subtree is an arborescence. This proves the directed analogue of 'every connected graph has a spanning tree'. This proof avoids the use of Zorn's lemma. -/ open Opposite universe v u namespace Quiver /-- A quiver is an arborescence when there is a unique path from the default vertex to every other vertex. -/ class Arborescence (V : Type u) [Quiver.{v} V] : Type max u v where /-- The root of the arborescence. -/ root : V /-- There is a unique path from the root to any other vertex. -/ uniquePath : ∀ b : V, Unique (Path root b) /-- The root of an arborescence. 
-/ def root (V : Type u) [Quiver V] [Arborescence V] : V := Arborescence.root instance {V : Type u} [Quiver V] [Arborescence V] (b : V) : Unique (Path (root V) b) := Arborescence.uniquePath b /-- To show that `[Quiver V]` is an arborescence with root `r : V`, it suffices to - provide a height function `V → ℕ` such that every arrow goes from a lower vertex to a higher vertex, - show that every vertex has at most one arrow to it, and - show that every vertex other than `r` has an arrow to it. -/ noncomputable def arborescenceMk {V : Type u} [Quiver V] (r : V) (height : V → ℕ) (height_lt : ∀ ⦃a b⦄, (a ⟶ b) → height a < height b) (unique_arrow : ∀ ⦃a b c : V⦄ (e : a ⟶ c) (f : b ⟶ c), a = b ∧ HEq e f) (root_or_arrow : ∀ b, b = r ∨ ∃ a, Nonempty (a ⟶ b)) : Arborescence V where root := r uniquePath b := ⟨Classical.inhabited_of_nonempty (by rcases show ∃ n, height b < n from ⟨_, Nat.lt.base _⟩ with ⟨n, hn⟩ induction' n with n ih generalizing b · exact False.elim (Nat.not_lt_zero _ hn) rcases root_or_arrow b with (⟨⟨⟩⟩ | ⟨a, ⟨e⟩⟩) · exact ⟨Path.nil⟩ · rcases ih a (lt_of_lt_of_le (height_lt e) (Nat.lt_succ_iff.mp hn)) with ⟨p⟩ exact ⟨p.cons e⟩), by have height_le : ∀ {a b}, Path a b → height a ≤ height b := by intro a b p induction' p with b c _ e ih · rfl · exact le_of_lt (lt_of_le_of_lt ih (height_lt e)) suffices ∀ p q : Path r b, p = q by intro p apply this intro p q induction' p with a c p e ih <;> cases' q with b _ q f · rfl · exact False.elim (lt_irrefl _ (lt_of_le_of_lt (height_le q) (height_lt f))) · exact False.elim (lt_irrefl _ (lt_of_le_of_lt (height_le p) (height_lt e))) · rcases unique_arrow e f with ⟨⟨⟩, ⟨⟩⟩ rw [ih]⟩ /-- `RootedConnected r` means that there is a path from `r` to any other vertex. 
-/ class RootedConnected {V : Type u} [Quiver V] (r : V) : Prop where nonempty_path : ∀ b : V, Nonempty (Path r b) attribute [instance] RootedConnected.nonempty_path section GeodesicSubtree variable {V : Type u} [Quiver.{v + 1} V] (r : V) [RootedConnected r] /-- A path from `r` of minimal length. -/ noncomputable def shortestPath (b : V) : Path r b := WellFounded.min (measure Path.length).wf Set.univ Set.univ_nonempty /-- The length of a path is at least the length of the shortest path -/ theorem shortest_path_spec {a : V} (p : Path r a) : (shortestPath r a).length ≤ p.length := not_lt.mp (WellFounded.not_lt_min (measure _).wf Set.univ _ trivial) /-- A subquiver which by construction is an arborescence. -/ def geodesicSubtree : WideSubquiver V := fun a b => { e | ∃ p : Path r a, shortestPath r b = p.cons e } noncomputable instance geodesicArborescence : Arborescence (geodesicSubtree r) := arborescenceMk r (fun a => (shortestPath r a).length) (by rintro a b ⟨e, p, h⟩ simp_rw [h, Path.length_cons, Nat.lt_succ_iff] apply shortest_path_spec) (by rintro a b c ⟨e, p, h⟩ ⟨f, q, j⟩ cases h.symm.trans j constructor <;> rfl) (by intro b rcases hp : shortestPath r b with (_ | ⟨p, e⟩) · exact Or.inl rfl · exact Or.inr ⟨_, ⟨⟨e, p, hp⟩⟩⟩) end GeodesicSubtree end Quiver
Combinatorics\Quiver\Basic.lean
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn, Scott Morrison
-/
import Mathlib.Data.Opposite
import Mathlib.Tactic.Cases

/-!
# Quivers

This module defines quivers. A quiver on a type `V` of vertices assigns to every pair `a b : V`
of vertices a type `a ⟶ b` of arrows from `a` to `b`. This is a very permissive notion of directed
graph.

## Implementation notes

Currently `Quiver` is defined with `Hom : V → V → Sort v`.
This is different from the category theory setup,
where we insist that morphisms live in some `Type`.
There's some balance here: it's nice to allow `Prop` to ensure there are no multiple arrows,
but it also results in error-prone universe signatures when constraints require a `Type`.
-/

open Opposite

-- We use the same universe order as in category theory.
-- See note [CategoryTheory universes]
universe v v₁ v₂ u u₁ u₂

/-- A quiver `G` on a type `V` of vertices assigns to every pair `a b : V` of vertices
a type `a ⟶ b` of arrows from `a` to `b`.

For graphs with no repeated edges, one can use `Quiver.{0} V`, which ensures
`a ⟶ b : Prop`. For multigraphs, one can use `Quiver.{v+1} V`, which ensures
`a ⟶ b : Type v`.

Because `Category` will later extend this class, we call the field `Hom`.
Except when constructing instances, you should rarely see this, and use the `⟶` notation instead.
-/
class Quiver (V : Type u) where
  /-- The type of edges/arrows/morphisms between a given source and target. -/
  Hom : V → V → Sort v

/-- Notation for the type of edges/arrows/morphisms between a given source and target
in a quiver or category. -/
infixr:10 " ⟶ " => Quiver.Hom

/-- A morphism of quivers. As we will later have categorical functors extend this structure,
we call it a `Prefunctor`. -/
structure Prefunctor (V : Type u₁) [Quiver.{v₁} V] (W : Type u₂) [Quiver.{v₂} W] where
  /-- The action of a (pre)functor on vertices/objects.
-/ obj : V → W /-- The action of a (pre)functor on edges/arrows/morphisms. -/ map : ∀ {X Y : V}, (X ⟶ Y) → (obj X ⟶ obj Y) namespace Prefunctor -- Porting note: added during port. -- These lemmas can not be `@[simp]` because after `whnfR` they have a variable on the LHS. -- Nevertheless they are sometimes useful when building functors. lemma mk_obj {V W : Type*} [Quiver V] [Quiver W] {obj : V → W} {map} {X : V} : (Prefunctor.mk obj map).obj X = obj X := rfl lemma mk_map {V W : Type*} [Quiver V] [Quiver W] {obj : V → W} {map} {X Y : V} {f : X ⟶ Y} : (Prefunctor.mk obj map).map f = map f := rfl @[ext (iff := false)] theorem ext {V : Type u} [Quiver.{v₁} V] {W : Type u₂} [Quiver.{v₂} W] {F G : Prefunctor V W} (h_obj : ∀ X, F.obj X = G.obj X) (h_map : ∀ (X Y : V) (f : X ⟶ Y), F.map f = Eq.recOn (h_obj Y).symm (Eq.recOn (h_obj X).symm (G.map f))) : F = G := by cases' F with F_obj _ cases' G with G_obj _ obtain rfl : F_obj = G_obj := by ext X apply h_obj congr funext X Y f simpa using h_map X Y f /-- The identity morphism between quivers. -/ @[simps] def id (V : Type*) [Quiver V] : Prefunctor V V where obj := fun X => X map f := f instance (V : Type*) [Quiver V] : Inhabited (Prefunctor V V) := ⟨id V⟩ /-- Composition of morphisms between quivers. -/ @[simps] def comp {U : Type*} [Quiver U] {V : Type*} [Quiver V] {W : Type*} [Quiver W] (F : Prefunctor U V) (G : Prefunctor V W) : Prefunctor U W where obj X := G.obj (F.obj X) map f := G.map (F.map f) @[simp] theorem comp_id {U V : Type*} [Quiver U] [Quiver V] (F : Prefunctor U V) : F.comp (id _) = F := rfl @[simp] theorem id_comp {U V : Type*} [Quiver U] [Quiver V] (F : Prefunctor U V) : (id _).comp F = F := rfl @[simp] theorem comp_assoc {U V W Z : Type*} [Quiver U] [Quiver V] [Quiver W] [Quiver Z] (F : Prefunctor U V) (G : Prefunctor V W) (H : Prefunctor W Z) : (F.comp G).comp H = F.comp (G.comp H) := rfl /-- Notation for a prefunctor between quivers. 
-/ infixl:50 " ⥤q " => Prefunctor /-- Notation for composition of prefunctors. -/ infixl:60 " ⋙q " => Prefunctor.comp /-- Notation for the identity prefunctor on a quiver. -/ notation "𝟭q" => id theorem congr_map {U V : Type*} [Quiver U] [Quiver V] (F : U ⥤q V) {X Y : U} {f g : X ⟶ Y} (h : f = g) : F.map f = F.map g := by rw [h] end Prefunctor namespace Quiver /-- `Vᵒᵖ` reverses the direction of all arrows of `V`. -/ instance opposite {V} [Quiver V] : Quiver Vᵒᵖ := ⟨fun a b => (unop b ⟶ unop a)ᵒᵖ⟩ /-- The opposite of an arrow in `V`. -/ def Hom.op {V} [Quiver V] {X Y : V} (f : X ⟶ Y) : op Y ⟶ op X := ⟨f⟩ /-- Given an arrow in `Vᵒᵖ`, we can take the "unopposite" back in `V`. -/ def Hom.unop {V} [Quiver V] {X Y : Vᵒᵖ} (f : X ⟶ Y) : unop Y ⟶ unop X := Opposite.unop f /-- A type synonym for a quiver with no arrows. -/ -- Porting note(#5171): this linter isn't ported yet. -- @[nolint has_nonempty_instance] def Empty (V : Type u) : Type u := V instance emptyQuiver (V : Type u) : Quiver.{u} (Empty V) := ⟨fun _ _ => PEmpty⟩ @[simp] theorem empty_arrow {V : Type u} (a b : Empty V) : (a ⟶ b) = PEmpty := rfl /-- A quiver is thin if it has no parallel arrows. -/ abbrev IsThin (V : Type u) [Quiver V] : Prop := ∀ a b : V, Subsingleton (a ⟶ b) end Quiver
Combinatorics\Quiver\Cast.lean
/-
Copyright (c) 2022 Antoine Labelle, Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle, Rémi Bottinelli
-/
import Mathlib.Combinatorics.Quiver.Basic
import Mathlib.Combinatorics.Quiver.Path

/-!
# Rewriting arrows and paths along vertex equalities

This file defines `Hom.cast` and `Path.cast` (and associated lemmas) in order to allow
rewriting arrows and paths along equalities of their endpoints.
-/


universe v v₁ v₂ u u₁ u₂

variable {U : Type*} [Quiver.{u + 1} U]

namespace Quiver

/-!
### Rewriting arrows along equalities of vertices
-/


/-- Change the endpoints of an arrow using equalities.
Implemented by transporting `e` along `hv` and then `hu` via `Eq.ndrec`. -/
def Hom.cast {u v u' v' : U} (hu : u = u') (hv : v = v') (e : u ⟶ v) : u' ⟶ v' :=
  Eq.ndrec (motive := (· ⟶ v')) (Eq.ndrec e hv) hu

theorem Hom.cast_eq_cast {u v u' v' : U} (hu : u = u') (hv : v = v') (e : u ⟶ v) :
    e.cast hu hv = _root_.cast (by {rw [hu, hv]}) e := by
  subst_vars
  rfl

@[simp]
theorem Hom.cast_rfl_rfl {u v : U} (e : u ⟶ v) : e.cast rfl rfl = e :=
  rfl

@[simp]
theorem Hom.cast_cast {u v u' v' u'' v'' : U} (e : u ⟶ v) (hu : u = u') (hv : v = v')
    (hu' : u' = u'') (hv' : v' = v'') :
    (e.cast hu hv).cast hu' hv' = e.cast (hu.trans hu') (hv.trans hv') := by
  subst_vars
  rfl

theorem Hom.cast_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (e : u ⟶ v) :
    HEq (e.cast hu hv) e := by
  subst_vars
  rfl

theorem Hom.cast_eq_iff_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (e : u ⟶ v)
    (e' : u' ⟶ v') : e.cast hu hv = e' ↔ HEq e e' := by
  rw [Hom.cast_eq_cast]
  exact _root_.cast_eq_iff_heq

theorem Hom.eq_cast_iff_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (e : u ⟶ v)
    (e' : u' ⟶ v') : e' = e.cast hu hv ↔ HEq e' e := by
  rw [eq_comm, Hom.cast_eq_iff_heq]
  exact ⟨HEq.symm, HEq.symm⟩

/-!
### Rewriting paths along equalities of vertices
-/


open Path

/-- Change the endpoints of a path using equalities.
Implemented by transporting `p` along `hv` and then `hu` via `Eq.ndrec`. -/
def Path.cast {u v u' v' : U} (hu : u = u') (hv : v = v') (p : Path u v) : Path u' v' :=
  Eq.ndrec (motive := (Path · v')) (Eq.ndrec p hv) hu

theorem Path.cast_eq_cast {u v u' v' : U} (hu : u = u') (hv : v = v') (p : Path u v) :
    p.cast hu hv = _root_.cast (by rw [hu, hv]) p := by
  subst_vars
  rfl

@[simp]
theorem Path.cast_rfl_rfl {u v : U} (p : Path u v) : p.cast rfl rfl = p :=
  rfl

@[simp]
theorem Path.cast_cast {u v u' v' u'' v'' : U} (p : Path u v) (hu : u = u') (hv : v = v')
    (hu' : u' = u'') (hv' : v' = v'') :
    (p.cast hu hv).cast hu' hv' = p.cast (hu.trans hu') (hv.trans hv') := by
  subst_vars
  rfl

@[simp]
theorem Path.cast_nil {u u' : U} (hu : u = u') :
    (Path.nil : Path u u).cast hu hu = Path.nil := by
  subst_vars
  rfl

theorem Path.cast_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (p : Path u v) :
    HEq (p.cast hu hv) p := by
  rw [Path.cast_eq_cast]
  exact _root_.cast_heq _ _

theorem Path.cast_eq_iff_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (p : Path u v)
    (p' : Path u' v') : p.cast hu hv = p' ↔ HEq p p' := by
  rw [Path.cast_eq_cast]
  exact _root_.cast_eq_iff_heq

theorem Path.eq_cast_iff_heq {u v u' v' : U} (hu : u = u') (hv : v = v') (p : Path u v)
    (p' : Path u' v') : p' = p.cast hu hv ↔ HEq p' p :=
  ⟨fun h => ((p.cast_eq_iff_heq hu hv p').1 h.symm).symm, fun h =>
    ((p.cast_eq_iff_heq hu hv p').2 h.symm).symm⟩

-- Casting commutes with `cons`: the endpoint equality is pushed onto the last arrow.
theorem Path.cast_cons {u v w u' w' : U} (p : Path u v) (e : v ⟶ w) (hu : u = u') (hw : w = w') :
    (p.cons e).cast hu hw = (p.cast hu rfl).cons (e.cast rfl hw) := by
  subst_vars
  rfl

theorem cast_eq_of_cons_eq_cons {u v v' w : U} {p : Path u v} {p' : Path u v'} {e : v ⟶ w}
    {e' : v' ⟶ w} (h : p.cons e = p'.cons e') :
    p.cast rfl (obj_eq_of_cons_eq_cons h) = p' := by
  rw [Path.cast_eq_iff_heq]
  exact heq_of_cons_eq_cons h

theorem hom_cast_eq_of_cons_eq_cons {u v v' w : U} {p : Path u v} {p' : Path u v'} {e : v ⟶ w}
    {e' : v' ⟶ w} (h : p.cons e = p'.cons e') :
    e.cast (obj_eq_of_cons_eq_cons h) rfl = e' := by
  rw [Hom.cast_eq_iff_heq]
  exact hom_heq_of_cons_eq_cons h

-- A length-zero path is `nil` after casting away the (derived) endpoint equality.
theorem eq_nil_of_length_zero {u v : U} (p : Path u v) (hzero : p.length = 0) :
    p.cast (eq_of_length_zero p hzero) rfl = Path.nil := by
  cases p
  · rfl
  · simp only [Nat.succ_ne_zero, length_cons] at hzero

end Quiver
Combinatorics\Quiver\ConnectedComponent.lean
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn
-/
import Mathlib.Combinatorics.Quiver.Subquiver
import Mathlib.Combinatorics.Quiver.Path
import Mathlib.Combinatorics.Quiver.Symmetric

/-!
## Weakly connected components

For a quiver `V`, define the type `WeaklyConnectedComponent V` as the quotient of `V`
by the relation which identifies `a` with `b` if there is a path from `a` to `b` in
`Symmetrify V`. (These zigzags can be seen as a proof-relevant analogue of `EqvGen`.)

Strongly connected components have not yet been defined.
-/

universe v u

namespace Quiver

variable (V : Type*) [Quiver.{u+1} V]

/-- Two vertices are related in the zigzag setoid if there is a
zigzag of arrows from one to the other.
Reflexivity is `Path.nil`, symmetry is path reversal (available in `Symmetrify V`),
and transitivity is path composition. -/
def zigzagSetoid : Setoid V :=
  ⟨fun a b ↦ Nonempty (@Path (Symmetrify V) _ a b), fun _ ↦ ⟨Path.nil⟩, fun ⟨p⟩ ↦ ⟨p.reverse⟩,
    fun ⟨p⟩ ⟨q⟩ ↦ ⟨p.comp q⟩⟩

/-- The type of weakly connected components of a directed graph. Two vertices are
in the same weakly connected component if there is a zigzag of arrows from one
to the other. -/
def WeaklyConnectedComponent : Type _ :=
  Quotient (zigzagSetoid V)

namespace WeaklyConnectedComponent

variable {V}

/-- The weakly connected component corresponding to a vertex. -/
protected def mk : V → WeaklyConnectedComponent V :=
  @Quotient.mk' _ (zigzagSetoid V)

instance : CoeTC V (WeaklyConnectedComponent V) :=
  ⟨WeaklyConnectedComponent.mk⟩

instance [Inhabited V] : Inhabited (WeaklyConnectedComponent V) :=
  ⟨show V from default⟩

protected theorem eq (a b : V) :
    (a : WeaklyConnectedComponent V) = b ↔ Nonempty (@Path (Symmetrify V) _ a b) :=
  Quotient.eq''

end WeaklyConnectedComponent

variable {V}

-- Without the explicit universe level in `Quiver.{v+1}` Lean comes up with
-- `Quiver.{max u_2 u_3 + 1}`. This causes problems elsewhere, so we write `Quiver.{v+1}`.
/-- A wide subquiver `H` of `Symmetrify V` determines a wide subquiver of `V`, containing an
arrow `e` if either `e` or its reversal is in `H`. -/
def wideSubquiverSymmetrify (H : WideSubquiver (Symmetrify V)) : WideSubquiver V := fun _ _ ↦
  { e | H _ _ (Sum.inl e) ∨ H _ _ (Sum.inr e) }

end Quiver
Combinatorics\Quiver\Covering.lean
/-
Copyright (c) 2022 Antoine Labelle, Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle, Rémi Bottinelli
-/
import Mathlib.Combinatorics.Quiver.Cast
import Mathlib.Combinatorics.Quiver.Symmetric
import Mathlib.Data.Sigma.Basic
import Mathlib.Logic.Equiv.Basic
import Mathlib.Tactic.Common

/-!
# Covering

This file defines coverings of quivers as prefunctors that are bijective on the
so-called stars and costars at each vertex of the domain.

## Main definitions

* `Quiver.Star u` is the type of all arrows with source `u`;
* `Quiver.Costar u` is the type of all arrows with target `u`;
* `Prefunctor.star φ u` is the obvious function `star u → star (φ.obj u)`;
* `Prefunctor.costar φ u` is the obvious function `costar u → costar (φ.obj u)`;
* `Prefunctor.IsCovering φ` means that `φ.star u` and `φ.costar u` are bijections for all `u`;
* `Quiver.PathStar u` is the type of all paths with source `u`;
* `Prefunctor.pathStar u` is the obvious function `PathStar u → PathStar (φ.obj u)`.

## Main statements

* `Prefunctor.IsCovering.pathStar_bijective` states that if `φ` is a covering,
  then `φ.pathStar u` is a bijection for all `u`.
  In other words, every path in the codomain of `φ` lifts uniquely to its domain.

## TODO

Clean up the namespaces by renaming `Prefunctor` to `Quiver.Prefunctor`.

## Tags

Cover, covering, quiver, path, lift
-/


open Function Quiver

universe u v w

variable {U : Type _} [Quiver.{u + 1} U] {V : Type _} [Quiver.{v + 1} V] (φ : U ⥤q V)
  {W : Type _} [Quiver.{w + 1} W] (ψ : V ⥤q W)

/-- The `Quiver.Star` at a vertex is the collection of arrows whose source is the vertex.
The type `Quiver.Star u` is defined to be `Σ (v : U), (u ⟶ v)`. -/
abbrev Quiver.Star (u : U) :=
  Σ v : U, u ⟶ v

/-- Constructor for `Quiver.Star`. Defined to be `Sigma.mk`. -/
protected abbrev Quiver.Star.mk {u v : U} (f : u ⟶ v) : Quiver.Star u :=
  ⟨_, f⟩

/-- The `Quiver.Costar` at a vertex is the collection of arrows whose target is the vertex.
The type `Quiver.Costar v` is defined to be `Σ (u : U), (u ⟶ v)`. -/
abbrev Quiver.Costar (v : U) :=
  Σ u : U, u ⟶ v

/-- Constructor for `Quiver.Costar`. Defined to be `Sigma.mk`. -/
protected abbrev Quiver.Costar.mk {u v : U} (f : u ⟶ v) : Quiver.Costar v :=
  ⟨_, f⟩

/-- A prefunctor induces a map of `Quiver.Star` at every vertex. -/
@[simps]
def Prefunctor.star (u : U) : Quiver.Star u → Quiver.Star (φ.obj u) := fun F =>
  Quiver.Star.mk (φ.map F.2)

/-- A prefunctor induces a map of `Quiver.Costar` at every vertex. -/
@[simps]
def Prefunctor.costar (u : U) : Quiver.Costar u → Quiver.Costar (φ.obj u) := fun F =>
  Quiver.Costar.mk (φ.map F.2)

@[simp]
theorem Prefunctor.star_apply {u v : U} (e : u ⟶ v) :
    φ.star u (Quiver.Star.mk e) = Quiver.Star.mk (φ.map e) :=
  rfl

@[simp]
theorem Prefunctor.costar_apply {u v : U} (e : u ⟶ v) :
    φ.costar v (Quiver.Costar.mk e) = Quiver.Costar.mk (φ.map e) :=
  rfl

theorem Prefunctor.star_comp (u : U) : (φ ⋙q ψ).star u = ψ.star (φ.obj u) ∘ φ.star u :=
  rfl

theorem Prefunctor.costar_comp (u : U) : (φ ⋙q ψ).costar u = ψ.costar (φ.obj u) ∘ φ.costar u :=
  rfl

/-- A prefunctor is a covering of quivers if it defines bijections on all stars and costars. -/
protected structure Prefunctor.IsCovering : Prop where
  star_bijective : ∀ u, Bijective (φ.star u)
  costar_bijective : ∀ u, Bijective (φ.costar u)

@[simp]
theorem Prefunctor.IsCovering.map_injective (hφ : φ.IsCovering) {u v : U} :
    Injective fun f : u ⟶ v => φ.map f := by
  rintro f g he
  -- Injectivity on hom-types follows from injectivity of the star map at `u`.
  have : φ.star u (Quiver.Star.mk f) = φ.star u (Quiver.Star.mk g) := by simpa using he
  simpa using (hφ.star_bijective u).left this

theorem Prefunctor.IsCovering.comp (hφ : φ.IsCovering) (hψ : ψ.IsCovering) :
    (φ ⋙q ψ).IsCovering :=
  ⟨fun _ => (hψ.star_bijective _).comp (hφ.star_bijective _), fun _ =>
    (hψ.costar_bijective _).comp (hφ.costar_bijective _)⟩

theorem Prefunctor.IsCovering.of_comp_right (hψ : ψ.IsCovering) (hφψ : (φ ⋙q ψ).IsCovering) :
    φ.IsCovering :=
  ⟨fun _ => (Bijective.of_comp_iff' (hψ.star_bijective _) _).mp (hφψ.star_bijective _), fun _ =>
    (Bijective.of_comp_iff' (hψ.costar_bijective _) _).mp (hφψ.costar_bijective _)⟩

theorem Prefunctor.IsCovering.of_comp_left (hφ : φ.IsCovering) (hφψ : (φ ⋙q ψ).IsCovering)
    (φsur : Surjective φ.obj) : ψ.IsCovering := by
  -- Surjectivity of `φ` on objects lets us pull every vertex of `V` back to `U`.
  refine ⟨fun v => ?_, fun v => ?_⟩ <;> obtain ⟨u, rfl⟩ := φsur v
  exacts [(Bijective.of_comp_iff _ (hφ.star_bijective u)).mp (hφψ.star_bijective u),
    (Bijective.of_comp_iff _ (hφ.costar_bijective u)).mp (hφψ.costar_bijective u)]

/-- The star of the symmetrification of a quiver at a vertex `u` is equivalent to the sum of the
star and the costar at `u` in the original quiver. -/
def Quiver.symmetrifyStar (u : U) :
    Quiver.Star (Symmetrify.of.obj u) ≃ Quiver.Star u ⊕ Quiver.Costar u :=
  Equiv.sigmaSumDistrib _ _

/-- The costar of the symmetrification of a quiver at a vertex `u` is equivalent to the sum of the
costar and the star at `u` in the original quiver. -/
def Quiver.symmetrifyCostar (u : U) :
    Quiver.Costar (Symmetrify.of.obj u) ≃ Quiver.Costar u ⊕ Quiver.Star u :=
  Equiv.sigmaSumDistrib _ _

theorem Prefunctor.symmetrifyStar (u : U) :
    φ.symmetrify.star u =
      (Quiver.symmetrifyStar _).symm ∘
        Sum.map (φ.star u) (φ.costar u) ∘ Quiver.symmetrifyStar u := by
  -- This used to be `rw`, but we need `erw` after leanprover/lean4#2644
  erw [Equiv.eq_symm_comp]
  ext ⟨v, f | g⟩ <;>
    -- porting note (#10745): was `simp [Quiver.symmetrifyStar]`
    simp only [Quiver.symmetrifyStar, Function.comp_apply] <;>
    erw [Equiv.sigmaSumDistrib_apply, Equiv.sigmaSumDistrib_apply] <;>
    simp

protected theorem Prefunctor.symmetrifyCostar (u : U) :
    φ.symmetrify.costar u =
      (Quiver.symmetrifyCostar _).symm ∘
        Sum.map (φ.costar u) (φ.star u) ∘ Quiver.symmetrifyCostar u := by
  -- This used to be `rw`, but we need `erw` after leanprover/lean4#2644
  erw [Equiv.eq_symm_comp]
  ext ⟨v, f | g⟩ <;>
    -- porting note (#10745): was `simp [Quiver.symmetrifyCostar]`
    simp only [Quiver.symmetrifyCostar, Function.comp_apply] <;>
    erw [Equiv.sigmaSumDistrib_apply, Equiv.sigmaSumDistrib_apply] <;>
    simp

protected theorem Prefunctor.IsCovering.symmetrify (hφ : φ.IsCovering) :
    φ.symmetrify.IsCovering := by
  refine ⟨fun u => ?_, fun u => ?_⟩ <;>
    -- Porting note: was
    -- simp [φ.symmetrifyStar, φ.symmetrifyCostar, hφ.star_bijective u, hφ.costar_bijective u]
    simp only [φ.symmetrifyStar, φ.symmetrifyCostar] <;>
    erw [EquivLike.comp_bijective, EquivLike.bijective_comp] <;>
    simp [hφ.star_bijective u, hφ.costar_bijective u]

/-- The path star at a vertex `u` is the type of all paths starting at `u`.
The type `Quiver.PathStar u` is defined to be `Σ v : U, Path u v`. -/
abbrev Quiver.PathStar (u : U) :=
  Σ v : U, Path u v

/-- Constructor for `Quiver.PathStar`. Defined to be `Sigma.mk`. -/
protected abbrev Quiver.PathStar.mk {u v : U} (p : Path u v) : Quiver.PathStar u :=
  ⟨_, p⟩

/-- A prefunctor induces a map of path stars. -/
def Prefunctor.pathStar (u : U) : Quiver.PathStar u → Quiver.PathStar (φ.obj u) := fun p =>
  Quiver.PathStar.mk (φ.mapPath p.2)

@[simp]
theorem Prefunctor.pathStar_apply {u v : U} (p : Path u v) :
    φ.pathStar u (Quiver.PathStar.mk p) = Quiver.PathStar.mk (φ.mapPath p) :=
  rfl

-- Uniqueness of path lifts: proved by induction on the path, peeling one arrow at a
-- time and using injectivity of the star maps for the final arrow.
theorem Prefunctor.pathStar_injective (hφ : ∀ u, Injective (φ.star u)) (u : U) :
    Injective (φ.pathStar u) := by
  dsimp (config := { unfoldPartialApp := true }) [Prefunctor.pathStar, Quiver.PathStar.mk]
  rintro ⟨v₁, p₁⟩
  induction' p₁ with x₁ y₁ p₁ e₁ ih <;>
    rintro ⟨y₂, p₂⟩ <;>
    cases' p₂ with x₂ _ p₂ e₂ <;>
    intro h <;>
    -- Porting note: added `Sigma.mk.inj_iff`
    simp only [Prefunctor.pathStar_apply, Prefunctor.mapPath_nil, Prefunctor.mapPath_cons,
      Sigma.mk.inj_iff] at h
  · -- Porting note: goal not present in lean3.
    rfl
  · exfalso
    cases' h with h h'
    rw [← Path.eq_cast_iff_heq rfl h.symm, Path.cast_cons] at h'
    exact (Path.nil_ne_cons _ _) h'
  · exfalso
    cases' h with h h'
    rw [← Path.cast_eq_iff_heq rfl h, Path.cast_cons] at h'
    exact (Path.cons_ne_nil _ _) h'
  · cases' h with hφy h'
    rw [← Path.cast_eq_iff_heq rfl hφy, Path.cast_cons, Path.cast_rfl_rfl] at h'
    have hφx := Path.obj_eq_of_cons_eq_cons h'
    have hφp := Path.heq_of_cons_eq_cons h'
    have hφe := HEq.trans (Hom.cast_heq rfl hφy _).symm (Path.hom_heq_of_cons_eq_cons h')
    have h_path_star : φ.pathStar u ⟨x₁, p₁⟩ = φ.pathStar u ⟨x₂, p₂⟩ := by
      simp only [Prefunctor.pathStar_apply, Sigma.mk.inj_iff]; exact ⟨hφx, hφp⟩
    cases ih h_path_star
    have h_star : φ.star x₁ ⟨y₁, e₁⟩ = φ.star x₁ ⟨y₂, e₂⟩ := by
      simp only [Prefunctor.star_apply, Sigma.mk.inj_iff]; exact ⟨hφy, hφe⟩
    cases hφ x₁ h_star
    rfl

-- Existence of path lifts: each arrow of the target path is lifted in turn using
-- surjectivity of the star maps.
theorem Prefunctor.pathStar_surjective (hφ : ∀ u, Surjective (φ.star u)) (u : U) :
    Surjective (φ.pathStar u) := by
  dsimp (config := { unfoldPartialApp := true }) [Prefunctor.pathStar, Quiver.PathStar.mk]
  rintro ⟨v, p⟩
  induction' p with v' v'' p' ev ih
  · use ⟨u, Path.nil⟩
    simp only [Prefunctor.mapPath_nil, eq_self_iff_true, heq_iff_eq, and_self_iff]
  · obtain ⟨⟨u', q'⟩, h⟩ := ih
    simp only at h
    obtain ⟨rfl, rfl⟩ := h
    obtain ⟨⟨u'', eu⟩, k⟩ := hφ u' ⟨_, ev⟩
    simp only [star_apply, Sigma.mk.inj_iff] at k
    -- Porting note: was `obtain ⟨rfl, rfl⟩ := k`
    obtain ⟨rfl, k⟩ := k
    simp only [heq_eq_eq] at k
    subst k
    use ⟨_, q'.cons eu⟩
    simp only [Prefunctor.mapPath_cons, eq_self_iff_true, heq_iff_eq, and_self_iff]

theorem Prefunctor.pathStar_bijective (hφ : ∀ u, Bijective (φ.star u)) (u : U) :
    Bijective (φ.pathStar u) :=
  ⟨φ.pathStar_injective (fun u => (hφ u).1) _, φ.pathStar_surjective (fun u => (hφ u).2) _⟩

namespace Prefunctor.IsCovering

variable {φ}

protected theorem pathStar_bijective (hφ : φ.IsCovering) (u : U) :
    Bijective (φ.pathStar u) :=
  φ.pathStar_bijective hφ.1 u

end Prefunctor.IsCovering

section HasInvolutiveReverse

variable [HasInvolutiveReverse U] [HasInvolutiveReverse V]

/-- In a quiver with involutive inverses, the star and costar at every vertex are equivalent.
This map is induced by `Quiver.reverse`. -/
@[simps]
def Quiver.starEquivCostar (u : U) : Quiver.Star u ≃ Quiver.Costar u where
  toFun e := ⟨e.1, reverse e.2⟩
  invFun e := ⟨e.1, reverse e.2⟩
  left_inv e := by simp [Sigma.ext_iff]
  right_inv e := by simp [Sigma.ext_iff]

@[simp]
theorem Quiver.starEquivCostar_apply {u v : U} (e : u ⟶ v) :
    Quiver.starEquivCostar u (Quiver.Star.mk e) = Quiver.Costar.mk (reverse e) :=
  rfl

@[simp]
theorem Quiver.starEquivCostar_symm_apply {u v : U} (e : u ⟶ v) :
    (Quiver.starEquivCostar v).symm (Quiver.Costar.mk e) = Quiver.Star.mk (reverse e) :=
  rfl

variable [Prefunctor.MapReverse φ]

theorem Prefunctor.costar_conj_star (u : U) :
    φ.costar u =
      Quiver.starEquivCostar (φ.obj u) ∘ φ.star u ∘ (Quiver.starEquivCostar u).symm := by
  ext ⟨v, f⟩ <;> simp

theorem Prefunctor.bijective_costar_iff_bijective_star (u : U) :
    Bijective (φ.costar u) ↔ Bijective (φ.star u) := by
  rw [Prefunctor.costar_conj_star φ, EquivLike.comp_bijective, EquivLike.bijective_comp]

theorem Prefunctor.isCovering_of_bijective_star (h : ∀ u, Bijective (φ.star u)) :
    φ.IsCovering :=
  ⟨h, fun u => (φ.bijective_costar_iff_bijective_star u).2 (h u)⟩

theorem Prefunctor.isCovering_of_bijective_costar (h : ∀ u, Bijective (φ.costar u)) :
    φ.IsCovering :=
  ⟨fun u => (φ.bijective_costar_iff_bijective_star u).1 (h u), h⟩

end HasInvolutiveReverse
Combinatorics\Quiver\Path.lean
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn, Scott Morrison
-/
import Mathlib.Combinatorics.Quiver.Basic
import Mathlib.Logic.Lemmas

/-!
# Paths in quivers

Given a quiver `V`, we define the type of paths from `a : V` to `b : V` as an inductive
family. We define composition of paths and the action of prefunctors on paths.
-/

open Function

universe v v₁ v₂ u u₁ u₂

namespace Quiver

/-- `Path a b` is the type of paths from `a` to `b` through the arrows of `G`. -/
inductive Path {V : Type u} [Quiver.{v} V] (a : V) : V → Sort max (u + 1) v
  | nil : Path a a
  | cons : ∀ {b c : V}, Path a b → (b ⟶ c) → Path a c

-- See issue lean4#2049
compile_inductive% Path

/-- An arrow viewed as a path of length one. -/
def Hom.toPath {V} [Quiver V] {a b : V} (e : a ⟶ b) : Path a b :=
  Path.nil.cons e

namespace Path

variable {V : Type u} [Quiver V] {a b c d : V}

lemma nil_ne_cons (p : Path a b) (e : b ⟶ a) : Path.nil ≠ p.cons e :=
  fun h => by injection h

lemma cons_ne_nil (p : Path a b) (e : b ⟶ a) : p.cons e ≠ Path.nil :=
  fun h => by injection h

lemma obj_eq_of_cons_eq_cons {p : Path a b} {p' : Path a c} {e : b ⟶ d} {e' : c ⟶ d}
    (h : p.cons e = p'.cons e') : b = c := by injection h

lemma heq_of_cons_eq_cons {p : Path a b} {p' : Path a c} {e : b ⟶ d} {e' : c ⟶ d}
    (h : p.cons e = p'.cons e') : HEq p p' := by injection h

lemma hom_heq_of_cons_eq_cons {p : Path a b} {p' : Path a c} {e : b ⟶ d} {e' : c ⟶ d}
    (h : p.cons e = p'.cons e') : HEq e e' := by injection h

/-- The length of a path is the number of arrows it uses. -/
def length {a : V} : ∀ {b : V}, Path a b → ℕ
  | _, nil => 0
  | _, cons p _ => p.length + 1

instance {a : V} : Inhabited (Path a a) :=
  ⟨nil⟩

@[simp]
theorem length_nil {a : V} : (nil : Path a a).length = 0 :=
  rfl

@[simp]
theorem length_cons (a b c : V) (p : Path a b) (e : b ⟶ c) : (p.cons e).length = p.length + 1 :=
  rfl

theorem eq_of_length_zero (p : Path a b) (hzero : p.length = 0) : a = b := by
  cases p
  · rfl
  · cases Nat.succ_ne_zero _ hzero

/-- Composition of paths. -/
def comp {a b : V} : ∀ {c}, Path a b → Path b c → Path a c
  | _, p, nil => p
  | _, p, cons q e => (p.comp q).cons e

@[simp]
theorem comp_cons {a b c d : V} (p : Path a b) (q : Path b c) (e : c ⟶ d) :
    p.comp (q.cons e) = (p.comp q).cons e :=
  rfl

@[simp]
theorem comp_nil {a b : V} (p : Path a b) : p.comp Path.nil = p :=
  rfl

@[simp]
theorem nil_comp {a : V} : ∀ {b} (p : Path a b), Path.nil.comp p = p
  | _, nil => rfl
  | _, cons p _ => by rw [comp_cons, nil_comp p]

@[simp]
theorem comp_assoc {a b c : V} :
    ∀ {d} (p : Path a b) (q : Path b c) (r : Path c d), (p.comp q).comp r = p.comp (q.comp r)
  | _, _, _, nil => rfl
  | _, p, q, cons r _ => by rw [comp_cons, comp_cons, comp_cons, comp_assoc p q r]

@[simp]
theorem length_comp (p : Path a b) :
    ∀ {c} (q : Path b c), (p.comp q).length = p.length + q.length
  | _, nil => rfl
  | _, cons _ _ => congr_arg Nat.succ (length_comp _ _)

-- Cancellation of composition when the right factors have equal length.
theorem comp_inj {p₁ p₂ : Path a b} {q₁ q₂ : Path b c} (hq : q₁.length = q₂.length) :
    p₁.comp q₁ = p₂.comp q₂ ↔ p₁ = p₂ ∧ q₁ = q₂ := by
  refine ⟨fun h => ?_, by rintro ⟨rfl, rfl⟩; rfl⟩
  induction' q₁ with d₁ e₁ q₁ f₁ ih <;> obtain _ | ⟨q₂, f₂⟩ := q₂
  · exact ⟨h, rfl⟩
  · cases hq
  · cases hq
  · simp only [comp_cons, cons.injEq] at h
    obtain rfl := h.1
    obtain ⟨rfl, rfl⟩ := ih (Nat.succ.inj hq) h.2.1.eq
    rw [h.2.2.eq]
    exact ⟨rfl, rfl⟩

-- Cancellation of composition when the left factors have equal length; reduced to `comp_inj`
-- by comparing total lengths.
theorem comp_inj' {p₁ p₂ : Path a b} {q₁ q₂ : Path b c} (h : p₁.length = p₂.length) :
    p₁.comp q₁ = p₂.comp q₂ ↔ p₁ = p₂ ∧ q₁ = q₂ :=
  ⟨fun h_eq => (comp_inj <| Nat.add_left_cancel (n := p₂.length) <|
    by simpa [h] using congr_arg length h_eq).1 h_eq, by rintro ⟨rfl, rfl⟩; rfl⟩

theorem comp_injective_left (q : Path b c) : Injective fun p : Path a b => p.comp q :=
  fun _ _ h => ((comp_inj rfl).1 h).1

theorem comp_injective_right (p : Path a b) : Injective (p.comp : Path b c → Path a c) :=
  fun _ _ h => ((comp_inj' rfl).1 h).2

@[simp]
theorem comp_inj_left {p₁ p₂ : Path a b} {q : Path b c} : p₁.comp q = p₂.comp q ↔ p₁ = p₂ :=
  q.comp_injective_left.eq_iff

@[simp]
theorem comp_inj_right {p : Path a b} {q₁ q₂ : Path b c} : p.comp q₁ = p.comp q₂ ↔ q₁ = q₂ :=
  p.comp_injective_right.eq_iff

/-- Turn a path into a list. The list contains `a` at its head, but not `b` a priori. -/
@[simp]
def toList : ∀ {b : V}, Path a b → List V
  | _, nil => []
  | _, @cons _ _ _ c _ p _ => c :: p.toList

/-- `Quiver.Path.toList` is a contravariant functor. The inversion comes from `Quiver.Path` and
`List` having different preferred directions for adding elements. -/
@[simp]
theorem toList_comp (p : Path a b) :
    ∀ {c} (q : Path b c), (p.comp q).toList = q.toList ++ p.toList
  | _, nil => by simp
  | _, @cons _ _ _ d _ q _ => by simp [toList_comp]

theorem toList_chain_nonempty :
    ∀ {b} (p : Path a b), p.toList.Chain (fun x y => Nonempty (y ⟶ x)) b
  | _, nil => List.Chain.nil
  | _, cons p f => p.toList_chain_nonempty.cons ⟨f⟩

variable [∀ a b : V, Subsingleton (a ⟶ b)]

-- In a thin quiver a path is determined by its vertex list.
theorem toList_injective (a : V) : ∀ b, Injective (toList : Path a b → List V)
  | _, nil, nil, _ => rfl
  | _, nil, @cons _ _ _ c _ p f, h => by cases h
  | _, @cons _ _ _ c _ p f, nil, h => by cases h
  | _, @cons _ _ _ c _ p f, @cons _ _ _ t _ C D, h => by
    simp only [toList, List.cons.injEq] at h
    obtain ⟨rfl, hAC⟩ := h
    simp [toList_injective _ _ hAC, eq_iff_true_of_subsingleton]

@[simp]
theorem toList_inj {p q : Path a b} : p.toList = q.toList ↔ p = q :=
  (toList_injective _ _).eq_iff

end Path

end Quiver

namespace Prefunctor

open Quiver

variable {V : Type u₁} [Quiver.{v₁} V] {W : Type u₂} [Quiver.{v₂} W] (F : V ⥤q W)

/-- The image of a path under a prefunctor. -/
def mapPath {a : V} : ∀ {b : V}, Path a b → Path (F.obj a) (F.obj b)
  | _, Path.nil => Path.nil
  | _, Path.cons p e => Path.cons (mapPath p) (F.map e)

@[simp]
theorem mapPath_nil (a : V) : F.mapPath (Path.nil : Path a a) = Path.nil :=
  rfl

@[simp]
theorem mapPath_cons {a b c : V} (p : Path a b) (e : b ⟶ c) :
    F.mapPath (Path.cons p e) = Path.cons (F.mapPath p) (F.map e) :=
  rfl

@[simp]
theorem mapPath_comp {a b : V} (p : Path a b) :
    ∀ {c : V} (q : Path b c), F.mapPath (p.comp q) = (F.mapPath p).comp (F.mapPath q)
  | _, Path.nil => rfl
  | c, Path.cons q e => by dsimp; rw [mapPath_comp p q]

@[simp]
theorem mapPath_toPath {a b : V} (f : a ⟶ b) : F.mapPath f.toPath = (F.map f).toPath :=
  rfl

end Prefunctor
Combinatorics\Quiver\Push.lean
/-
Copyright (c) 2022 Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémi Bottinelli
-/
import Mathlib.Combinatorics.Quiver.Basic

/-!
# Pushing a quiver structure along a map

Given a map `σ : V → W` and a `Quiver` instance on `V`, this files defines a `Quiver` instance
on `W` by associating to each arrow `v ⟶ v'` in `V` an arrow `σ v ⟶ σ v'` in `W`.
-/


namespace Quiver

universe v v₁ v₂ u u₁ u₂

variable {V : Type*} [Quiver V] {W : Type*} (σ : V → W)

/-- The `Quiver` instance obtained by pushing arrows of `V` along the map `σ : V → W` -/
@[nolint unusedArguments]
def Push (_ : V → W) :=
  W

instance [h : Nonempty W] : Nonempty (Push σ) :=
  h

/-- The quiver structure obtained by pushing arrows of `V` along the map `σ : V → W` -/
inductive PushQuiver {V : Type u} [Quiver.{v} V] {W : Type u₂} (σ : V → W) : W → W → Type max u u₂ v
  | arrow {X Y : V} (f : X ⟶ Y) : PushQuiver σ (σ X) (σ Y)

instance : Quiver (Push σ) :=
  ⟨PushQuiver σ⟩

namespace Push

/-- The prefunctor induced by pushing arrows via `σ` -/
def of : V ⥤q Push σ where
  obj := σ
  map f := PushQuiver.arrow f

@[simp]
theorem of_obj : (of σ).obj = σ :=
  rfl

variable {W' : Type*} [Quiver W'] (φ : V ⥤q W') (τ : W → W') (h : ∀ x, φ.obj x = τ (σ x))

/-- Given a function `τ : W → W'` and a prefunctor `φ : V ⥤q W'`, one can extend `τ` to be
a prefunctor `W ⥤q W'` if `τ` and `σ` factorize `φ` at the level of objects, where `W` is given
the pushforward quiver structure `Push σ`. -/
noncomputable def lift : Push σ ⥤q W' where
  obj := τ
  -- The arrow map is defined by recursion on `PushQuiver`; the `rw`s transport
  -- `φ.map f : φ.obj X ⟶ φ.obj Y` along the factorization hypothesis `h`,
  -- which is why the definition is `noncomputable`.
  map :=
    @PushQuiver.rec V _ W σ (fun X Y _ => τ X ⟶ τ Y) @fun X Y f => by
      dsimp only
      rw [← h X, ← h Y]
      exact φ.map f

theorem lift_obj : (lift σ φ τ h).obj = τ :=
  rfl

theorem lift_comp : (of σ ⋙q lift σ φ τ h) = φ := by
  fapply Prefunctor.ext
  · rintro X
    simp only [Prefunctor.comp_obj]
    apply Eq.symm
    exact h X
  · rintro X Y f
    simp only [Prefunctor.comp_map]
    apply eq_of_heq
    iterate 2 apply (cast_heq _ _).trans
    apply HEq.symm
    apply (eqRec_heq _ _).trans
    -- Transporting a term along an equality of indices is heterogeneously the identity.
    have : ∀ {α γ} {β : α → γ → Sort _} {a a'} (p : a = a') g (b : β a g), HEq (p ▸ b) b := by
      intros
      subst_vars
      rfl
    apply this

theorem lift_unique (Φ : Push σ ⥤q W') (Φ₀ : Φ.obj = τ) (Φcomp : (of σ ⋙q Φ) = φ) :
    Φ = lift σ φ τ h := by
  dsimp only [of, lift]
  fapply Prefunctor.ext
  · intro X
    simp only
    rw [Φ₀]
  · rintro _ _ ⟨⟩
    subst_vars
    simp only [Prefunctor.comp_map, cast_eq]
    rfl

end Push

end Quiver
Combinatorics\Quiver\SingleObj.lean
/-
Copyright (c) 2023 Antoine Labelle. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle
-/
import Mathlib.Combinatorics.Quiver.Cast
import Mathlib.Combinatorics.Quiver.Symmetric

/-!
# Single-object quiver

Single object quiver with a given arrows type.

## Main definitions

Given a type `α`, `SingleObj α` is the `Unit` type, whose single object is called `star α`, with
`Quiver` structure such that `star α ⟶ star α` is the type `α`.
An element `x : α` can be reinterpreted as an element of `star α ⟶ star α` using `toHom`.
More generally, a list of elements of `α` can be reinterpreted as a path from `star α` to itself
using `pathEquivList`.
-/

namespace Quiver

/-- Type tag on `Unit` used to define single-object quivers. -/
-- Porting note: Removed `deriving Unique`.
@[nolint unusedArguments]
def SingleObj (_ : Type*) : Type :=
  Unit

-- Porting note: `deriving` from above has been moved to below.
instance {α : Type*} : Unique (SingleObj α) where
  default := ⟨⟩
  uniq := fun _ => rfl

namespace SingleObj

variable (α β γ : Type*)

-- Arrows from the unique object to itself are exactly the elements of `α`.
instance : Quiver (SingleObj α) :=
  ⟨fun _ _ => α⟩

/-- The single object in `SingleObj α`. -/
def star : SingleObj α :=
  Unit.unit

instance : Inhabited (SingleObj α) :=
  ⟨star α⟩

variable {α β γ}

-- Any two objects of `SingleObj α` are equal, since the carrier type is `Unit`.
lemma ext {x y : SingleObj α} : x = y := Unit.ext x y

-- See note [reducible non-instances]
/-- Equip `SingleObj α` with a reverse operation. -/
abbrev hasReverse (rev : α → α) : HasReverse (SingleObj α) := ⟨rev⟩

-- See note [reducible non-instances]
/-- Equip `SingleObj α` with an involutive reverse operation. -/
abbrev hasInvolutiveReverse (rev : α → α) (h : Function.Involutive rev) :
    HasInvolutiveReverse (SingleObj α) where
  toHasReverse := hasReverse rev
  inv' := h

/-- The type of arrows from `star α` to itself is equivalent to the original type `α`. -/
@[simps!]
def toHom : α ≃ (star α ⟶ star α) :=
  Equiv.refl _

/-- Prefunctors between two `SingleObj` quivers correspond to functions between the
corresponding arrows types.
-/
@[simps]
def toPrefunctor : (α → β) ≃ SingleObj α ⥤q SingleObj β where
  toFun f := ⟨id, f⟩
  invFun f a := f.map (toHom a)
  left_inv _ := rfl
  right_inv _ := rfl

theorem toPrefunctor_id : toPrefunctor id = 𝟭q (SingleObj α) :=
  rfl

@[simp]
theorem toPrefunctor_symm_id : toPrefunctor.symm (𝟭q (SingleObj α)) = id :=
  rfl

theorem toPrefunctor_comp (f : α → β) (g : β → γ) :
    toPrefunctor (g ∘ f) = toPrefunctor f ⋙q toPrefunctor g :=
  rfl

@[simp]
theorem toPrefunctor_symm_comp (f : SingleObj α ⥤q SingleObj β) (g : SingleObj β ⥤q SingleObj γ) :
    toPrefunctor.symm (f ⋙q g) = toPrefunctor.symm g ∘ toPrefunctor.symm f := by
  simp only [Equiv.symm_apply_eq, toPrefunctor_comp, Equiv.apply_symm_apply]

/-- Auxiliary definition for `Quiver.SingleObj.pathEquivList`.
Converts a path in the quiver `SingleObj α` into a list of elements of type `α`.
-/
def pathToList : ∀ {x : SingleObj α}, Path (star α) x → List α
  | _, Path.nil => []
  | _, Path.cons p a => a :: pathToList p

/-- Auxiliary definition for `Quiver.SingleObj.pathEquivList`.
Converts a list of elements of type `α` into a path in the quiver `SingleObj α`.
-/
@[simp]
def listToPath : List α → Path (star α) (star α)
  | [] => Path.nil
  | a :: l => (listToPath l).cons a

-- The `Path.cast` transports the endpoint along `ext`, since the endpoint `x` is an
-- arbitrary object of `SingleObj α` (all objects are equal).
theorem listToPath_pathToList {x : SingleObj α} (p : Path (star α) x) :
    listToPath (pathToList p) = p.cast rfl ext := by
  induction' p with y z p a ih
  · rfl
  · dsimp at *; rw [ih]

theorem pathToList_listToPath (l : List α) : pathToList (listToPath l) = l := by
  induction' l with a l ih
  · rfl
  · change a :: pathToList (listToPath l) = a :: l; rw [ih]

/-- Paths in `SingleObj α` quiver correspond to lists of elements of type `α`. -/
def pathEquivList : Path (star α) (star α) ≃ List α :=
  ⟨pathToList, listToPath, fun p => listToPath_pathToList p, pathToList_listToPath⟩

@[simp]
theorem pathEquivList_nil : pathEquivList Path.nil = ([] : List α) :=
  rfl

@[simp]
theorem pathEquivList_cons (p : Path (star α) (star α)) (a : star α ⟶ star α) :
    pathEquivList (Path.cons p a) = a :: pathToList p :=
  rfl

@[simp]
theorem pathEquivList_symm_nil : pathEquivList.symm ([] : List α) = Path.nil :=
  rfl

@[simp]
theorem pathEquivList_symm_cons (l : List α) (a : α) :
    pathEquivList.symm (a :: l) = Path.cons (pathEquivList.symm l) a :=
  rfl

end SingleObj

end Quiver
Combinatorics\Quiver\Subquiver.lean
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn
-/
import Mathlib.Order.Notation
import Mathlib.Combinatorics.Quiver.Basic

/-!
## Wide subquivers

A wide subquiver `H` of a quiver `G` consists of a subset of the edge set `a ⟶ b` for every pair
of vertices `a b : V`. We include 'wide' in the name to emphasize that these subquivers by
definition contain all vertices.
-/

universe v u

/-- A wide subquiver `H` of `G` picks out a set `H a b` of arrows from `a` to `b` for every pair
of vertices `a b`.

NB: this does not work for `Prop`-valued quivers. It requires `G : Quiver.{v+1} V`. -/
def WideSubquiver (V) [Quiver.{v + 1} V] :=
  ∀ a b : V, Set (a ⟶ b)

/-- A type synonym for `V`, when thought of as a quiver having only the arrows from
some `WideSubquiver`. -/
-- Porting note: no hasNonemptyInstance linter yet
@[nolint unusedArguments]
def WideSubquiver.toType (V) [Quiver V] (_ : WideSubquiver V) : Type u :=
  V

instance wideSubquiverHasCoeToSort {V} [Quiver V] : CoeSort (WideSubquiver V) (Type u) where
  coe H := WideSubquiver.toType V H

/-- A wide subquiver viewed as a quiver on its own. -/
-- Arrows of `H` are arrows of `V` bundled with a proof of membership in `H a b`.
instance WideSubquiver.quiver {V} [Quiver V] (H : WideSubquiver V) : Quiver H :=
  ⟨fun a b ↦ { f // f ∈ H a b }⟩

namespace Quiver

-- The empty wide subquiver: no arrows between any pair of vertices.
instance {V} [Quiver V] : Bot (WideSubquiver V) :=
  ⟨fun _ _ ↦ ∅⟩

-- The full wide subquiver: all arrows of `V`.
instance {V} [Quiver V] : Top (WideSubquiver V) :=
  ⟨fun _ _ ↦ Set.univ⟩

noncomputable instance {V} [Quiver V] : Inhabited (WideSubquiver V) :=
  ⟨⊤⟩

-- TODO Unify with `CategoryTheory.Arrow`? (The fields have been named to match.)
/-- `Total V` is the type of _all_ arrows of `V`. -/
-- Porting note: no hasNonemptyInstance linter yet
@[ext]
structure Total (V : Type u) [Quiver.{v} V] : Sort max (u + 1) v where
  /-- the source vertex of an arrow -/
  left : V
  /-- the target vertex of an arrow -/
  right : V
  /-- an arrow -/
  hom : left ⟶ right

/-- A wide subquiver of `G` can equivalently be viewed as a total set of arrows. -/
def wideSubquiverEquivSetTotal {V} [Quiver V] :
    WideSubquiver V ≃ Set (Total V) where
  toFun H := { e | e.hom ∈ H e.left e.right }
  invFun S a b := { e | Total.mk a b e ∈ S }
  left_inv _ := rfl
  right_inv _ := rfl

/-- An `L`-labelling of a quiver assigns to every arrow an element of `L`. -/
def Labelling (V : Type u) [Quiver V] (L : Sort*) :=
  ∀ ⦃a b : V⦄, (a ⟶ b) → L

instance {V : Type u} [Quiver V] (L) [Inhabited L] : Inhabited (Labelling V L) :=
  ⟨fun _ _ _ ↦ default⟩

end Quiver
Combinatorics\Quiver\Symmetric.lean
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn, Antoine Labelle, Rémi Bottinelli
-/
import Mathlib.Combinatorics.Quiver.Path
import Mathlib.Combinatorics.Quiver.Push
import Batteries.Data.Sum.Lemmas

/-!
## Symmetric quivers and arrow reversal

This file contains constructions related to symmetric quivers:

* `Symmetrify V` adds formal inverses to each arrow of `V`.
* `HasReverse` is the class of quivers where each arrow has an assigned formal inverse.
* `HasInvolutiveReverse` extends `HasReverse` by requiring that the reverse of the reverse
  is equal to the original arrow.
* `Prefunctor.PreserveReverse` is the class of prefunctors mapping reverses to reverses.
* `Symmetrify.of`, `Symmetrify.lift`, and the associated lemmas witness the universal property
  of `Symmetrify`.
-/

universe v u w v'

namespace Quiver

/-- A type synonym for the symmetrized quiver (with an arrow both ways for each original arrow).
NB: this does not work for `Prop`-valued quivers. It requires `[Quiver.{v+1} V]`. -/
-- Porting note: no hasNonemptyInstance linter yet
def Symmetrify (V : Type*) := V

-- An arrow `a ⟶ b` in `Symmetrify V` is either an original arrow (`Sum.inl`) or the formal
-- reverse of an arrow `b ⟶ a` (`Sum.inr`); see `Hom.toPos` / `Hom.toNeg` below.
instance symmetrifyQuiver (V : Type u) [Quiver V] : Quiver (Symmetrify V) :=
  ⟨fun a b : V ↦ (a ⟶ b) ⊕ (b ⟶ a)⟩

variable (U V W : Type*) [Quiver.{u + 1} U] [Quiver.{v + 1} V] [Quiver.{w + 1} W]

/-- A quiver `HasReverse` if we can reverse an arrow `p` from `a` to `b` to get an arrow
`p.reverse` from `b` to `a`. -/
class HasReverse where
  /-- the map which sends an arrow to its reverse -/
  reverse' : ∀ {a b : V}, (a ⟶ b) → (b ⟶ a)

/-- Reverse the direction of an arrow. -/
def reverse {V} [Quiver.{v + 1} V] [HasReverse V] {a b : V} : (a ⟶ b) → (b ⟶ a) :=
  HasReverse.reverse'

/-- A quiver `HasInvolutiveReverse` if reversing twice is the identity. -/
class HasInvolutiveReverse extends HasReverse V where
  /-- `reverse` is involutive -/
  inv' : ∀ {a b : V} (f : a ⟶ b), reverse (reverse f) = f

variable {U V W}

@[simp]
theorem reverse_reverse [h : HasInvolutiveReverse V] {a b : V} (f : a ⟶ b) :
    reverse (reverse f) = f := by apply h.inv'

-- `reverse` is injective when it is involutive (it is its own inverse).
@[simp]
theorem reverse_inj [h : HasInvolutiveReverse V] {a b : V} (f g : a ⟶ b) :
    reverse f = reverse g ↔ f = g := by
  constructor
  · rintro h
    simpa using congr_arg Quiver.reverse h
  · rintro h
    congr

theorem eq_reverse_iff [h : HasInvolutiveReverse V] {a b : V} (f : a ⟶ b) (g : b ⟶ a) :
    f = reverse g ↔ reverse f = g := by
  rw [← reverse_inj, reverse_reverse]

section MapReverse

variable [HasReverse U] [HasReverse V] [HasReverse W]

/-- A prefunctor preserving reversal of arrows -/
class _root_.Prefunctor.MapReverse (φ : U ⥤q V) : Prop where
  /-- The image of a reverse is the reverse of the image. -/
  map_reverse' : ∀ {u v : U} (e : u ⟶ v), φ.map (reverse e) = reverse (φ.map e)

@[simp]
theorem _root_.Prefunctor.map_reverse (φ : U ⥤q V) [φ.MapReverse] {u v : U} (e : u ⟶ v) :
    φ.map (reverse e) = reverse (φ.map e) :=
  Prefunctor.MapReverse.map_reverse' e

-- Composition of reverse-preserving prefunctors preserves reverses.
instance _root_.Prefunctor.mapReverseComp (φ : U ⥤q V) (ψ : V ⥤q W) [φ.MapReverse]
    [ψ.MapReverse] : (φ ⋙q ψ).MapReverse where
  map_reverse' e := by
    simp only [Prefunctor.comp_map, Prefunctor.MapReverse.map_reverse']

instance _root_.Prefunctor.mapReverseId : (Prefunctor.id U).MapReverse where
  map_reverse' _ := rfl

end MapReverse

-- Reversing in `Symmetrify V` simply swaps the two summands.
instance : HasReverse (Symmetrify V) :=
  ⟨fun e => e.swap⟩

instance : HasInvolutiveReverse (Symmetrify V) where
  toHasReverse := ⟨fun e ↦ e.swap⟩
  inv' e := congr_fun Sum.swap_swap_eq e

@[simp]
theorem symmetrify_reverse {a b : Symmetrify V} (e : a ⟶ b) : reverse e = e.swap :=
  rfl

section Paths

/-- Shorthand for the "forward" arrow corresponding to `f` in `symmetrify V` -/
abbrev Hom.toPos {X Y : V} (f : X ⟶ Y) : (Quiver.symmetrifyQuiver V).Hom X Y :=
  Sum.inl f

/-- Shorthand for the "backward" arrow corresponding to `f` in `symmetrify V` -/
abbrev Hom.toNeg {X Y : V} (f : X ⟶ Y) : (Quiver.symmetrifyQuiver V).Hom Y X :=
  Sum.inr f

/-- Reverse the direction of a path. -/
@[simp]
def Path.reverse [HasReverse V] {a : V} : ∀ {b}, Path a b → Path b a
  | _, Path.nil => Path.nil
  | _, Path.cons p e => (Quiver.reverse e).toPath.comp p.reverse

@[simp]
theorem Path.reverse_toPath [HasReverse V] {a b : V} (f : a ⟶ b) :
    f.toPath.reverse = (Quiver.reverse f).toPath :=
  rfl

@[simp]
theorem Path.reverse_comp [HasReverse V] {a b c : V} (p : Path a b) (q : Path b c) :
    (p.comp q).reverse = q.reverse.comp p.reverse := by
  induction' q with _ _ _ _ h
  · simp
  · simp [h]

@[simp]
theorem Path.reverse_reverse [h : HasInvolutiveReverse V] {a b : V} (p : Path a b) :
    p.reverse.reverse = p := by
  induction' p with _ _ _ _ h
  · simp
  · rw [Path.reverse, Path.reverse_comp, h, Path.reverse_toPath, Quiver.reverse_reverse]
    rfl

end Paths

namespace Symmetrify

/-- The inclusion of a quiver in its symmetrification -/
def of : Prefunctor V (Symmetrify V) where
  obj := id
  map := Sum.inl

variable {V' : Type*} [Quiver.{v' + 1} V']

/-- Given a quiver `V'` with reversible arrows, a prefunctor to `V'` can be lifted to one from
`Symmetrify V` to `V'` -/
def lift [HasReverse V'] (φ : Prefunctor V V') : Prefunctor (Symmetrify V) V' where
  obj := φ.obj
  -- Forward arrows are mapped by `φ`; formal reverses are mapped to actual reverses in `V'`.
  map f := match f with
  | Sum.inl g => φ.map g
  | Sum.inr g => reverse (φ.map g)

theorem lift_spec [HasReverse V'] (φ : Prefunctor V V') :
    Symmetrify.of.comp (Symmetrify.lift φ) = φ := by
  fapply Prefunctor.ext
  · rintro X
    rfl
  · rintro X Y f
    rfl

theorem lift_reverse [h : HasInvolutiveReverse V'] (φ : Prefunctor V V') {X Y : Symmetrify V}
    (f : X ⟶ Y) :
    (Symmetrify.lift φ).map (Quiver.reverse f) = Quiver.reverse ((Symmetrify.lift φ).map f) := by
  dsimp [Symmetrify.lift]; cases f
  · simp only
    rfl
  · simp only [reverse_reverse]
    rfl

/-- `lift φ` is the only prefunctor extending `φ` and preserving reverses. -/
theorem lift_unique [HasReverse V'] (φ : V ⥤q V') (Φ : Symmetrify V ⥤q V') (hΦ : (of ⋙q Φ) = φ)
    (hΦinv : ∀ {X Y : Symmetrify V} (f : X ⟶ Y),
      Φ.map (Quiver.reverse f) = Quiver.reverse (Φ.map f)) :
    Φ = Symmetrify.lift φ := by
  subst_vars
  fapply Prefunctor.ext
  · rintro X
    rfl
  · rintro X Y f
    cases f
    · rfl
    · exact hΦinv (Sum.inl _)

/-- A prefunctor canonically defines a prefunctor of the symmetrifications. -/
@[simps]
def _root_.Prefunctor.symmetrify (φ : U ⥤q V) : Symmetrify U ⥤q Symmetrify V where
  obj := φ.obj
  map := Sum.map φ.map φ.map

instance _root_.Prefunctor.symmetrify_mapReverse (φ : U ⥤q V) :
    Prefunctor.MapReverse φ.symmetrify :=
  ⟨fun e => by cases e <;> rfl⟩

end Symmetrify

namespace Push

variable {V' : Type*} (σ : V → V')

-- Reversal on a pushforward quiver reverses the underlying arrow of `V`.
instance [HasReverse V] : HasReverse (Quiver.Push σ) where
  reverse' := fun
  | PushQuiver.arrow f => PushQuiver.arrow (reverse f)

instance [h : HasInvolutiveReverse V] : HasInvolutiveReverse (Push σ) where
  reverse' := fun
  | PushQuiver.arrow f => PushQuiver.arrow (reverse f)
  inv' := fun
  | PushQuiver.arrow f => by
    dsimp [reverse]; congr; apply h.inv'

theorem of_reverse [HasInvolutiveReverse V] (X Y : V) (f : X ⟶ Y) :
    (reverse <| (Push.of σ).map f) = (Push.of σ).map (reverse f) :=
  rfl

instance ofMapReverse [h : HasInvolutiveReverse V] : (Push.of σ).MapReverse :=
  ⟨by simp [of_reverse]⟩

end Push

/-- A quiver is preconnected iff there exists a path between any pair of vertices.
Note that if `V` doesn't `HasReverse`, then the definition is stronger than simply having a
preconnected underlying `SimpleGraph`, since a path in one direction doesn't induce one in the
other. -/
def IsPreconnected (V) [Quiver.{u + 1} V] :=
  ∀ X Y : V, Nonempty (Path X Y)

end Quiver
Combinatorics\SetFamily\AhlswedeZhang.lean
/-
Copyright (c) 2023 Yaël Dillies, Vladimir Ivanov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Vladimir Ivanov
-/
import Mathlib.Algebra.BigOperators.Intervals
import Mathlib.Algebra.BigOperators.Ring
import Mathlib.Algebra.Order.BigOperators.Group.Finset
import Mathlib.Algebra.Order.Field.Basic
import Mathlib.Data.Finset.Sups
import Mathlib.Tactic.FieldSimp
import Mathlib.Tactic.Positivity.Basic
import Mathlib.Tactic.Ring

/-!
# The Ahlswede-Zhang identity

This file proves the Ahlswede-Zhang identity, which is a nontrivial relation between the size of
the "truncated unions" of a set family. It sharpens the Lubell-Yamamoto-Meshalkin inequality
`Finset.sum_card_slice_div_choose_le_one`, by making explicit the correction term.

For a set family `𝒜` over a ground set of size `n`, the Ahlswede-Zhang identity states that the
sum of `|⋂ B ∈ 𝒜, B ⊆ A, B|/(|A| * n.choose |A|)` over all set `A` is exactly `1`. This implies
the LYM inequality since for an antichain `𝒜` and every `A ∈ 𝒜` we have
`|⋂ B ∈ 𝒜, B ⊆ A, B|/(|A| * n.choose |A|) = 1 / n.choose |A|`.

## Main declarations

* `Finset.truncatedSup`: `s.truncatedSup a` is the supremum of all `b ≥ a` in `𝒜` if there are
  some, or `⊤` if there are none.
* `Finset.truncatedInf`: `s.truncatedInf a` is the infimum of all `b ≤ a` in `𝒜` if there are
  some, or `⊥` if there are none.
* `AhlswedeZhang.infSum`: LHS of the Ahlswede-Zhang identity.
* `AhlswedeZhang.le_infSum`: The sum of `1 / n.choose |A|` over an antichain is less than the RHS
  of the Ahlswede-Zhang identity.
* `AhlswedeZhang.infSum_eq_one`: Ahlswede-Zhang identity.

## References

* [R. Ahlswede, Z. Zhang, *An identity in combinatorial extremal theory*](https://doi.org/10.1016/0001-8708(90)90023-G)
* [D. T. Tru, *An AZ-style identity and Bollobás deficiency*](https://doi.org/10.1016/j.jcta.2007.03.005)
-/

section
variable (α : Type*) [Fintype α] [Nonempty α] {m n : ℕ}

open Finset Fintype Nat

-- The sum telescopes: each summand equals `f i - f (i + 1)` for `f i = C(n, i) / C(m, i)`,
-- so the total collapses via `sum_range_sub'` to `f 0 - f (n + 1) = 1 - 0`.
private lemma binomial_sum_eq (h : n < m) :
    ∑ i ∈ range (n + 1), (n.choose i * (m - n) / ((m - i) * m.choose i) : ℚ) = 1 := by
  set f : ℕ → ℚ := fun i ↦ n.choose i * (m.choose i : ℚ)⁻¹ with hf
  suffices ∀ i ∈ range (n + 1),
      f i - f (i + 1) = n.choose i * (m - n) / ((m - i) * m.choose i) by
    rw [← sum_congr rfl this, sum_range_sub', hf]
    simp [choose_self, choose_zero_right, choose_eq_zero_of_lt h]
  intro i h₁
  rw [mem_range] at h₁
  have h₁ := le_of_lt_succ h₁
  have h₂ := h₁.trans_lt h
  have h₃ := h₂.le
  have hi₄ : (i + 1 : ℚ) ≠ 0 := i.cast_add_one_ne_zero
  have := congr_arg ((↑) : ℕ → ℚ) (choose_succ_right_eq m i)
  push_cast at this
  dsimp [f, hf]
  rw [(eq_mul_inv_iff_mul_eq₀ hi₄).mpr this]
  have := congr_arg ((↑) : ℕ → ℚ) (choose_succ_right_eq n i)
  push_cast at this
  rw [(eq_mul_inv_iff_mul_eq₀ hi₄).mpr this]
  have : (m - i : ℚ) ≠ 0 := sub_ne_zero_of_ne (cast_lt.mpr h₂).ne'
  have : (m.choose i : ℚ) ≠ 0 := cast_ne_zero.2 (choose_pos h₂.le).ne'
  field_simp
  ring

-- Group the sum over all subsets by cardinality, then reindex to recover the harmonic sum.
private lemma Fintype.sum_div_mul_card_choose_card :
    ∑ s : Finset α, (card α / ((card α - s.card) * (card α).choose s.card) : ℚ) =
      card α * ∑ k ∈ range (card α), (↑k)⁻¹ + 1 := by
  rw [← powerset_univ, powerset_card_disjiUnion, sum_disjiUnion]
  have : ∀ {x : ℕ}, ∀ s ∈ powersetCard x (univ : Finset α),
      (card α / ((card α - Finset.card s) * (card α).choose (Finset.card s)) : ℚ) =
        card α / ((card α - x) * (card α).choose x) := by
    intros n s hs
    rw [mem_powersetCard_univ.1 hs]
  simp_rw [sum_congr rfl this, sum_const, card_powersetCard, card_univ, nsmul_eq_mul, mul_div,
    mul_comm, ← mul_div]
  rw [← mul_sum, ← mul_inv_cancel (cast_ne_zero.mpr card_ne_zero : (card α : ℚ) ≠ 0), ← mul_add,
    add_comm _ ((card α)⁻¹ : ℚ), ← sum_insert (f := fun x : ℕ ↦ (x⁻¹ : ℚ)) not_mem_range_self,
    ← range_succ]
  have (n) (hn : n ∈ range (card α + 1)) :
      ((card α).choose n / ((card α - n) * (card α).choose n) : ℚ) = (card α - n : ℚ)⁻¹ := by
    rw [div_mul_cancel_right₀]
    exact cast_ne_zero.2 (choose_pos $ mem_range_succ_iff.1 hn).ne'
  simp only [sum_congr rfl this, mul_eq_mul_left_iff, cast_eq_zero]
  convert Or.inl $ sum_range_reflect _ _ with a ha
  rw [add_tsub_cancel_right, cast_sub (mem_range_succ_iff.mp ha)]

end

open scoped FinsetFamily

namespace Finset
variable {α β : Type*}

/-! ### Truncated supremum, truncated infimum -/

section SemilatticeSup
variable [SemilatticeSup α] [SemilatticeSup β] [BoundedOrder β] {s t : Finset α} {a b : α}

-- Membership in the lower closure provides a witness `b ∈ s` with `a ≤ b`, so the filter
-- used by `truncatedSup` is nonempty.
private lemma sup_aux [@DecidableRel α (· ≤ ·)] :
    a ∈ lowerClosure s → (s.filter fun b ↦ a ≤ b).Nonempty :=
  fun ⟨b, hb, hab⟩ ↦ ⟨b, mem_filter.2 ⟨hb, hab⟩⟩

private lemma lower_aux [DecidableEq α] :
    a ∈ lowerClosure ↑(s ∪ t) ↔ a ∈ lowerClosure s ∨ a ∈ lowerClosure t := by
  rw [coe_union, lowerClosure_union, LowerSet.mem_sup_iff]

variable [@DecidableRel α (· ≤ ·)] [OrderTop α]

/-- The supremum of the elements of `s` less than `a` if there are some, otherwise `⊤`. -/
def truncatedSup (s : Finset α) (a : α) : α :=
  if h : a ∈ lowerClosure s then (s.filter fun b ↦ a ≤ b).sup' (sup_aux h) id else ⊤

lemma truncatedSup_of_mem (h : a ∈ lowerClosure s) :
    truncatedSup s a = (s.filter fun b ↦ a ≤ b).sup' (sup_aux h) id := dif_pos h

lemma truncatedSup_of_not_mem (h : a ∉ lowerClosure s) : truncatedSup s a = ⊤ := dif_neg h

@[simp] lemma truncatedSup_empty (a : α) : truncatedSup ∅ a = ⊤ :=
  truncatedSup_of_not_mem $ by simp

@[simp] lemma truncatedSup_singleton (b a : α) :
    truncatedSup {b} a = if a ≤ b then b else ⊤ := by
  simp [truncatedSup]; split_ifs <;> simp [Finset.filter_true_of_mem, *]

lemma le_truncatedSup : a ≤ truncatedSup s a := by
  rw [truncatedSup]
  split_ifs with h
  · obtain ⟨ℬ, hb, h⟩ := h
    exact h.trans $ le_sup' id $ mem_filter.2 ⟨hb, h⟩
  · exact le_top

-- An order isomorphism commutes with the truncated supremum.
lemma map_truncatedSup [@DecidableRel β (· ≤ ·)] (e : α ≃o β) (s : Finset α) (a : α) :
    e (truncatedSup s a) = truncatedSup (s.map e.toEquiv.toEmbedding) (e a) := by
  have : e a ∈ lowerClosure (s.map e.toEquiv.toEmbedding : Set β) ↔ a ∈ lowerClosure s := by simp
  simp_rw [truncatedSup, apply_dite e, map_finset_sup', map_top, this]
  congr with h
  simp only [filter_map, Function.comp, Equiv.coe_toEmbedding, RelIso.coe_fn_toEquiv,
    OrderIso.le_iff_le, id]
  rw [sup'_map]
  -- TODO: Why can't `simp` use `Finset.sup'_map`?
  simp only [sup'_map, Equiv.coe_toEmbedding, RelIso.coe_fn_toEquiv, Function.comp_apply]

lemma truncatedSup_of_isAntichain (hs : IsAntichain (· ≤ ·) (s : Set α)) (ha : a ∈ s) :
    truncatedSup s a = a := by
  refine le_antisymm ?_ le_truncatedSup
  simp_rw [truncatedSup_of_mem (subset_lowerClosure ha), sup'_le_iff, mem_filter]
  rintro b ⟨hb, hab⟩
  exact (hs.eq ha hb hab).ge

variable [DecidableEq α]

lemma truncatedSup_union (hs : a ∈ lowerClosure s) (ht : a ∈ lowerClosure t) :
    truncatedSup (s ∪ t) a = truncatedSup s a ⊔ truncatedSup t a := by
  simpa only [truncatedSup_of_mem, hs, ht, lower_aux.2 (Or.inl hs), filter_union] using
    sup'_union _ _ _

lemma truncatedSup_union_left (hs : a ∈ lowerClosure s) (ht : a ∉ lowerClosure t) :
    truncatedSup (s ∪ t) a = truncatedSup s a := by
  simp only [mem_lowerClosure, mem_coe, exists_prop, not_exists, not_and] at ht
  simp only [truncatedSup_of_mem, hs, filter_union, filter_false_of_mem ht, union_empty,
    lower_aux.2 (Or.inl hs), ht]

lemma truncatedSup_union_right (hs : a ∉ lowerClosure s) (ht : a ∈ lowerClosure t) :
    truncatedSup (s ∪ t) a = truncatedSup t a := by
  rw [union_comm, truncatedSup_union_left ht hs]

lemma truncatedSup_union_of_not_mem (hs : a ∉ lowerClosure s) (ht : a ∉ lowerClosure t) :
    truncatedSup (s ∪ t) a = ⊤ :=
  truncatedSup_of_not_mem fun h ↦ (lower_aux.1 h).elim hs ht

end SemilatticeSup

section SemilatticeInf
variable [SemilatticeInf α] [SemilatticeInf β] [BoundedOrder β] [@DecidableRel β (· ≤ ·)]
  {s t : Finset α} {a : α}

-- Order dual of `sup_aux`: a witness `b ∈ s` with `b ≤ a` makes the filter nonempty.
private lemma inf_aux [@DecidableRel α (· ≤ ·)] :
    a ∈ upperClosure s → (s.filter (· ≤ a)).Nonempty :=
  fun ⟨b, hb, hab⟩ ↦ ⟨b, mem_filter.2 ⟨hb, hab⟩⟩

private lemma upper_aux [DecidableEq α] :
    a ∈ upperClosure ↑(s ∪ t) ↔ a ∈ upperClosure s ∨ a ∈ upperClosure t := by
  rw [coe_union, upperClosure_union, UpperSet.mem_inf_iff]

variable [@DecidableRel α (· ≤ ·)] [BoundedOrder α]

/-- The infimum of the elements of `s` less than `a` if there are some, otherwise `⊥`. -/
def truncatedInf (s : Finset α) (a : α) : α :=
  if h : a ∈ upperClosure s then (s.filter (· ≤ a)).inf' (inf_aux h) id else ⊥

lemma truncatedInf_of_mem (h : a ∈ upperClosure s) :
    truncatedInf s a = (s.filter (· ≤ a)).inf' (inf_aux h) id := dif_pos h

lemma truncatedInf_of_not_mem (h : a ∉ upperClosure s) : truncatedInf s a = ⊥ := dif_neg h

lemma truncatedInf_le : truncatedInf s a ≤ a := by
  unfold truncatedInf
  split_ifs with h
  · obtain ⟨b, hb, hba⟩ := h
    exact hba.trans' $ inf'_le id $ mem_filter.2 ⟨hb, ‹_›⟩
  · exact bot_le

@[simp] lemma truncatedInf_empty (a : α) : truncatedInf ∅ a = ⊥ :=
  truncatedInf_of_not_mem $ by simp

@[simp] lemma truncatedInf_singleton (b a : α) :
    truncatedInf {b} a = if b ≤ a then b else ⊥ := by
  simp only [truncatedInf, coe_singleton, upperClosure_singleton, UpperSet.mem_Ici_iff,
    filter_congr_decidable, id_eq]
  split_ifs <;> simp [Finset.filter_true_of_mem, *]

-- Order dual of `map_truncatedSup`.
lemma map_truncatedInf (e : α ≃o β) (s : Finset α) (a : α) :
    e (truncatedInf s a) = truncatedInf (s.map e.toEquiv.toEmbedding) (e a) := by
  have : e a ∈ upperClosure (s.map e.toEquiv.toEmbedding) ↔ a ∈ upperClosure s := by simp
  simp_rw [truncatedInf, apply_dite e, map_finset_inf', map_bot, this]
  congr with h
  simp only [filter_map, Function.comp, Equiv.coe_toEmbedding, RelIso.coe_fn_toEquiv,
    OrderIso.le_iff_le, id, inf'_map]

lemma truncatedInf_of_isAntichain (hs : IsAntichain (· ≤ ·) (s : Set α)) (ha : a ∈ s) :
    truncatedInf s a = a := by
  refine le_antisymm truncatedInf_le ?_
  simp_rw [truncatedInf_of_mem (subset_upperClosure ha), le_inf'_iff, mem_filter]
  rintro b ⟨hb, hba⟩
  exact (hs.eq hb ha hba).ge

variable [DecidableEq α]

lemma truncatedInf_union (hs : a ∈ upperClosure s) (ht : a ∈ upperClosure t) :
    truncatedInf (s ∪ t) a = truncatedInf s a ⊓ truncatedInf t a := by
  simpa only [truncatedInf_of_mem, hs, ht, upper_aux.2 (Or.inl hs), filter_union] using
    inf'_union _ _ _

lemma truncatedInf_union_left (hs : a ∈ upperClosure s) (ht : a ∉ upperClosure t) :
    truncatedInf (s ∪ t) a = truncatedInf s a := by
  simp only [mem_upperClosure, mem_coe, exists_prop, not_exists, not_and] at ht
  simp only [truncatedInf_of_mem, hs, filter_union, filter_false_of_mem ht, union_empty,
    upper_aux.2 (Or.inl hs), ht]

lemma truncatedInf_union_right (hs : a ∉ upperClosure s) (ht : a ∈ upperClosure t) :
    truncatedInf (s ∪ t) a = truncatedInf t a := by
  rw [union_comm, truncatedInf_union_left ht hs]

lemma truncatedInf_union_of_not_mem (hs : a ∉ upperClosure s) (ht : a ∉ upperClosure t) :
    truncatedInf (s ∪ t) a = ⊥ :=
  truncatedInf_of_not_mem $ by
    rw [coe_union, upperClosure_union]; exact fun h ↦ h.elim hs ht

end SemilatticeInf

section DistribLattice
variable [DistribLattice α] [DecidableEq α] {s t : Finset α} {a : α}

private lemma infs_aux :
    a ∈ lowerClosure ↑(s ⊼ t) ↔ a ∈ lowerClosure s ∧ a ∈ lowerClosure t := by
  rw [coe_infs, lowerClosure_infs, LowerSet.mem_inf_iff]

private lemma sups_aux :
    a ∈ upperClosure ↑(s ⊻ t) ↔ a ∈ upperClosure s ∧ a ∈ upperClosure t := by
  rw [coe_sups, upperClosure_sups, UpperSet.mem_sup_iff]

variable [@DecidableRel α (· ≤ ·)] [BoundedOrder α]

lemma truncatedSup_infs (hs : a ∈ lowerClosure s) (ht : a ∈ lowerClosure t) :
    truncatedSup (s ⊼ t) a = truncatedSup s a ⊓ truncatedSup t a := by
  simp only [truncatedSup_of_mem, hs, ht, infs_aux.2 ⟨hs, ht⟩, sup'_inf_sup', filter_infs_le]
  simp_rw [← image_inf_product]
  rw [sup'_image]
  simp [Function.uncurry_def]

lemma truncatedInf_sups (hs : a ∈ upperClosure s) (ht : a ∈ upperClosure t) :
    truncatedInf (s ⊻ t) a = truncatedInf s a ⊔ truncatedInf t a := by
  simp only [truncatedInf_of_mem, hs, ht, sups_aux.2 ⟨hs, ht⟩, inf'_sup_inf', filter_sups_le]
  simp_rw [← image_sup_product]
  rw [inf'_image]
  simp [Function.uncurry_def]

lemma truncatedSup_infs_of_not_mem (ha : a ∉ lowerClosure s ⊓ lowerClosure t) :
    truncatedSup (s ⊼ t) a = ⊤ :=
  truncatedSup_of_not_mem $ by rwa [coe_infs, lowerClosure_infs]

lemma truncatedInf_sups_of_not_mem (ha : a ∉ upperClosure s ⊔ upperClosure t) :
    truncatedInf (s ⊻ t) a = ⊥ :=
  truncatedInf_of_not_mem $ by rwa [coe_sups, upperClosure_sups]

end DistribLattice

section BooleanAlgebra
variable [BooleanAlgebra α] [@DecidableRel α (· ≤ ·)] {s : Finset α} {a : α}

@[simp] lemma compl_truncatedSup (s : Finset α) (a : α) :
    (truncatedSup s a)ᶜ = truncatedInf sᶜˢ aᶜ := map_truncatedSup (OrderIso.compl α) _ _

@[simp] lemma compl_truncatedInf (s : Finset α) (a : α) :
    (truncatedInf s a)ᶜ = truncatedSup sᶜˢ aᶜ := map_truncatedInf (OrderIso.compl α) _ _

end BooleanAlgebra

variable [DecidableEq α] [Fintype α]

-- Case split on whether `s` lies in each lower closure; the main case reduces to
-- inclusion-exclusion (`card_union_add_card_inter`).
lemma card_truncatedSup_union_add_card_truncatedSup_infs (𝒜 ℬ : Finset (Finset α))
    (s : Finset α) :
    (truncatedSup (𝒜 ∪ ℬ) s).card + (truncatedSup (𝒜 ⊼ ℬ) s).card =
      (truncatedSup 𝒜 s).card + (truncatedSup ℬ s).card := by
  by_cases h𝒜 : s ∈ lowerClosure (𝒜 : Set $ Finset α) <;>
    by_cases hℬ : s ∈ lowerClosure (ℬ : Set $ Finset α)
  · rw [truncatedSup_union h𝒜 hℬ, truncatedSup_infs h𝒜 hℬ]
    exact card_union_add_card_inter _ _
  · rw [truncatedSup_union_left h𝒜 hℬ, truncatedSup_of_not_mem hℬ,
      truncatedSup_infs_of_not_mem fun h ↦ hℬ h.2]
  · rw [truncatedSup_union_right h𝒜 hℬ, truncatedSup_of_not_mem h𝒜,
      truncatedSup_infs_of_not_mem fun h ↦ h𝒜 h.1, add_comm]
  · rw [truncatedSup_of_not_mem h𝒜, truncatedSup_of_not_mem hℬ,
      truncatedSup_union_of_not_mem h𝒜 hℬ, truncatedSup_infs_of_not_mem fun h ↦ h𝒜 h.1]

-- Order dual of the previous lemma, with upper closures and `⊻`.
lemma card_truncatedInf_union_add_card_truncatedInf_sups (𝒜 ℬ : Finset (Finset α))
    (s : Finset α) :
    (truncatedInf (𝒜 ∪ ℬ) s).card + (truncatedInf (𝒜 ⊻ ℬ) s).card =
      (truncatedInf 𝒜 s).card + (truncatedInf ℬ s).card := by
  by_cases h𝒜 : s ∈ upperClosure (𝒜 : Set $ Finset α) <;>
    by_cases hℬ : s ∈ upperClosure (ℬ : Set $ Finset α)
  · rw [truncatedInf_union h𝒜 hℬ, truncatedInf_sups h𝒜 hℬ]
    exact card_inter_add_card_union _ _
  · rw [truncatedInf_union_left h𝒜 hℬ, truncatedInf_of_not_mem hℬ,
      truncatedInf_sups_of_not_mem fun h ↦ hℬ h.2]
  · rw [truncatedInf_union_right h𝒜 hℬ, truncatedInf_of_not_mem h𝒜,
      truncatedInf_sups_of_not_mem fun h ↦ h𝒜 h.1, add_comm]
  · rw [truncatedInf_of_not_mem h𝒜, truncatedInf_of_not_mem hℬ,
      truncatedInf_union_of_not_mem h𝒜 hℬ, truncatedInf_sups_of_not_mem fun h ↦ h𝒜 h.1]

end Finset

open Finset hiding card
open Fintype Nat

namespace AhlswedeZhang
variable {α : Type*} [Fintype α] [DecidableEq α] {𝒜 ℬ : Finset (Finset α)} {s : Finset α}

/-- Weighted sum of the size of the truncated infima of a set family. Relevant to the
Ahlswede-Zhang identity. -/
def infSum (𝒜 : Finset (Finset α)) : ℚ :=
  ∑ s, (truncatedInf 𝒜 s).card / (s.card * (card α).choose s.card)

/-- Weighted sum of the size of the truncated suprema of a set family. Relevant to the
Ahlswede-Zhang identity. -/
def supSum (𝒜 : Finset (Finset α)) : ℚ :=
  ∑ s, (truncatedSup 𝒜 s).card / ((card α - s.card) * (card α).choose s.card)

lemma supSum_union_add_supSum_infs (𝒜 ℬ : Finset (Finset α)) :
    supSum (𝒜 ∪ ℬ) + supSum (𝒜 ⊼ ℬ) = supSum 𝒜 + supSum ℬ := by
  unfold supSum
  rw [← sum_add_distrib, ← sum_add_distrib, sum_congr rfl fun s _ ↦ _]
  simp_rw [div_add_div_same, ← Nat.cast_add, card_truncatedSup_union_add_card_truncatedSup_infs]
  simp

lemma infSum_union_add_infSum_sups (𝒜 ℬ : Finset (Finset α)) :
    infSum (𝒜 ∪ ℬ) + infSum (𝒜 ⊻ ℬ) = infSum 𝒜 + infSum ℬ := by
  unfold infSum
  rw [← sum_add_distrib, ← sum_add_distrib, sum_congr rfl fun s _ ↦ _]
  simp_rw [div_add_div_same, ← Nat.cast_add, card_truncatedInf_union_add_card_truncatedInf_sups]
  simp

-- On an antichain the truncated infimum at `s ∈ 𝒜` is `s` itself
-- (`truncatedInf_of_isAntichain`), so each summand simplifies to `1 / C(n, |s|)`.
lemma IsAntichain.le_infSum (h𝒜 : IsAntichain (· ⊆ ·) (𝒜 : Set (Finset α))) (h𝒜₀ : ∅ ∉ 𝒜) :
    ∑ s ∈ 𝒜, ((card α).choose s.card : ℚ)⁻¹ ≤ infSum 𝒜 := by
  calc
    _ = ∑ s ∈ 𝒜, (truncatedInf 𝒜 s).card / (s.card * (card α).choose s.card : ℚ) := ?_
    _ ≤ _ := sum_le_univ_sum_of_nonneg fun s ↦ by positivity
  refine sum_congr rfl fun s hs ↦ ?_
  rw [truncatedInf_of_isAntichain h𝒜 hs, div_mul_cancel_left₀]
  have := (nonempty_iff_ne_empty.2 $ ne_of_mem_of_not_mem hs h𝒜₀).card_pos
  positivity

variable [Nonempty α]

@[simp] lemma supSum_singleton (hs : s ≠ univ) :
    supSum ({s} : Finset (Finset α)) = card α * ∑ k ∈ range (card α), (k : ℚ)⁻¹ := by
  have : ∀ t : Finset α,
      (card α - (truncatedSup {s} t).card : ℚ) / ((card α - t.card) * (card α).choose t.card) =
        if t ⊆ s then
          (card α - s.card : ℚ) / ((card α - t.card) * (card α).choose t.card)
        else 0 := by
    rintro t
    simp_rw [truncatedSup_singleton, le_iff_subset]
    split_ifs <;> simp [card_univ]
  simp_rw [← sub_eq_of_eq_add (Fintype.sum_div_mul_card_choose_card α), eq_sub_iff_add_eq,
    ← eq_sub_iff_add_eq', supSum, ← sum_sub_distrib, ← sub_div]
  rw [sum_congr rfl fun t _ ↦ this t, sum_ite, sum_const_zero, add_zero, filter_subset_univ,
    sum_powerset, ← binomial_sum_eq ((card_lt_iff_ne_univ _).2 hs), eq_comm]
  refine sum_congr rfl fun n _ ↦ ?_
  rw [mul_div_assoc, ← nsmul_eq_mul]
  exact sum_powersetCard n s fun m ↦ (card α - s.card : ℚ) / ((card α - m) * (card α).choose m)

/-- The **Ahlswede-Zhang Identity**. -/
-- Reindex the sum over all sets by complementation, then combine with
-- `Fintype.sum_div_mul_card_choose_card`.
lemma infSum_compls_add_supSum (𝒜 : Finset (Finset α)) :
    infSum 𝒜ᶜˢ + supSum 𝒜 = card α * ∑ k ∈ range (card α), (k : ℚ)⁻¹ + 1 := by
  unfold infSum supSum
  rw [← @map_univ_of_surjective (Finset α) _ _ _ ⟨compl, compl_injective⟩ compl_surjective,
    sum_map]
  simp only [Function.Embedding.coeFn_mk, univ_map_embedding, ← compl_truncatedSup,
    ← sum_add_distrib, card_compl, cast_sub (card_le_univ _), choose_symm (card_le_univ _),
    div_add_div_same, sub_add_cancel, Fintype.sum_div_mul_card_choose_card]

-- Strong induction on `|𝒜|`, peeling off one set at a time via
-- `supSum_union_add_supSum_infs`; the base case is `supSum_singleton`.
lemma supSum_of_not_univ_mem (h𝒜₁ : 𝒜.Nonempty) (h𝒜₂ : univ ∉ 𝒜) :
    supSum 𝒜 = card α * ∑ k ∈ range (card α), (k : ℚ)⁻¹ := by
  set m := 𝒜.card with hm
  clear_value m
  induction' m using Nat.strong_induction_on with m ih generalizing 𝒜
  replace ih := fun 𝒜 h𝒜 h𝒜₁ h𝒜₂ ↦ @ih _ h𝒜 𝒜 h𝒜₁ h𝒜₂ rfl
  obtain ⟨a, rfl⟩ | h𝒜₃ := h𝒜₁.exists_eq_singleton_or_nontrivial
  · refine supSum_singleton ?_
    simpa [eq_comm] using h𝒜₂
  cases m
  · cases h𝒜₁.card_pos.ne hm
  obtain ⟨s, 𝒜, hs, rfl, rfl⟩ := card_eq_succ.1 hm.symm
  have h𝒜 : 𝒜.Nonempty := nonempty_iff_ne_empty.2 (by rintro rfl; simp at h𝒜₃)
  rw [insert_eq, eq_sub_of_add_eq (supSum_union_add_supSum_infs _ _), singleton_infs,
    supSum_singleton (ne_of_mem_of_not_mem (mem_insert_self _ _) h𝒜₂), ih, ih,
    add_sub_cancel_right]
  · exact card_image_le.trans_lt (lt_add_one _)
  · exact h𝒜.image _
  · simpa using fun _ ↦ ne_of_mem_of_not_mem (mem_insert_self _ _) h𝒜₂
  · exact lt_add_one _
  · exact h𝒜
  · exact fun h ↦ h𝒜₂ (mem_insert_of_mem h)

/-- The **Ahlswede-Zhang Identity**. -/
lemma infSum_eq_one (h𝒜₁ : 𝒜.Nonempty) (h𝒜₀ : ∅ ∉ 𝒜) : infSum 𝒜 = 1 := by
  rw [← compls_compls 𝒜, eq_sub_of_add_eq (infSum_compls_add_supSum _),
    supSum_of_not_univ_mem h𝒜₁.compls, add_sub_cancel_left]
  simpa

end AhlswedeZhang
Combinatorics\SetFamily\CauchyDavenport.lean
/-
Copyright (c) 2023 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import Mathlib.Combinatorics.Additive.ETransform
import Mathlib.GroupTheory.Order.Min

/-!
# The Cauchy-Davenport theorem

This file proves a generalisation of the Cauchy-Davenport theorem to arbitrary groups.

Cauchy-Davenport provides a lower bound on the size of `s + t` in terms of the sizes of `s` and
`t`, where `s` and `t` are nonempty finite sets in a monoid. Precisely, it says that
`|s + t| ≥ |s| + |t| - 1` unless the RHS is bigger than the size of the smallest nontrivial
subgroup (in which case taking `s` and `t` to be that subgroup would yield a counterexample). The
motivating example is `s = {0, ..., m}`, `t = {0, ..., n}` in the integers, which gives
`s + t = {0, ..., m + n}` and `|s + t| = m + n + 1 = |s| + |t| - 1`.

There are two kinds of proof of Cauchy-Davenport:
* The first one works in linear orders by writing `a₁ < ... < aₖ` the elements of `s`,
  `b₁ < ... < bₗ` the elements of `t`, and arguing that `a₁ + b₁ < ... < aₖ + b₁ < ... < aₖ + bₗ`
  are distinct elements of `s + t`.
* The second one works in groups by performing an "e-transform". In an abelian group, the
  e-transform replaces `s` and `t` by `s ∩ g • s` and `t ∪ g⁻¹ • t`. For a suitably chosen `g`,
  this decreases `|s + t|` and keeps `|s| + |t|` the same. In a general group, we use a trickier
  e-transform (in fact, a pair of e-transforms), but the idea is the same.

## Main declarations

* `Finset.min_le_card_mul`: A generalisation of the Cauchy-Davenport theorem to arbitrary groups.
* `Monoid.IsTorsionFree.card_add_card_sub_one_le_card_mul`: The Cauchy-Davenport theorem in
  torsion-free groups.
* `ZMod.min_le_card_add`: The Cauchy-Davenport theorem.
* `Finset.card_add_card_sub_one_le_card_mul`: The Cauchy-Davenport theorem in linear ordered
  cancellative semigroups.

## TODO

Version for `circle`.

## References

* Matt DeVos, *On a generalization of the Cauchy-Davenport theorem*

## Tags

additive combinatorics, number theory, sumset, cauchy-davenport
-/

open Finset Function Monoid MulOpposite Subgroup
open scoped Pointwise

variable {α : Type*}

/-! ### General case -/

section General
variable [Group α] [DecidableEq α] {x y : Finset α × Finset α} {s t : Finset α}

/-- The relation we induct along in the proof by DeVos of the Cauchy-Davenport theorem.
`(s₁, t₁) < (s₂, t₂)` iff
* `|s₁ * t₁| < |s₂ * t₂|`
* or `|s₁ * t₁| = |s₂ * t₂|` and `|s₂| + |t₂| < |s₁| + |t₁|`
* or `|s₁ * t₁| = |s₂ * t₂|` and `|s₁| + |t₁| = |s₂| + |t₂|` and `|s₁| < |s₂|`. -/
@[to_additive
"The relation we induct along in the proof by DeVos of the Cauchy-Davenport theorem.
`(s₁, t₁) < (s₂, t₂)` iff
* `|s₁ + t₁| < |s₂ + t₂|`
* or `|s₁ + t₁| = |s₂ + t₂|` and `|s₂| + |t₂| < |s₁| + |t₁|`
* or `|s₁ + t₁| = |s₂ + t₂|` and `|s₁| + |t₁| = |s₂| + |t₂|` and `|s₁| < |s₂|`."]
private def DevosMulRel : Finset α × Finset α → Finset α × Finset α → Prop :=
  -- Compare the triples `(|s * t|, |s| + |t|, |s|)` lexicographically; note the *reversed*
  -- comparison `(· > ·)` on the middle component.
  Prod.Lex (· < ·) (Prod.Lex (· > ·) (· < ·)) on
    fun x ↦ ((x.1 * x.2).card, x.1.card + x.2.card, x.1.card)

-- Unfolding lemma: spells out `DevosMulRel` as the disjunction of its three lexicographic cases.
@[to_additive]
private lemma devosMulRel_iff :
    DevosMulRel x y ↔
      (x.1 * x.2).card < (y.1 * y.2).card ∨
        (x.1 * x.2).card = (y.1 * y.2).card ∧ y.1.card + y.2.card < x.1.card + x.2.card ∨
          (x.1 * x.2).card = (y.1 * y.2).card ∧ x.1.card + x.2.card = y.1.card + y.2.card ∧
            x.1.card < y.1.card := by
  simp [DevosMulRel, Prod.lex_iff, and_or_left]

-- Sufficient condition for `DevosMulRel` via the first two lexicographic components.
@[to_additive]
private lemma devosMulRel_of_le (mul : (x.1 * x.2).card ≤ (y.1 * y.2).card)
    (hadd : y.1.card + y.2.card < x.1.card + x.2.card) : DevosMulRel x y :=
  devosMulRel_iff.2 <| mul.lt_or_eq.imp_right fun h ↦ Or.inl ⟨h, hadd⟩

-- Sufficient condition for `DevosMulRel` using all three lexicographic components.
@[to_additive]
private lemma devosMulRel_of_le_of_le (mul : (x.1 * x.2).card ≤ (y.1 * y.2).card)
    (hadd : y.1.card + y.2.card ≤ x.1.card + x.2.card) (hone : x.1.card < y.1.card) :
    DevosMulRel x y :=
  devosMulRel_iff.2 <| mul.lt_or_eq.imp_right fun h ↦
    hadd.gt_or_eq.imp (And.intro h) fun h' ↦ ⟨h, h', hone⟩

-- `DevosMulRel` is well-founded on pairs of nonempty finsets, so we may induct along it.
@[to_additive]
private lemma wellFoundedOn_devosMulRel :
    {x : Finset α × Finset α | x.1.Nonempty ∧ x.2.Nonempty}.WellFoundedOn
      (DevosMulRel : Finset α × Finset α → Finset α × Finset α → Prop) := by
  refine wellFounded_lt.onFun.wellFoundedOn.prod_lex_of_wellFoundedOn_fiber fun n ↦
    Set.WellFoundedOn.prod_lex_of_wellFoundedOn_fiber ?_
      fun n ↦ wellFounded_lt.onFun.wellFoundedOn
  exact wellFounded_lt.onFun.wellFoundedOn.mono' fun x hx y _ ↦ tsub_lt_tsub_left_of_le <|
    add_le_add ((card_le_card_mul_right _ hx.1.2).trans_eq hx.2) <|
      (card_le_card_mul_left _ hx.1.1).trans_eq hx.2

/-- A generalisation of the **Cauchy-Davenport theorem** to arbitrary groups. The size of `s * t`
is lower-bounded by `|s| + |t| - 1` unless this quantity is greater than the size of the smallest
subgroup. -/
@[to_additive
"A generalisation of the **Cauchy-Davenport theorem** to arbitrary groups. The size of `s + t` is
lower-bounded by `|s| + |t| - 1` unless this quantity is greater than the size of the smallest
subgroup."]
lemma Finset.min_le_card_mul (hs : s.Nonempty) (ht : t.Nonempty) :
    min (minOrder α) ↑(s.card + t.card - 1) ≤ (s * t).card := by
  -- Set up the induction on `x := (s, t)` along the `DevosMulRel` relation.
  set x := (s, t) with hx
  clear_value x
  simp only [Prod.ext_iff] at hx
  obtain ⟨rfl, rfl⟩ := hx
  refine wellFoundedOn_devosMulRel.induction (P := fun x : Finset α × Finset α ↦
    min (minOrder α) ↑(card x.1 + card x.2 - 1) ≤ card (x.1 * x.2)) ⟨hs, ht⟩ ?_
  clear! x
  rintro ⟨s, t⟩ ⟨hs, ht⟩ ih
  simp only [min_le_iff, tsub_le_iff_right, Prod.forall, Set.mem_setOf_eq, and_imp,
    Nat.cast_le] at *
  -- If `t.card < s.card`, we're done by the induction hypothesis on `(t⁻¹, s⁻¹)`.
  obtain hts | hst := lt_or_le t.card s.card
  · simpa only [← mul_inv_rev, add_comm, card_inv] using
      ih _ _ ht.inv hs.inv (devosMulRel_iff.2 <| Or.inr <| Or.inr <| by
        simpa only [← mul_inv_rev, add_comm, card_inv, true_and])
  -- If `s` is a singleton, then the result is trivial.
  obtain ⟨a, rfl⟩ | ⟨a, ha, b, hb, hab⟩ := hs.exists_eq_singleton_or_nontrivial
  · simp [add_comm]
  -- Else, we have `a, b ∈ s` distinct. So `g := b⁻¹ * a` is a non-identity element such that `s`
  -- intersects its right translate by `g`.
  obtain ⟨g, hg, hgs⟩ : ∃ g : α, g ≠ 1 ∧ (s ∩ op g • s).Nonempty :=
    ⟨b⁻¹ * a, inv_mul_eq_one.not.2 hab.symm, _,
      mem_inter.2 ⟨ha, mem_smul_finset.2 ⟨_, hb, by simp⟩⟩⟩
  -- If `s` is equal to its right translate by `g`, then it contains a nontrivial subgroup, namely
  -- the subgroup generated by `g`. So `s * t` has size at least the size of a nontrivial subgroup,
  -- as wanted.
  obtain hsg | hsg := eq_or_ne (op g • s) s
  · have hS : (zpowers g : Set α) ⊆ a⁻¹ • (s : Set α) := by
      refine forall_mem_zpowers.2 <| @zpow_induction_right _ _ _ (· ∈ a⁻¹ • (s : Set α))
        ⟨_, ha, inv_mul_self _⟩ (fun c hc ↦ ?_) fun c hc ↦ ?_
      · rw [← hsg, coe_smul_finset, smul_comm]
        exact Set.smul_mem_smul_set hc
      · simp only
        rwa [← op_smul_eq_mul, op_inv, ← Set.mem_smul_set_iff_inv_smul_mem, smul_comm,
          ← coe_smul_finset, hsg]
    refine Or.inl ((minOrder_le_natCard (zpowers_ne_bot.2 hg) <|
      s.finite_toSet.smul_set.subset hS).trans <| WithTop.coe_le_coe.2 <|
        ((Nat.card_mono s.finite_toSet.smul_set hS).trans_eq <| ?_).trans <|
          card_le_card_mul_right _ ht)
    rw [← coe_smul_finset]
    simp [-coe_smul_finset]
  -- Else, we can transform `s`, `t` to `s'`, `t'` and `s''`, `t''`, such that one of `(s', t')`
  -- and `(s'', t'')` is strictly smaller than `(s, t)` according to `DevosMulRel`.
  replace hsg : (s ∩ op g • s).card < s.card := card_lt_card ⟨inter_subset_left, fun h ↦
    hsg <| eq_of_superset_of_card_ge (h.trans inter_subset_right) (card_smul_finset _ _).le⟩
  replace aux1 := card_mono <| mulETransformLeft.fst_mul_snd_subset g (s, t)
  replace aux2 := card_mono <| mulETransformRight.fst_mul_snd_subset g (s, t)
  -- If the left translate of `t` by `g⁻¹` is disjoint from `t`, then we're easily done.
  obtain hgt | hgt := disjoint_or_nonempty_inter t (g⁻¹ • t)
  · rw [← card_smul_finset g⁻¹ t]
    refine Or.inr ((add_le_add_right hst _).trans ?_)
    rw [← card_union_of_disjoint hgt]
    exact (card_le_card_mul_left _ hgs).trans (le_add_of_le_left aux1)
  -- Else, we're done by induction on either `(s', t')` or `(s'', t'')` depending on whether
  -- `|s| + |t| ≤ |s'| + |t'|` or `|s| + |t| < |s''| + |t''|`. One of those two inequalities must
  -- hold since `2 * (|s| + |t|) = |s'| + |t'| + |s''| + |t''|`.
  obtain hstg | hstg := le_or_lt_of_add_le_add (MulETransform.card g (s, t)).ge
  · exact (ih _ _ hgs (hgt.mono inter_subset_union) <|
      devosMulRel_of_le_of_le aux1 hstg hsg).imp (WithTop.coe_le_coe.2 aux1).trans'
        fun h ↦ hstg.trans <| h.trans <| add_le_add_right aux1 _
  · exact (ih _ _ (hgs.mono inter_subset_union) hgt <| devosMulRel_of_le aux2 hstg).imp
      (WithTop.coe_le_coe.2 aux2).trans'
        fun h ↦ hstg.le.trans <| h.trans <| add_le_add_right aux2 _

/-- The **Cauchy-Davenport Theorem** for torsion-free groups. The size of `s * t` is lower-bounded
by `|s| + |t| - 1`. -/
@[to_additive
"The **Cauchy-Davenport theorem** for torsion-free groups. The size of `s + t` is lower-bounded by
`|s| + |t| - 1`."]
lemma Monoid.IsTorsionFree.card_add_card_sub_one_le_card_mul (h : IsTorsionFree α)
    (hs : s.Nonempty) (ht : t.Nonempty) : s.card + t.card - 1 ≤ (s * t).card := by
  -- In a torsion-free group `minOrder α = ⊤`, so the minimum in `min_le_card_mul` collapses.
  simpa only [h.minOrder, min_eq_right, le_top, Nat.cast_le] using Finset.min_le_card_mul hs ht

end General

/-! ### $$ℤ/nℤ$$ -/

/-- The **Cauchy-Davenport Theorem**. If `s`, `t` are nonempty sets in $$ℤ/pℤ$$, then the size of
`s + t` is lower-bounded by `|s| + |t| - 1`, unless this quantity is greater than `p`. -/
lemma ZMod.min_le_card_add {p : ℕ} (hp : p.Prime) {s t : Finset (ZMod p)} (hs : s.Nonempty)
    (ht : t.Nonempty) : min p (s.card + t.card - 1) ≤ (s + t).card := by
  -- For prime `p`, the minimal order of a nontrivial subgroup of `ZMod p` is `p` itself.
  simpa only [ZMod.minOrder_of_prime hp, min_le_iff, Nat.cast_le] using
    Finset.min_le_card_add hs ht

/-! ### Linearly ordered cancellative semigroups -/

/-- The **Cauchy-Davenport Theorem** for linearly ordered cancellative semigroups. The size of
`s * t` is lower-bounded by `|s| + |t| - 1`. -/
@[to_additive
"The **Cauchy-Davenport theorem** for linearly ordered additive cancellative semigroups. The size
of `s + t` is lower-bounded by `|s| + |t| - 1`."]
lemma Finset.card_add_card_sub_one_le_card_mul [LinearOrder α] [Semigroup α] [IsCancelMul α]
    [CovariantClass α α (· * ·) (· ≤ ·)] [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
    {s t : Finset α} (hs : s.Nonempty) (ht : t.Nonempty) :
    s.card + t.card - 1 ≤ (s * t).card := by
  -- `s * {min t}` and `{max s} * t` are subsets of `s * t` of sizes `|s|` and `|t|` meeting only
  -- in `max s * min t`, so inclusion-exclusion gives the bound.
  suffices s * {t.min' ht} ∩ ({s.max' hs} * t) = {s.max' hs * t.min' ht} by
    rw [← card_singleton_mul t (s.max' hs), ← card_mul_singleton s (t.min' ht),
      ← card_union_add_card_inter, ← card_singleton _, ← this, Nat.add_sub_cancel]
    exact card_mono (union_subset (mul_subset_mul_left <| singleton_subset_iff.2 <|
      min'_mem _ _) <| mul_subset_mul_right <| singleton_subset_iff.2 <| max'_mem _ _)
  refine eq_singleton_iff_unique_mem.2 ⟨mem_inter.2 ⟨mul_mem_mul (max'_mem _ _) <|
    mem_singleton_self _, mul_mem_mul (mem_singleton_self _) <| min'_mem _ _⟩, ?_⟩
  simp only [mem_inter, and_imp, mem_mul, mem_singleton, exists_and_left, exists_eq_left,
    forall_exists_index, and_imp, forall_apply_eq_imp_iff₂, mul_left_inj]
  exact fun a' ha' b' hb' h ↦ (le_max' _ _ ha').eq_of_not_lt fun ha ↦
    ((mul_lt_mul_right' ha _).trans_eq' h).not_le <| mul_le_mul_left' (min'_le _ _ hb') _
Combinatorics\SetFamily\FourFunctions.lean
/-
Copyright (c) 2023 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.Order.BigOperators.Group.Finset
import Mathlib.Algebra.Order.Pi
import Mathlib.Algebra.Order.Ring.Basic
import Mathlib.Data.Finset.Sups
import Mathlib.Data.Set.Subsingleton
import Mathlib.Order.Birkhoff
import Mathlib.Order.Booleanisation
import Mathlib.Order.Sublattice
import Mathlib.Tactic.Positivity.Basic
import Mathlib.Tactic.Ring

/-!
# The four functions theorem and corollaries

This file proves the four functions theorem. The statement is that if
`f₁ a * f₂ b ≤ f₃ (a ⊓ b) * f₄ (a ⊔ b)` for all `a`, `b` in a finite distributive lattice, then
`(∑ x ∈ s, f₁ x) * (∑ x ∈ t, f₂ x) ≤ (∑ x ∈ s ⊼ t, f₃ x) * (∑ x ∈ s ⊻ t, f₄ x)` where
`s ⊼ t = {a ⊓ b | a ∈ s, b ∈ t}`, `s ⊻ t = {a ⊔ b | a ∈ s, b ∈ t}`.

The proof uses Birkhoff's representation theorem to restrict to the case where the finite
distributive lattice is in fact a finite powerset algebra, namely `Finset α` for some finite `α`.
Then it proves this new statement by induction on the size of `α`.

## Main declarations

The two versions of the four functions theorem are
* `Finset.four_functions_theorem` for finite powerset algebras.
* `four_functions_theorem` for any finite distributive lattices.

We deduce a number of corollaries:
* `Finset.le_card_infs_mul_card_sups`: Daykin inequality. `|s| |t| ≤ |s ⊼ t| |s ⊻ t|`
* `holley`: Holley inequality.
* `fkg`: Fortuin-Kastelyn-Ginibre inequality.
* `Finset.card_le_card_diffs`: Marica-Schönheim inequality. `|s| ≤ |{a \ b | a, b ∈ s}|`

## TODO

Prove that lattices in which `Finset.le_card_infs_mul_card_sups` holds are distributive. See
Daykin, *A lattice is distributive iff |A| |B| <= |A ∨ B| |A ∧ B|*

Prove the Fishburn-Shepp inequality.

Is `collapse` a construct generally useful for set family inductions? If so, we should move it to
an earlier file and give it a proper API.

## References

[*Applications of the FKG Inequality and Its Relatives*, Graham][Graham1983]
-/

open Finset Fintype Function
open scoped FinsetFamily

variable {α β : Type*}

section Finset
variable [DecidableEq α] [LinearOrderedCommSemiring β] {𝒜 ℬ : Finset (Finset α)} {a : α}
  {f f₁ f₂ f₃ f₄ g μ : Finset α → β} {s t u : Finset α}

/-- The `n = 1` case of the Ahlswede-Daykin inequality. Note that we can't just expand everything
out and bound termwise since `c₀ * d₁` appears twice on the RHS of the assumptions while `c₁ * d₀`
does not appear. -/
private lemma ineq [ExistsAddOfLE β] {a₀ a₁ b₀ b₁ c₀ c₁ d₀ d₁ : β} (ha₀ : 0 ≤ a₀) (ha₁ : 0 ≤ a₁)
    (hb₀ : 0 ≤ b₀) (hb₁ : 0 ≤ b₁) (hc₀ : 0 ≤ c₀) (hc₁ : 0 ≤ c₁) (hd₀ : 0 ≤ d₀) (hd₁ : 0 ≤ d₁)
    (h₀₀ : a₀ * b₀ ≤ c₀ * d₀) (h₁₀ : a₁ * b₀ ≤ c₀ * d₁) (h₀₁ : a₀ * b₁ ≤ c₀ * d₁)
    (h₁₁ : a₁ * b₁ ≤ c₁ * d₁) : (a₀ + a₁) * (b₀ + b₁) ≤ (c₀ + c₁) * (d₀ + d₁) := by
  -- Expand both sides; the outer terms are bounded directly, leaving the cross terms.
  calc
    _ = a₀ * b₀ + (a₀ * b₁ + a₁ * b₀) + a₁ * b₁ := by ring
    _ ≤ c₀ * d₀ + (c₀ * d₁ + c₁ * d₀) + c₁ * d₁ := add_le_add_three h₀₀ ?_ h₁₁
    _ = (c₀ + c₁) * (d₀ + d₁) := by ring
  -- Remaining goal: compare the cross terms. Split on whether `c₀ * d₁` vanishes.
  obtain hcd | hcd := (mul_nonneg hc₀ hd₁).eq_or_gt
  · rw [hcd] at h₀₁ h₁₀
    rw [h₀₁.antisymm, h₁₀.antisymm, add_zero] <;> positivity
  -- Otherwise cancel a factor of `c₀ * d₁ > 0` after multiplying through.
  refine le_of_mul_le_mul_right ?_ hcd
  calc
    (a₀ * b₁ + a₁ * b₀) * (c₀ * d₁) = a₀ * b₁ * (c₀ * d₁) + c₀ * d₁ * (a₁ * b₀) := by ring
    _ ≤ a₀ * b₁ * (a₁ * b₀) + c₀ * d₁ * (c₀ * d₁) := mul_add_mul_le_mul_add_mul h₀₁ h₁₀
    _ = a₀ * b₀ * (a₁ * b₁) + c₀ * d₁ * (c₀ * d₁) := by ring
    _ ≤ c₀ * d₀ * (c₁ * d₁) + c₀ * d₁ * (c₀ * d₁) :=
      add_le_add_right (mul_le_mul h₀₀ h₁₁ (by positivity) <| by positivity) _
    _ = (c₀ * d₁ + c₁ * d₀) * (c₀ * d₁) := by ring

/-- `collapse 𝒜 a f s` is the sum of `f t` over those `t ∈ 𝒜` with `t.erase a = s`, i.e. over the
at most two candidates `s` and `insert a s`. -/
private def collapse (𝒜 : Finset (Finset α)) (a : α) (f : Finset α → β) (s : Finset α) : β :=
  ∑ t ∈ 𝒜.filter fun t ↦ t.erase a = s, f t

-- When `a ∉ s`, the finsets whose erasure of `a` is `s` are exactly `s` and `insert a s`.
private lemma erase_eq_iff (hs : a ∉ s) : t.erase a = s ↔ t = s ∨ t = insert a s := by
  by_cases ht : a ∈ t <;>
  · simp [ne_of_mem_of_not_mem', erase_eq_iff_eq_insert, *]
    aesop

-- Explicit description of the filter underlying `collapse`.
private lemma filter_collapse_eq (ha : a ∉ s) (𝒜 : Finset (Finset α)) :
    (𝒜.filter fun t ↦ t.erase a = s) =
      if s ∈ 𝒜 then
        (if insert a s ∈ 𝒜 then {s, insert a s} else {s})
      else
        (if insert a s ∈ 𝒜 then {insert a s} else ∅) := by
  ext t; split_ifs <;> simp [erase_eq_iff ha] <;> aesop

lemma collapse_eq (ha : a ∉ s) (𝒜 : Finset (Finset α)) (f : Finset α → β) :
    collapse 𝒜 a f s =
      (if s ∈ 𝒜 then f s else 0) + if insert a s ∈ 𝒜 then f (insert a s) else 0 := by
  rw [collapse, filter_collapse_eq ha]
  split_ifs <;> simp [(ne_of_mem_of_not_mem' (mem_insert_self a s) ha).symm, *]

lemma collapse_of_mem (ha : a ∉ s) (ht : t ∈ 𝒜) (hu : u ∈ 𝒜) (hts : t = s)
    (hus : u = insert a s) : collapse 𝒜 a f s = f t + f u := by
  subst hts; subst hus; simp_rw [collapse_eq ha, if_pos ht, if_pos hu]

lemma le_collapse_of_mem (ha : a ∉ s) (hf : 0 ≤ f) (hts : t = s) (ht : t ∈ 𝒜) :
    f t ≤ collapse 𝒜 a f s := by
  subst hts
  rw [collapse_eq ha, if_pos ht]
  split_ifs
  · exact le_add_of_nonneg_right <| hf _
  · rw [add_zero]

lemma le_collapse_of_insert_mem (ha : a ∉ s) (hf : 0 ≤ f) (hts : t = insert a s) (ht : t ∈ 𝒜) :
    f t ≤ collapse 𝒜 a f s := by
  rw [collapse_eq ha, ← hts, if_pos ht]
  split_ifs
  · exact le_add_of_nonneg_left <| hf _
  · rw [zero_add]

lemma collapse_nonneg (hf : 0 ≤ f) : 0 ≤ collapse 𝒜 a f := fun _s ↦ sum_nonneg fun _t _ ↦ hf _

-- The modularity hypothesis of the four functions theorem is preserved by `collapse`:
-- this is the inductive step of `Finset.four_functions_theorem`, reduced via `ineq`.
lemma collapse_modular [ExistsAddOfLE β] (hu : a ∉ u) (h₁ : 0 ≤ f₁) (h₂ : 0 ≤ f₂) (h₃ : 0 ≤ f₃)
    (h₄ : 0 ≤ f₄)
    (h : ∀ ⦃s⦄, s ⊆ insert a u → ∀ ⦃t⦄, t ⊆ insert a u → f₁ s * f₂ t ≤ f₃ (s ∩ t) * f₄ (s ∪ t))
    (𝒜 ℬ : Finset (Finset α)) :
    ∀ ⦃s⦄, s ⊆ u → ∀ ⦃t⦄, t ⊆ u →
      collapse 𝒜 a f₁ s * collapse ℬ a f₂ t ≤
        collapse (𝒜 ⊼ ℬ) a f₃ (s ∩ t) * collapse (𝒜 ⊻ ℬ) a f₄ (s ∪ t) := by
  rintro s hsu t htu
  -- Gather a bunch of facts we'll need a lot
  have := hsu.trans <| subset_insert a _
  have := htu.trans <| subset_insert a _
  have := insert_subset_insert a hsu
  have := insert_subset_insert a htu
  have has := not_mem_mono hsu hu
  have hat := not_mem_mono htu hu
  have : a ∉ s ∩ t := not_mem_mono (inter_subset_left.trans hsu) hu
  have := not_mem_union.2 ⟨has, hat⟩
  -- Case on which of `s`, `insert a s`, `t`, `insert a t` lie in `𝒜`, `ℬ`.
  rw [collapse_eq has]
  split_ifs
  · rw [collapse_eq hat]
    split_ifs
    · rw [collapse_of_mem ‹_› (inter_mem_infs ‹_› ‹_›) (inter_mem_infs ‹_› ‹_›) rfl
        (insert_inter_distrib _ _ _).symm, collapse_of_mem ‹_› (union_mem_sups ‹_› ‹_›)
        (union_mem_sups ‹_› ‹_›) rfl (insert_union_distrib _ _ _).symm]
      refine ineq (h₁ _) (h₁ _) (h₂ _) (h₂ _) (h₃ _) (h₃ _) (h₄ _) (h₄ _) (h ‹_› ‹_›) ?_ ?_ ?_
      · simpa [*] using h ‹insert a s ⊆ _› ‹t ⊆ _›
      · simpa [*] using h ‹s ⊆ _› ‹insert a t ⊆ _›
      · simpa [*] using h ‹insert a s ⊆ _› ‹insert a t ⊆ _›
    · rw [add_zero, add_mul]
      refine (add_le_add (h ‹_› ‹_›) <| h ‹_› ‹_›).trans ?_
      rw [collapse_of_mem ‹_› (union_mem_sups ‹_› ‹_›) (union_mem_sups ‹_› ‹_›) rfl
        (insert_union _ _ _), insert_inter_of_not_mem ‹_›, ← mul_add]
      exact mul_le_mul_of_nonneg_right (le_collapse_of_mem ‹_› h₃ rfl <|
        inter_mem_infs ‹_› ‹_›) <| add_nonneg (h₄ _) <| h₄ _
    · rw [zero_add, add_mul]
      refine (add_le_add (h ‹_› ‹_›) <| h ‹_› ‹_›).trans ?_
      rw [collapse_of_mem ‹_› (inter_mem_infs ‹_› ‹_›) (inter_mem_infs ‹_› ‹_›)
        (inter_insert_of_not_mem ‹_›) (insert_inter_distrib _ _ _).symm, union_insert,
        insert_union_distrib, ← add_mul]
      exact mul_le_mul_of_nonneg_left (le_collapse_of_insert_mem ‹_› h₄
        (insert_union_distrib _ _ _).symm <| union_mem_sups ‹_› ‹_›) <|
        add_nonneg (h₃ _) <| h₃ _
    · rw [add_zero, mul_zero]
      exact mul_nonneg (collapse_nonneg h₃ _) <| collapse_nonneg h₄ _
  · rw [add_zero, collapse_eq hat, mul_add]
    split_ifs
    · refine (add_le_add (h ‹_› ‹_›) <| h ‹_› ‹_›).trans ?_
      rw [collapse_of_mem ‹_› (union_mem_sups ‹_› ‹_›) (union_mem_sups ‹_› ‹_›) rfl
        (union_insert _ _ _), inter_insert_of_not_mem ‹_›, ← mul_add]
      exact mul_le_mul_of_nonneg_right (le_collapse_of_mem ‹_› h₃ rfl <|
        inter_mem_infs ‹_› ‹_›) <| add_nonneg (h₄ _) <| h₄ _
    · rw [mul_zero, add_zero]
      exact (h ‹_› ‹_›).trans <| mul_le_mul (le_collapse_of_mem ‹_› h₃ rfl <|
        inter_mem_infs ‹_› ‹_›) (le_collapse_of_mem ‹_› h₄ rfl <| union_mem_sups ‹_› ‹_›)
        (h₄ _) <| collapse_nonneg h₃ _
    · rw [mul_zero, zero_add]
      refine (h ‹_› ‹_›).trans <| mul_le_mul ?_ (le_collapse_of_insert_mem ‹_› h₄
        (union_insert _ _ _) <| union_mem_sups ‹_› ‹_›) (h₄ _) <| collapse_nonneg h₃ _
      exact le_collapse_of_mem (not_mem_mono inter_subset_left ‹_›) h₃
        (inter_insert_of_not_mem ‹_›) <| inter_mem_infs ‹_› ‹_›
    · simp_rw [mul_zero, add_zero]
      exact mul_nonneg (collapse_nonneg h₃ _) <| collapse_nonneg h₄ _
  · rw [zero_add, collapse_eq hat, mul_add]
    split_ifs
    · refine (add_le_add (h ‹_› ‹_›) <| h ‹_› ‹_›).trans ?_
      rw [collapse_of_mem ‹_› (inter_mem_infs ‹_› ‹_›) (inter_mem_infs ‹_› ‹_›)
        (insert_inter_of_not_mem ‹_›) (insert_inter_distrib _ _ _).symm,
        insert_inter_of_not_mem ‹_›, ← insert_inter_distrib, insert_union,
        insert_union_distrib, ← add_mul]
      exact mul_le_mul_of_nonneg_left (le_collapse_of_insert_mem ‹_› h₄
        (insert_union_distrib _ _ _).symm <| union_mem_sups ‹_› ‹_›) <|
        add_nonneg (h₃ _) <| h₃ _
    · rw [mul_zero, add_zero]
      refine (h ‹_› ‹_›).trans <| mul_le_mul (le_collapse_of_mem ‹_› h₃
        (insert_inter_of_not_mem ‹_›) <| inter_mem_infs ‹_› ‹_›) (le_collapse_of_insert_mem ‹_›
        h₄ (insert_union _ _ _) <| union_mem_sups ‹_› ‹_›) (h₄ _) <| collapse_nonneg h₃ _
    · rw [mul_zero, zero_add]
      exact (h ‹_› ‹_›).trans <| mul_le_mul (le_collapse_of_insert_mem ‹_› h₃
        (insert_inter_distrib _ _ _).symm <| inter_mem_infs ‹_› ‹_›)
        (le_collapse_of_insert_mem ‹_› h₄ (insert_union_distrib _ _ _).symm <|
          union_mem_sups ‹_› ‹_›) (h₄ _) <| collapse_nonneg h₃ _
    · simp_rw [mul_zero, add_zero]
      exact mul_nonneg (collapse_nonneg h₃ _) <| collapse_nonneg h₄ _
  · simp_rw [add_zero, zero_mul]
    exact mul_nonneg (collapse_nonneg h₃ _) <| collapse_nonneg h₄ _

-- Summing `collapse 𝒜 a f` over subsets of `u` recovers the sum of `f` over `𝒜`.
lemma sum_collapse (h𝒜 : 𝒜 ⊆ (insert a u).powerset) (hu : a ∉ u) :
    ∑ s ∈ u.powerset, collapse 𝒜 a f s = ∑ s ∈ 𝒜, f s := by
  calc
    _ = ∑ s ∈ u.powerset ∩ 𝒜, f s + ∑ s ∈ u.powerset.image (insert a) ∩ 𝒜, f s := ?_
    _ = ∑ s ∈ u.powerset ∩ 𝒜, f s + ∑ s ∈ ((insert a u).powerset \ u.powerset) ∩ 𝒜, f s := ?_
    _ = ∑ s ∈ 𝒜, f s := ?_
  · rw [← sum_ite_mem, ← sum_ite_mem, sum_image, ← sum_add_distrib]
    · exact sum_congr rfl fun s hs ↦ collapse_eq (not_mem_mono (mem_powerset.1 hs) hu) _ _
    · exact (insert_erase_invOn.2.injOn).mono fun s hs ↦ not_mem_mono (mem_powerset.1 hs) hu
  · congr with s
    simp only [mem_image, mem_powerset, mem_sdiff, subset_insert_iff]
    refine ⟨?_, fun h ↦ ⟨_, h.1, ?_⟩⟩
    · rintro ⟨s, hs, rfl⟩
      exact ⟨subset_insert_iff.1 <| insert_subset_insert _ hs,
        fun h ↦ hu <| h <| mem_insert_self _ _⟩
    · rw [insert_erase (erase_ne_self.1 fun hs ↦ ?_)]
      rw [hs] at h
      exact h.2 h.1
  · rw [← sum_union (disjoint_sdiff_self_right.mono inf_le_left inf_le_left),
      ← union_inter_distrib_right, union_sdiff_of_subset (powerset_mono.2 <| subset_insert _ _),
      inter_eq_right.2 h𝒜]

variable [ExistsAddOfLE β]

/-- The **Four Functions Theorem** on a powerset algebra. See `four_functions_theorem` for the
finite distributive lattice generalisation. -/
protected lemma Finset.four_functions_theorem (u : Finset α) (h₁ : 0 ≤ f₁) (h₂ : 0 ≤ f₂)
    (h₃ : 0 ≤ f₃) (h₄ : 0 ≤ f₄)
    (h : ∀ ⦃s⦄, s ⊆ u → ∀ ⦃t⦄, t ⊆ u → f₁ s * f₂ t ≤ f₃ (s ∩ t) * f₄ (s ∪ t))
    {𝒜 ℬ : Finset (Finset α)} (h𝒜 : 𝒜 ⊆ u.powerset) (hℬ : ℬ ⊆ u.powerset) :
    (∑ s ∈ 𝒜, f₁ s) * ∑ s ∈ ℬ, f₂ s ≤ (∑ s ∈ 𝒜 ⊼ ℬ, f₃ s) * ∑ s ∈ 𝒜 ⊻ ℬ, f₄ s := by
  -- Induct on the ground set `u`, replacing each function by its `collapse` at the new element.
  induction' u using Finset.induction with a u hu ih generalizing f₁ f₂ f₃ f₄ 𝒜 ℬ
  · simp only [Finset.powerset_empty, Finset.subset_singleton_iff] at h𝒜 hℬ
    obtain rfl | rfl := h𝒜 <;> obtain rfl | rfl := hℬ <;> simp;
      exact h (subset_refl ∅) subset_rfl
  specialize ih (collapse_nonneg h₁) (collapse_nonneg h₂) (collapse_nonneg h₃)
    (collapse_nonneg h₄) (collapse_modular hu h₁ h₂ h₃ h₄ h 𝒜 ℬ) Subset.rfl Subset.rfl
  have : 𝒜 ⊼ ℬ ⊆ powerset (insert a u) := by simpa using infs_subset h𝒜 hℬ
  have : 𝒜 ⊻ ℬ ⊆ powerset (insert a u) := by simpa using sups_subset h𝒜 hℬ
  simpa only [powerset_sups_powerset_self, powerset_infs_powerset_self, sum_collapse,
    not_false_eq_true, *] using ih

variable (f₁ f₂ f₃ f₄) [Fintype α]

-- Specialisation of the powerset-algebra theorem to `u = univ`, with an unconditional hypothesis.
private lemma four_functions_theorem_aux (h₁ : 0 ≤ f₁) (h₂ : 0 ≤ f₂) (h₃ : 0 ≤ f₃) (h₄ : 0 ≤ f₄)
    (h : ∀ s t, f₁ s * f₂ t ≤ f₃ (s ∩ t) * f₄ (s ∪ t)) (𝒜 ℬ : Finset (Finset α)) :
    (∑ s ∈ 𝒜, f₁ s) * ∑ s ∈ ℬ, f₂ s ≤ (∑ s ∈ 𝒜 ⊼ ℬ, f₃ s) * ∑ s ∈ 𝒜 ⊻ ℬ, f₄ s := by
  refine univ.four_functions_theorem h₁ h₂ h₃ h₄ ?_ ?_ ?_ <;> simp [h]

end Finset

section DistribLattice
variable [DistribLattice α] [LinearOrderedCommSemiring β] [ExistsAddOfLE β]
  (f f₁ f₂ f₃ f₄ g μ : α → β)

/-- The **Four Functions Theorem**, aka **Ahlswede-Daykin Inequality**. -/
lemma four_functions_theorem [DecidableEq α] (h₁ : 0 ≤ f₁) (h₂ : 0 ≤ f₂) (h₃ : 0 ≤ f₃)
    (h₄ : 0 ≤ f₄) (h : ∀ a b, f₁ a * f₂ b ≤ f₃ (a ⊓ b) * f₄ (a ⊔ b)) (s t : Finset α) :
    (∑ a ∈ s, f₁ a) * ∑ a ∈ t, f₂ a ≤ (∑ a ∈ s ⊼ t, f₃ a) * ∑ a ∈ s ⊻ t, f₄ a := by
  classical
  -- Work inside the (finite) sublattice generated by `s ∪ t`.
  set L : Sublattice α :=
    ⟨latticeClosure (s ∪ t), isSublattice_latticeClosure.1, isSublattice_latticeClosure.2⟩
  have : Finite L := (s.finite_toSet.union t.finite_toSet).latticeClosure.to_subtype
  set s' : Finset L := s.preimage (↑) Subtype.coe_injective.injOn
  set t' : Finset L := t.preimage (↑) Subtype.coe_injective.injOn
  have hs' : s'.map ⟨L.subtype, Subtype.coe_injective⟩ = s := by
    simp [s', map_eq_image, image_preimage, filter_eq_self]
    exact fun a ha ↦ subset_latticeClosure <| Set.subset_union_left ha
  have ht' : t'.map ⟨L.subtype, Subtype.coe_injective⟩ = t := by
    simp [t', map_eq_image, image_preimage, filter_eq_self]
    exact fun a ha ↦ subset_latticeClosure <| Set.subset_union_right ha
  clear_value s' t'
  -- Birkhoff's representation embeds `L` into a powerset algebra; transfer via `extend g`.
  obtain ⟨β, _, _, g, hg⟩ := exists_birkhoff_representation L
  have := four_functions_theorem_aux (extend g (f₁ ∘ (↑)) 0) (extend g (f₂ ∘ (↑)) 0)
    (extend g (f₃ ∘ (↑)) 0) (extend g (f₄ ∘ (↑)) 0) (extend_nonneg (fun _ ↦ h₁ _) le_rfl)
    (extend_nonneg (fun _ ↦ h₂ _) le_rfl) (extend_nonneg (fun _ ↦ h₃ _) le_rfl)
    (extend_nonneg (fun _ ↦ h₄ _) le_rfl) ?_ (s'.map ⟨g, hg⟩) (t'.map ⟨g, hg⟩)
  · simpa only [← hs', ← ht', ← map_sups, ← map_infs, sum_map, Embedding.coeFn_mk,
      hg.extend_apply] using this
  rintro s t
  classical
  obtain ⟨a, rfl⟩ | hs := em (∃ a, g a = s)
  · obtain ⟨b, rfl⟩ | ht := em (∃ b, g b = t)
    · simp_rw [← sup_eq_union, ← inf_eq_inter, ← map_sup, ← map_inf, hg.extend_apply]
      exact h _ _
    · simpa [extend_apply' _ _ _ ht] using
        mul_nonneg (extend_nonneg (fun a : L ↦ h₃ a) le_rfl _)
          (extend_nonneg (fun a : L ↦ h₄ a) le_rfl _)
  · simpa [extend_apply' _ _ _ hs] using
      mul_nonneg (extend_nonneg (fun a : L ↦ h₃ a) le_rfl _)
        (extend_nonneg (fun a : L ↦ h₄ a) le_rfl _)

/-- An inequality of Daykin. Interestingly, any lattice in which this inequality holds is
distributive. -/
lemma Finset.le_card_infs_mul_card_sups [DecidableEq α] (s t : Finset α) :
    s.card * t.card ≤ (s ⊼ t).card * (s ⊻ t).card := by
  -- Take all four functions constantly `1` in the four functions theorem.
  simpa using four_functions_theorem (1 : α → ℕ) 1 1 1 zero_le_one zero_le_one zero_le_one
    zero_le_one (fun _ _ ↦ le_rfl) s t

variable [Fintype α]

/-- Special case of the **Four Functions Theorem** when `s = t = univ`. -/
lemma four_functions_theorem_univ (h₁ : 0 ≤ f₁) (h₂ : 0 ≤ f₂) (h₃ : 0 ≤ f₃) (h₄ : 0 ≤ f₄)
    (h : ∀ a b, f₁ a * f₂ b ≤ f₃ (a ⊓ b) * f₄ (a ⊔ b)) :
    (∑ a, f₁ a) * ∑ a, f₂ a ≤ (∑ a, f₃ a) * ∑ a, f₄ a := by
  classical
  simpa using four_functions_theorem f₁ f₂ f₃ f₄ h₁ h₂ h₃ h₄ h univ univ

/-- The **Holley Inequality**. -/
lemma holley (hμ₀ : 0 ≤ μ) (hf : 0 ≤ f) (hg : 0 ≤ g) (hμ : Monotone μ)
    (hfg : ∑ a, f a = ∑ a, g a) (h : ∀ a b, f a * g b ≤ f (a ⊓ b) * g (a ⊔ b)) :
    ∑ a, μ a * f a ≤ ∑ a, μ a * g a := by
  classical
  -- Dispose of the degenerate cases `f = 0` and `g = 0` first.
  obtain rfl | hf := hf.eq_or_lt
  · simp only [Pi.zero_apply, sum_const_zero, eq_comm,
      Fintype.sum_eq_zero_iff_of_nonneg hg] at hfg
    simp [hfg]
  obtain rfl | hg := hg.eq_or_lt
  · simp only [Pi.zero_apply, sum_const_zero, Fintype.sum_eq_zero_iff_of_nonneg hf.le] at hfg
    simp [hfg]
  have := four_functions_theorem g (μ * f) f (μ * g) hg.le (mul_nonneg hμ₀ hf.le) hf.le
    (mul_nonneg hμ₀ hg.le) (fun a b ↦ ?_) univ univ
  · simpa [hfg, sum_pos hg] using this
  · simp_rw [Pi.mul_apply, mul_left_comm _ (μ _), mul_comm (g _)]
    rw [sup_comm, inf_comm]
    exact mul_le_mul (hμ le_sup_left) (h _ _) (mul_nonneg (hf.le _) <| hg.le _) <| hμ₀ _

/-- The **Fortuin-Kastelyn-Ginibre Inequality**. -/
lemma fkg (hμ₀ : 0 ≤ μ) (hf₀ : 0 ≤ f) (hg₀ : 0 ≤ g) (hf : Monotone f) (hg : Monotone g)
    (hμ : ∀ a b, μ a * μ b ≤ μ (a ⊓ b) * μ (a ⊔ b)) :
    (∑ a, μ a * f a) * ∑ a, μ a * g a ≤ (∑ a, μ a) * ∑ a, μ a * (f a * g a) := by
  refine four_functions_theorem_univ (μ * f) (μ * g) μ _ (mul_nonneg hμ₀ hf₀)
    (mul_nonneg hμ₀ hg₀) hμ₀ (mul_nonneg hμ₀ <| mul_nonneg hf₀ hg₀) (fun a b ↦ ?_)
  dsimp
  rw [mul_mul_mul_comm, ← mul_assoc (μ (a ⊓ b))]
  exact mul_le_mul (hμ _ _) (mul_le_mul (hf le_sup_left) (hg le_sup_right) (hg₀ _) <| hf₀ _)
    (mul_nonneg (hf₀ _) <| hg₀ _) <| mul_nonneg (hμ₀ _) <| hμ₀ _

end DistribLattice

open Booleanisation

variable [DecidableEq α] [GeneralizedBooleanAlgebra α]

/-- A slight generalisation of the **Marica-Schönheim Inequality**. -/
lemma Finset.le_card_diffs_mul_card_diffs (s t : Finset α) :
    s.card * t.card ≤ (s \\ t).card * (t \\ s).card := by
  -- Push everything through the Booleanisation, where differences become infs with complements.
  have : ∀ s t : Finset α,
      (s \\ t).map ⟨_, liftLatticeHom_injective⟩ =
        s.map ⟨_, liftLatticeHom_injective⟩ \\ t.map ⟨_, liftLatticeHom_injective⟩ := by
    rintro s t
    simp_rw [map_eq_image]
    exact image_image₂_distrib fun a b ↦ rfl
  simpa [← card_compls (_ ⊻ _), ← map_sup, ← map_inf, ← this] using
    (s.map ⟨_, liftLatticeHom_injective⟩).le_card_infs_mul_card_sups
      (t.map ⟨_, liftLatticeHom_injective⟩)ᶜˢ

/-- The **Marica-Schönheim Inequality**. -/
lemma Finset.card_le_card_diffs (s : Finset α) : s.card ≤ (s \\ s).card :=
  -- `|s|² ≤ |s \\ s|²` by the previous lemma with `t = s`; take square roots.
  le_of_pow_le_pow_left two_ne_zero (zero_le _) <| by
    simpa [← sq] using s.le_card_diffs_mul_card_diffs s
Combinatorics\SetFamily\HarrisKleitman.lean
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Algebra.Order.Ring.Nat
import Mathlib.Combinatorics.SetFamily.Compression.Down
import Mathlib.Order.UpperLower.Basic
import Mathlib.Data.Fintype.Powerset

/-!
# Harris-Kleitman inequality

This file proves the Harris-Kleitman inequality. This relates `𝒜.card * ℬ.card` and
`2 ^ card α * (𝒜 ∩ ℬ).card` where `𝒜` and `ℬ` are upward- or downward-closed finite families of
finsets. This can be interpreted as saying that any two lower sets (resp. any two upper sets)
correlate in the uniform measure.

## Main declarations

* `IsLowerSet.le_card_inter_finset`: One form of the Harris-Kleitman inequality.

## References

* [D. J. Kleitman, *Families of non-disjoint subsets*][kleitman1966]
-/

open Finset

variable {α : Type*} [DecidableEq α] {𝒜 ℬ : Finset (Finset α)} {s : Finset α} {a : α}

-- Being a lower set is preserved by restricting to members avoiding `a`.
theorem IsLowerSet.nonMemberSubfamily (h : IsLowerSet (𝒜 : Set (Finset α))) :
    IsLowerSet (𝒜.nonMemberSubfamily a : Set (Finset α)) := fun s t hts => by
  simp_rw [mem_coe, mem_nonMemberSubfamily]
  exact And.imp (h hts) (mt <| @hts _)

-- Being a lower set is preserved by restricting to members containing `a` (and erasing `a`).
theorem IsLowerSet.memberSubfamily (h : IsLowerSet (𝒜 : Set (Finset α))) :
    IsLowerSet (𝒜.memberSubfamily a : Set (Finset α)) := by
  rintro s t hts
  simp_rw [mem_coe, mem_memberSubfamily]
  exact And.imp (h <| insert_subset_insert _ hts) (mt <| @hts _)

-- For a lower set, every member containing `a` stays a member after removing `a`.
theorem IsLowerSet.memberSubfamily_subset_nonMemberSubfamily
    (h : IsLowerSet (𝒜 : Set (Finset α))) :
    𝒜.memberSubfamily a ⊆ 𝒜.nonMemberSubfamily a := fun s => by
  rw [mem_memberSubfamily, mem_nonMemberSubfamily]
  exact And.imp_left (h <| subset_insert _ _)

/-- **Harris-Kleitman inequality**: Any two lower sets of finsets correlate. -/
theorem IsLowerSet.le_card_inter_finset' (h𝒜 : IsLowerSet (𝒜 : Set (Finset α)))
    (hℬ : IsLowerSet (ℬ : Set (Finset α))) (h𝒜s : ∀ t ∈ 𝒜, t ⊆ s) (hℬs : ∀ t ∈ ℬ, t ⊆ s) :
    𝒜.card * ℬ.card ≤ 2 ^ s.card * (𝒜 ∩ ℬ).card := by
  -- Induct on the ground set `s`; the base case is whether `𝒜`, `ℬ` are `∅` or `{∅}`.
  induction' s using Finset.induction with a s hs ih generalizing 𝒜 ℬ
  · simp_rw [subset_empty, ← subset_singleton_iff', subset_singleton_iff] at h𝒜s hℬs
    obtain rfl | rfl := h𝒜s
    · simp only [card_empty, zero_mul, empty_inter, mul_zero, le_refl]
    obtain rfl | rfl := hℬs
    · simp only [card_empty, inter_empty, mul_zero, zero_mul, le_refl]
    · simp only [card_empty, pow_zero, inter_singleton_of_mem, mem_singleton, card_singleton,
        le_refl]
  -- Split each family by membership of the new element `a` and recombine.
  rw [card_insert_of_not_mem hs, ← card_memberSubfamily_add_card_nonMemberSubfamily a 𝒜, ←
    card_memberSubfamily_add_card_nonMemberSubfamily a ℬ, add_mul, mul_add, mul_add,
    add_comm (_ * _), add_add_add_comm]
  refine (add_le_add_right (mul_add_mul_le_mul_add_mul (card_le_card
    h𝒜.memberSubfamily_subset_nonMemberSubfamily) <| card_le_card
    hℬ.memberSubfamily_subset_nonMemberSubfamily) _).trans ?_
  rw [← two_mul, pow_succ', mul_assoc]
  have h₀ : ∀ 𝒞 : Finset (Finset α), (∀ t ∈ 𝒞, t ⊆ insert a s) →
      ∀ t ∈ 𝒞.nonMemberSubfamily a, t ⊆ s := by
    rintro 𝒞 h𝒞 t ht
    rw [mem_nonMemberSubfamily] at ht
    exact (subset_insert_iff_of_not_mem ht.2).1 (h𝒞 _ ht.1)
  have h₁ : ∀ 𝒞 : Finset (Finset α), (∀ t ∈ 𝒞, t ⊆ insert a s) →
      ∀ t ∈ 𝒞.memberSubfamily a, t ⊆ s := by
    rintro 𝒞 h𝒞 t ht
    rw [mem_memberSubfamily] at ht
    exact (subset_insert_iff_of_not_mem ht.2).1 ((subset_insert _ _).trans <| h𝒞 _ ht.1)
  refine mul_le_mul_left' ?_ _
  -- Apply the induction hypothesis to both halves and glue the intersections back together.
  refine (add_le_add (ih h𝒜.memberSubfamily hℬ.memberSubfamily (h₁ _ h𝒜s) <| h₁ _ hℬs) <|
    ih h𝒜.nonMemberSubfamily hℬ.nonMemberSubfamily (h₀ _ h𝒜s) <| h₀ _ hℬs).trans_eq ?_
  rw [← mul_add, ← memberSubfamily_inter, ← nonMemberSubfamily_inter,
    card_memberSubfamily_add_card_nonMemberSubfamily]

variable [Fintype α]

/-- **Harris-Kleitman inequality**: Any two lower sets of finsets correlate. -/
theorem IsLowerSet.le_card_inter_finset (h𝒜 : IsLowerSet (𝒜 : Set (Finset α)))
    (hℬ : IsLowerSet (ℬ : Set (Finset α))) :
    𝒜.card * ℬ.card ≤ 2 ^ Fintype.card α * (𝒜 ∩ ℬ).card :=
  h𝒜.le_card_inter_finset' hℬ (fun _ _ => subset_univ _) fun _ _ => subset_univ _

/-- **Harris-Kleitman inequality**: Upper sets and lower sets of finsets anticorrelate. -/
theorem IsUpperSet.card_inter_le_finset (h𝒜 : IsUpperSet (𝒜 : Set (Finset α)))
    (hℬ : IsLowerSet (ℬ : Set (Finset α))) :
    2 ^ Fintype.card α * (𝒜 ∩ ℬ).card ≤ 𝒜.card * ℬ.card := by
  -- Apply the lower-set version to the complement family `𝒜ᶜ`.
  rw [← isLowerSet_compl, ← coe_compl] at h𝒜
  have := h𝒜.le_card_inter_finset hℬ
  rwa [card_compl, Fintype.card_finset, tsub_mul, tsub_le_iff_tsub_le, ← mul_tsub,
    ← card_sdiff inter_subset_right, sdiff_inter_self_right, sdiff_compl,
    _root_.inf_comm] at this

/-- **Harris-Kleitman inequality**: Lower sets and upper sets of finsets anticorrelate. -/
theorem IsLowerSet.card_inter_le_finset (h𝒜 : IsLowerSet (𝒜 : Set (Finset α)))
    (hℬ : IsUpperSet (ℬ : Set (Finset α))) :
    2 ^ Fintype.card α * (𝒜 ∩ ℬ).card ≤ 𝒜.card * ℬ.card := by
  rw [inter_comm, mul_comm 𝒜.card]
  exact hℬ.card_inter_le_finset h𝒜

/-- **Harris-Kleitman inequality**: Any two upper sets of finsets correlate. -/
theorem IsUpperSet.le_card_inter_finset (h𝒜 : IsUpperSet (𝒜 : Set (Finset α)))
    (hℬ : IsUpperSet (ℬ : Set (Finset α))) :
    𝒜.card * ℬ.card ≤ 2 ^ Fintype.card α * (𝒜 ∩ ℬ).card := by
  -- Again pass to the complement of `𝒜` and use the anticorrelation lemma.
  rw [← isLowerSet_compl, ← coe_compl] at h𝒜
  have := h𝒜.card_inter_le_finset hℬ
  rwa [card_compl, Fintype.card_finset, tsub_mul, le_tsub_iff_le_tsub, ← mul_tsub,
    ← card_sdiff inter_subset_right, sdiff_inter_self_right, sdiff_compl,
    _root_.inf_comm] at this
  · exact mul_le_mul_left' (card_le_card inter_subset_right) _
  · rw [← Fintype.card_finset]
    exact mul_le_mul_right' (card_le_univ _) _
Combinatorics\SetFamily\Intersecting.lean
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies -/ import Mathlib.Data.Fintype.Card import Mathlib.Order.UpperLower.Basic /-! # Intersecting families This file defines intersecting families and proves their basic properties. ## Main declarations * `Set.Intersecting`: Predicate for a set of elements in a generalized boolean algebra to be an intersecting family. * `Set.Intersecting.card_le`: An intersecting family can only take up to half the elements, because `a` and `aᶜ` cannot simultaneously be in it. * `Set.Intersecting.is_max_iff_card_eq`: Any maximal intersecting family takes up half the elements. ## References * [D. J. Kleitman, *Families of non-disjoint subsets*][kleitman1966] -/ open Finset variable {α : Type*} namespace Set section SemilatticeInf variable [SemilatticeInf α] [OrderBot α] {s t : Set α} {a b c : α} /-- A set family is intersecting if every pair of elements is non-disjoint. 
-/
def Intersecting (s : Set α) : Prop :=
  ∀ ⦃a⦄, a ∈ s → ∀ ⦃b⦄, b ∈ s → ¬Disjoint a b

@[mono]
theorem Intersecting.mono (h : t ⊆ s) (hs : s.Intersecting) : t.Intersecting := fun _a ha _b hb =>
  hs (h ha) (h hb)

theorem Intersecting.not_bot_mem (hs : s.Intersecting) : ⊥ ∉ s := fun h => hs h h disjoint_bot_left

theorem Intersecting.ne_bot (hs : s.Intersecting) (ha : a ∈ s) : a ≠ ⊥ :=
  ne_of_mem_of_not_mem ha hs.not_bot_mem

theorem intersecting_empty : (∅ : Set α).Intersecting := fun _ => False.elim

@[simp]
theorem intersecting_singleton : ({a} : Set α).Intersecting ↔ a ≠ ⊥ := by simp [Intersecting]

protected theorem Intersecting.insert (hs : s.Intersecting) (ha : a ≠ ⊥)
    (h : ∀ b ∈ s, ¬Disjoint a b) : (insert a s).Intersecting := by
  -- Case on whether each of the two elements is the new `a` or an old member.
  rintro b (rfl | hb) c (rfl | hc)
  · rwa [disjoint_self]
  · exact h _ hc
  · exact fun H => h _ hb H.symm
  · exact hs hb hc

theorem intersecting_insert :
    (insert a s).Intersecting ↔ s.Intersecting ∧ a ≠ ⊥ ∧ ∀ b ∈ s, ¬Disjoint a b :=
  ⟨fun h =>
    ⟨h.mono <| subset_insert _ _, h.ne_bot <| mem_insert _ _, fun _b hb =>
      h (mem_insert _ _) <| mem_insert_of_mem _ hb⟩,
    fun h => h.1.insert h.2.1 h.2.2⟩

theorem intersecting_iff_pairwise_not_disjoint :
    s.Intersecting ↔ (s.Pairwise fun a b => ¬Disjoint a b) ∧ s ≠ {⊥} := by
  -- `Pairwise` only covers distinct pairs, so the `a = b` case is handled via `s ≠ {⊥}`.
  refine ⟨fun h => ⟨fun a ha b hb _ => h ha hb, ?_⟩, fun h a ha b hb hab => ?_⟩
  · rintro rfl
    exact intersecting_singleton.1 h rfl
  have := h.1.eq ha hb (Classical.not_not.2 hab)
  rw [this, disjoint_self] at hab
  rw [hab] at hb
  exact h.2
    (eq_singleton_iff_unique_mem.2
      ⟨hb, fun c hc => not_ne_iff.1 fun H => h.1 hb hc H.symm disjoint_bot_left⟩)

protected theorem Subsingleton.intersecting (hs : s.Subsingleton) : s.Intersecting ↔ s ≠ {⊥} :=
  intersecting_iff_pairwise_not_disjoint.trans <| and_iff_right <| hs.pairwise _

theorem intersecting_iff_eq_empty_of_subsingleton [Subsingleton α] (s : Set α) :
    s.Intersecting ↔ s = ∅ := by
  -- In a subsingleton order every element is `⊥`, so a nonempty family must be `{⊥}`.
  refine
    subsingleton_of_subsingleton.intersecting.trans
      ⟨not_imp_comm.2 fun h => subsingleton_of_subsingleton.eq_singleton_of_mem ?_, ?_⟩
  · obtain ⟨a, ha⟩ := nonempty_iff_ne_empty.2 h
    rwa [Subsingleton.elim ⊥ a]
  · rintro rfl
    exact (Set.singleton_nonempty _).ne_empty.symm

/-- Maximal intersecting families are upper sets. -/
protected theorem Intersecting.isUpperSet (hs : s.Intersecting)
    (h : ∀ t : Set α, t.Intersecting → s ⊆ t → s = t) : IsUpperSet s := by
  classical
  rintro a b hab ha
  -- Inserting the larger element `b` keeps the family intersecting, so maximality forces `b ∈ s`.
  rw [h (Insert.insert b s) _ (subset_insert _ _)]
  · exact mem_insert _ _
  exact hs.insert (mt (eq_bot_mono hab) <| hs.ne_bot ha) fun c hc hbc =>
    hs ha hc <| hbc.mono_left hab

/-- Maximal intersecting families are upper sets. Finset version. -/
theorem Intersecting.isUpperSet' {s : Finset α} (hs : (s : Set α).Intersecting)
    (h : ∀ t : Finset α, (t : Set α).Intersecting → s ⊆ t → s = t) : IsUpperSet (s : Set α) := by
  classical
  rintro a b hab ha
  -- Same argument as `Intersecting.isUpperSet`, phrased through `Finset` coercions.
  rw [h (Insert.insert b s) _ (Finset.subset_insert _ _)]
  · exact mem_insert_self _ _
  rw [coe_insert]
  exact hs.insert (mt (eq_bot_mono hab) <| hs.ne_bot ha) fun c hc hbc =>
    hs ha hc <| hbc.mono_left hab

end SemilatticeInf

theorem Intersecting.exists_mem_set {𝒜 : Set (Set α)} (h𝒜 : 𝒜.Intersecting) {s t : Set α}
    (hs : s ∈ 𝒜) (ht : t ∈ 𝒜) : ∃ a, a ∈ s ∧ a ∈ t :=
  not_disjoint_iff.1 <| h𝒜 hs ht

theorem Intersecting.exists_mem_finset [DecidableEq α] {𝒜 : Set (Finset α)} (h𝒜 : 𝒜.Intersecting)
    {s t : Finset α} (hs : s ∈ 𝒜) (ht : t ∈ 𝒜) : ∃ a, a ∈ s ∧ a ∈ t :=
  not_disjoint_iff.1 <| disjoint_coe.not.2 <| h𝒜 hs ht

variable [BooleanAlgebra α]

theorem Intersecting.not_compl_mem {s : Set α} (hs : s.Intersecting) {a : α} (ha : a ∈ s) :
    aᶜ ∉ s := fun h => hs ha h disjoint_compl_right

theorem Intersecting.not_mem {s : Set α} (hs : s.Intersecting) {a : α} (ha : aᶜ ∈ s) : a ∉ s :=
  fun h => hs ha h disjoint_compl_left

theorem Intersecting.disjoint_map_compl {s : Finset α} (hs : (s : Set α).Intersecting) :
    Disjoint s (s.map ⟨compl, compl_injective⟩) := by
  rw [Finset.disjoint_left]
  rintro x hx hxc
  obtain ⟨x, hx', rfl⟩ := mem_map.mp hxc
  exact hs.not_compl_mem hx' hx

theorem Intersecting.card_le [Fintype α] {s : Finset α} (hs : (s : Set α).Intersecting) :
    2 * s.card ≤ Fintype.card α := by
  classical
  -- `s` and its image under complement are disjoint, so their disjoint union fits inside `univ`.
  refine (s.disjUnion _ hs.disjoint_map_compl).card_le_univ.trans_eq' ?_
  rw [Nat.two_mul, card_disjUnion, card_map]

variable [Nontrivial α] [Fintype α] {s : Finset α}

-- Note, this lemma is false when `α` has exactly one element and boring when `α` is empty.
theorem Intersecting.is_max_iff_card_eq (hs : (s : Set α).Intersecting) :
    (∀ t : Finset α, (t : Set α).Intersecting → s ⊆ t → s = t) ↔ 2 * s.card = Fintype.card α := by
  classical
  refine ⟨fun h ↦ ?_, fun h t ht hst ↦
    Finset.eq_of_subset_of_card_le hst <|
      Nat.le_of_mul_le_mul_left (ht.card_le.trans_eq h.symm) Nat.two_pos⟩
  -- For a maximal family, `s ⊔ sᶜ` (as a disjoint union) exhausts the whole powerset.
  suffices s.disjUnion (s.map ⟨compl, compl_injective⟩) hs.disjoint_map_compl = Finset.univ by
    rw [Fintype.card, ← this, Nat.two_mul, card_disjUnion, card_map]
  rw [← coe_eq_univ, disjUnion_eq_union, coe_union, coe_map, Function.Embedding.coeFn_mk,
    image_eq_preimage_of_inverse compl_compl compl_compl]
  refine eq_univ_of_forall fun a => ?_
  simp_rw [mem_union, mem_preimage]
  -- Suppose some `a` is in neither `s` nor `{x | xᶜ ∈ s}`; contradict maximality.
  by_contra! ha
  refine s.ne_insert_of_not_mem _ ha.1 (h _ ?_ <| s.subset_insert _)
  rw [coe_insert]
  refine hs.insert ?_ fun b hb hab => ha.2 <| (hs.isUpperSet' h) hab.le_compl_left hb
  -- The inserted element cannot be `⊥`: otherwise `⊤ ∈ s` by maximality, contradiction.
  rintro rfl
  have := h {⊤} (by rw [coe_singleton]; exact intersecting_singleton.2 top_ne_bot)
  rw [compl_bot] at ha
  rw [coe_eq_empty.1 ((hs.isUpperSet' h).not_top_mem.1 ha.2)] at this
  exact Finset.singleton_ne_empty _ (this <| Finset.empty_subset _).symm

theorem Intersecting.exists_card_eq (hs : (s : Set α).Intersecting) :
    ∃ t, s ⊆ t ∧ 2 * t.card = Fintype.card α ∧ (t : Set α).Intersecting := by
  have := hs.card_le
  rw [mul_comm, ← Nat.le_div_iff_mul_le' Nat.two_pos] at this
  revert hs
  -- Grow `s` by strong downward induction on cardinality until it is maximal.
  refine s.strongDownwardInductionOn ?_ this
  rintro s ih _hcard hs
  by_cases h : ∀ t : Finset α, (t : Set α).Intersecting → s ⊆ t → s = t
  · exact ⟨s, Subset.rfl, hs.is_max_iff_card_eq.1 h, hs⟩
  push_neg at h
  obtain ⟨t, ht, hst⟩ := h
  refine (ih ?_ (_root_.ssubset_iff_subset_ne.2 hst) ht).imp fun u => And.imp_left hst.1.trans
  rw [Nat.le_div_iff_mul_le' Nat.two_pos, mul_comm]
  exact ht.card_le

end Set
Combinatorics\SetFamily\Kleitman.lean
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import Mathlib.Combinatorics.SetFamily.HarrisKleitman
import Mathlib.Combinatorics.SetFamily.Intersecting

/-!
# Kleitman's bound on the size of intersecting families

An intersecting family on `n` elements has size at most `2ⁿ⁻¹`, so we could naïvely think that two
intersecting families could cover all `2ⁿ` sets. But actually that's not the case because for
example none of them can contain the empty set. Intersecting families are in some sense correlated.

Kleitman's bound stipulates that `k` intersecting families cover at most `2ⁿ - 2ⁿ⁻ᵏ` sets.

## Main declarations

* `Finset.card_biUnion_le_of_intersecting`: Kleitman's theorem.

## References

* [D. J. Kleitman, *Families of non-disjoint subsets*][kleitman1966]
-/

open Finset

open Fintype (card)

variable {ι α : Type*} [Fintype α] [DecidableEq α] [Nonempty α]

/-- **Kleitman's theorem**. An intersecting family on `n` elements contains at most `2ⁿ⁻¹` sets,
and each further intersecting family takes at most half of the sets that are in no previous
family.
-/
theorem Finset.card_biUnion_le_of_intersecting (s : Finset ι) (f : ι → Finset (Finset α))
    (hf : ∀ i ∈ s, (f i : Set (Finset α)).Intersecting) :
    (s.biUnion f).card ≤ 2 ^ Fintype.card α - 2 ^ (Fintype.card α - s.card) := by
  have : DecidableEq ι := by classical infer_instance
  -- If there are at least `n` families, the bound is just "everything but `∅`".
  obtain hs | hs := le_total (Fintype.card α) s.card
  · rw [tsub_eq_zero_of_le hs, pow_zero]
    refine (card_le_card <| biUnion_subset.2 fun i hi a ha ↦
      mem_compl.2 <| not_mem_singleton.2 <| (hf _ hi).ne_bot ha).trans_eq ?_
    rw [card_compl, Fintype.card_finset, card_singleton]
  -- Otherwise induct on the index set, peeling off one family at a time.
  induction' s using Finset.cons_induction with i s hi ih generalizing f
  · simp
  -- Enlarge each family to a maximal intersecting one (of size exactly `2ⁿ⁻¹`).
  set f' : ι → Finset (Finset α) := fun j ↦
    if hj : j ∈ cons i s hi then (hf j hj).exists_card_eq.choose else ∅
  have hf₁ : ∀ j, j ∈ cons i s hi → f j ⊆ f' j ∧ 2 * (f' j).card = 2 ^ Fintype.card α ∧
      (f' j : Set (Finset α)).Intersecting := by
    rintro j hj
    simp_rw [f', dif_pos hj, ← Fintype.card_finset]
    exact Classical.choose_spec (hf j hj).exists_card_eq
  -- Maximal intersecting families are upper sets, which lets us invoke Harris-Kleitman.
  have hf₂ : ∀ j, j ∈ cons i s hi → IsUpperSet (f' j : Set (Finset α)) := by
    refine fun j hj ↦ (hf₁ _ hj).2.2.isUpperSet' ((hf₁ _ hj).2.2.is_max_iff_card_eq.2 ?_)
    rw [Fintype.card_finset]
    exact (hf₁ _ hj).2.1
  refine (card_le_card <| biUnion_mono fun j hj ↦ (hf₁ _ hj).1).trans ?_
  nth_rw 1 [cons_eq_insert i]
  rw [biUnion_insert]
  -- Split off the newest family `f' i` and what the others add outside it.
  refine (card_mono <| @le_sup_sdiff _ _ _ <| f' i).trans ((card_union_le _ _).trans ?_)
  rw [union_sdiff_left, sdiff_eq_inter_compl]
  -- Work at scale `2 ^ (n + 1)` to keep all arithmetic in `ℕ`.
  refine le_of_mul_le_mul_left ?_ (pow_pos (zero_lt_two' ℕ) <| Fintype.card α + 1)
  rw [pow_succ, mul_add, mul_assoc, mul_comm _ 2, mul_assoc]
  -- Bound the overlap of the old families with `(f' i)ᶜ` via Harris-Kleitman anticorrelation.
  refine (add_le_add ((mul_le_mul_left <| pow_pos (zero_lt_two' ℕ) _).2
    (hf₁ _ <| mem_cons_self _ _).2.2.card_le) <|
    (mul_le_mul_left <| zero_lt_two' ℕ).2 <| IsUpperSet.card_inter_le_finset ?_ ?_).trans ?_
  · rw [coe_biUnion]
    exact isUpperSet_iUnion₂ fun i hi ↦ hf₂ _ <| subset_cons _ hi
  · rw [coe_compl]
    exact (hf₂ _ <| mem_cons_self _ _).compl
  rw [mul_tsub, card_compl, Fintype.card_finset, mul_left_comm, mul_tsub,
    (hf₁ _ <|
      mem_cons_self _ _).2.1, two_mul, add_tsub_cancel_left, ← mul_tsub, ← mul_two, mul_assoc,
    ← add_mul, mul_comm]
  refine mul_le_mul_left' ?_ _
  -- Apply the induction hypothesis to the remaining families and finish by `ℕ` arithmetic.
  refine (add_le_add_left (ih _ (fun i hi ↦ (hf₁ _ <| subset_cons _ hi).2.2)
    ((card_le_card <| subset_cons _).trans hs)) _).trans ?_
  rw [mul_tsub, two_mul, ← pow_succ',
    ← add_tsub_assoc_of_le (pow_le_pow_right' (one_le_two : (1 : ℕ) ≤ 2) tsub_le_self),
    tsub_add_eq_add_tsub hs, card_cons, add_tsub_add_eq_tsub_right]
Combinatorics\SetFamily\LYM.lean
/- Copyright (c) 2022 Bhavik Mehta, Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Alena Gusakov, Yaël Dillies -/ import Mathlib.Algebra.BigOperators.Ring import Mathlib.Algebra.Field.Rat import Mathlib.Algebra.Order.Field.Basic import Mathlib.Algebra.Order.Field.Rat import Mathlib.Combinatorics.Enumerative.DoubleCounting import Mathlib.Combinatorics.SetFamily.Shadow /-! # Lubell-Yamamoto-Meshalkin inequality and Sperner's theorem This file proves the local LYM and LYM inequalities as well as Sperner's theorem. ## Main declarations * `Finset.card_div_choose_le_card_shadow_div_choose`: Local Lubell-Yamamoto-Meshalkin inequality. The shadow of a set `𝒜` in a layer takes a greater proportion of its layer than `𝒜` does. * `Finset.sum_card_slice_div_choose_le_one`: Lubell-Yamamoto-Meshalkin inequality. The sum of densities of `𝒜` in each layer is at most `1` for any antichain `𝒜`. * `IsAntichain.sperner`: Sperner's theorem. The size of any antichain in `Finset α` is at most the size of the maximal layer of `Finset α`. It is a corollary of `sum_card_slice_div_choose_le_one`. ## TODO Prove upward local LYM. Provide equality cases. Local LYM gives that the equality case of LYM and Sperner is precisely when `𝒜` is a middle layer. `falling` could be useful more generally in grade orders. ## References * http://b-mehta.github.io/maths-notes/iii/mich/combinatorics.pdf * http://discretemath.imp.fu-berlin.de/DMII-2015-16/kruskal.pdf ## Tags shadow, lym, slice, sperner, antichain -/ open Finset Nat open FinsetFamily variable {𝕜 α : Type*} [LinearOrderedField 𝕜] namespace Finset /-! ### Local LYM inequality -/ section LocalLYM variable [DecidableEq α] [Fintype α] {𝒜 : Finset (Finset α)} {r : ℕ} /-- The downward **local LYM inequality**, with cancelled denominators. `𝒜` takes up less of `α^(r)` (the finsets of card `r`) than `∂𝒜` takes up of `α^(r - 1)`. 
-/
theorem card_mul_le_card_shadow_mul (h𝒜 : (𝒜 : Set (Finset α)).Sized r) :
    𝒜.card * r ≤ (∂ 𝒜).card * (Fintype.card α - r + 1) := by
  let i : DecidableRel ((· ⊆ ·) : Finset α → Finset α → Prop) := fun _ _ => Classical.dec _
  -- Double counting over the bipartite "subset" relation between `𝒜` and `∂ 𝒜`.
  refine card_mul_le_card_mul' (· ⊆ ·) (fun s hs => ?_) (fun s hs => ?_)
  -- Each `s ∈ 𝒜` has `r` distinct subsets in `∂ 𝒜`, one per erased element.
  · rw [← h𝒜 hs, ← card_image_of_injOn s.erase_injOn]
    refine card_le_card ?_
    simp_rw [image_subset_iff, mem_bipartiteBelow]
    exact fun a ha => ⟨erase_mem_shadow hs ha, erase_subset _ _⟩
  -- Each `s ∈ ∂ 𝒜` has at most `n - r + 1` supersets in `𝒜`, one per inserted new element.
  refine le_trans ?_ tsub_tsub_le_tsub_add
  rw [← (Set.Sized.shadow h𝒜) hs, ← card_compl, ← card_image_of_injOn (insert_inj_on' _)]
  refine card_le_card fun t ht => ?_
  -- Porting note: commented out the following line
  -- infer_instance
  rw [mem_bipartiteAbove] at ht
  -- `∅ ∉ 𝒜`, else the shadow of `𝒜` would be empty and `s` could not be in it.
  have : ∅ ∉ 𝒜 := by
    rw [← mem_coe, h𝒜.empty_mem_iff, coe_eq_singleton]
    rintro rfl
    rw [shadow_singleton_empty] at hs
    exact not_mem_empty s hs
  have h := exists_eq_insert_iff.2 ⟨ht.2, by
    rw [(sized_shadow_iff this).1 (Set.Sized.shadow h𝒜) ht.1, (Set.Sized.shadow h𝒜) hs]⟩
  rcases h with ⟨a, ha, rfl⟩
  exact mem_image_of_mem _ (mem_compl.2 ha)
-/
theorem card_div_choose_le_card_shadow_div_choose (hr : r ≠ 0)
    (h𝒜 : (𝒜 : Set (Finset α)).Sized r) :
    (𝒜.card : 𝕜) / (Fintype.card α).choose r ≤ (∂ 𝒜).card / (Fintype.card α).choose (r - 1) := by
  -- When `r > n` both binomial coefficients vanish and the left side is `0`.
  obtain hr' | hr' := lt_or_le (Fintype.card α) r
  · rw [choose_eq_zero_of_lt hr', cast_zero, div_zero]
    exact div_nonneg (cast_nonneg _) (cast_nonneg _)
  -- Otherwise divide through the cancelled-denominator inequality by the binomials.
  replace h𝒜 := card_mul_le_card_shadow_mul h𝒜
  rw [div_le_div_iff] <;> norm_cast
  · cases' r with r
    · exact (hr rfl).elim
    rw [tsub_add_eq_add_tsub hr', add_tsub_add_eq_tsub_right] at h𝒜
    apply le_of_mul_le_mul_right _ (pos_iff_ne_zero.2 hr)
    -- Rebalance the binomial products using the absorption identity `choose_succ_right_eq`.
    convert Nat.mul_le_mul_right ((Fintype.card α).choose r) h𝒜 using 1
    · simpa [mul_assoc, Nat.choose_succ_right_eq] using Or.inl (mul_comm _ _)
    · simp only [mul_assoc, choose_succ_right_eq, mul_eq_mul_left_iff]
      exact Or.inl (mul_comm _ _)
  · exact Nat.choose_pos hr'
  · exact Nat.choose_pos (r.pred_le.trans hr')

end LocalLYM

/-! ### LYM inequality -/

section LYM

section Falling

variable [DecidableEq α] (k : ℕ) (𝒜 : Finset (Finset α))

/-- `falling k 𝒜` is all the finsets of cardinality `k` which are a subset of something in `𝒜`.
-/
def falling : Finset (Finset α) :=
  𝒜.sup <| powersetCard k

variable {𝒜 k} {s : Finset α}

theorem mem_falling : s ∈ falling k 𝒜 ↔ (∃ t ∈ 𝒜, s ⊆ t) ∧ s.card = k := by
  simp_rw [falling, mem_sup, mem_powersetCard]
  aesop

variable (𝒜 k)

theorem sized_falling : (falling k 𝒜 : Set (Finset α)).Sized k := fun _ hs =>
  (mem_falling.1 hs).2

theorem slice_subset_falling : 𝒜 # k ⊆ falling k 𝒜 := fun s hs =>
  mem_falling.2 <| (mem_slice.1 hs).imp_left fun h => ⟨s, h, Subset.refl _⟩

theorem falling_zero_subset : falling 0 𝒜 ⊆ {∅} :=
  subset_singleton_iff'.2 fun _ ht => card_eq_zero.1 <| sized_falling _ _ ht

theorem slice_union_shadow_falling_succ : 𝒜 # k ∪ ∂ (falling (k + 1) 𝒜) = falling k 𝒜 := by
  ext s
  simp_rw [mem_union, mem_slice, mem_shadow_iff, mem_falling]
  constructor
  -- Forward: either `s` is a `k`-set of `𝒜` itself, or it comes from erasing an element.
  · rintro (h | ⟨s, ⟨⟨t, ht, hst⟩, hs⟩, a, ha, rfl⟩)
    · exact ⟨⟨s, h.1, Subset.refl _⟩, h.2⟩
    refine ⟨⟨t, ht, (erase_subset _ _).trans hst⟩, ?_⟩
    rw [card_erase_of_mem ha, hs]
    rfl
  -- Backward: if `s ∉ 𝒜`, extend `s` by one element towards its superset in `𝒜`.
  · rintro ⟨⟨t, ht, hst⟩, hs⟩
    by_cases h : s ∈ 𝒜
    · exact Or.inl ⟨h, hs⟩
    obtain ⟨a, ha, hst⟩ := ssubset_iff.1 (ssubset_of_subset_of_ne hst (ht.ne_of_not_mem h).symm)
    refine Or.inr ⟨insert a s, ⟨⟨t, ht, hst⟩, ?_⟩, a, mem_insert_self _ _, erase_insert ha⟩
    rw [card_insert_of_not_mem ha, hs]

variable {𝒜 k}

/-- The shadow of `falling m 𝒜` is disjoint from the `n`-sized elements of `𝒜`, thanks to the
antichain property. -/
theorem IsAntichain.disjoint_slice_shadow_falling {m n : ℕ}
    (h𝒜 : IsAntichain (· ⊆ ·) (𝒜 : Set (Finset α))) : Disjoint (𝒜 # m) (∂ (falling n 𝒜)) :=
  disjoint_right.2 fun s h₁ h₂ => by
    simp_rw [mem_shadow_iff, mem_falling] at h₁
    obtain ⟨s, ⟨⟨t, ht, hst⟩, _⟩, a, ha, rfl⟩ := h₁
    -- A common member would give two comparable elements of the antichain `𝒜`.
    refine h𝒜 (slice_subset h₂) ht ?_ ((erase_subset _ _).trans hst)
    rintro rfl
    exact not_mem_erase _ _ (hst ha)

/-- A bound on any top part of the sum in LYM in terms of the size of `falling k 𝒜`.
-/
theorem le_card_falling_div_choose [Fintype α] (hk : k ≤ Fintype.card α)
    (h𝒜 : IsAntichain (· ⊆ ·) (𝒜 : Set (Finset α))) :
    (∑ r ∈ range (k + 1),
        ((𝒜 # (Fintype.card α - r)).card : 𝕜) / (Fintype.card α).choose (Fintype.card α - r)) ≤
      (falling (Fintype.card α - k) 𝒜).card / (Fintype.card α).choose (Fintype.card α - k) := by
  -- Induct on `k`, absorbing one more top layer into `falling` at each step.
  induction' k with k ih
  · simp only [tsub_zero, cast_one, cast_le, sum_singleton, div_one, choose_self, range_one,
      zero_eq, zero_add, range_one, sum_singleton, nonpos_iff_eq_zero, tsub_zero, choose_self,
      cast_one, div_one, cast_le]
    exact card_le_card (slice_subset_falling _ _)
  -- `falling (n - k)` splits (disjointly, by the antichain property) into the slice
  -- `𝒜 # (n - k)` and the shadow of `falling (n - k + 1)`.
  rw [sum_range_succ, ← slice_union_shadow_falling_succ,
    card_union_of_disjoint (IsAntichain.disjoint_slice_shadow_falling h𝒜), cast_add,
    _root_.add_div, add_comm]
  rw [← tsub_tsub, tsub_add_cancel_of_le (le_tsub_of_add_le_left hk)]
  -- Chain the induction hypothesis through the local LYM inequality.
  exact add_le_add_left
    ((ih <| le_of_succ_le hk).trans <|
      card_div_choose_le_card_shadow_div_choose
        (tsub_pos_iff_lt.2 <| Nat.succ_le_iff.1 hk).ne' <| sized_falling _ _) _

end Falling

variable {𝒜 : Finset (Finset α)} {s : Finset α} {k : ℕ}

/-- The **Lubell-Yamamoto-Meshalkin inequality**. If `𝒜` is an antichain, then the sum of the
proportion of elements it takes from each layer is less than `1`. -/
theorem sum_card_slice_div_choose_le_one [Fintype α]
    (h𝒜 : IsAntichain (· ⊆ ·) (𝒜 : Set (Finset α))) :
    (∑ r ∈ range (Fintype.card α + 1), ((𝒜 # r).card : 𝕜) / (Fintype.card α).choose r) ≤ 1 := by
  classical
  -- Reverse the summation order to match `le_card_falling_div_choose` at `k = n`.
  rw [← sum_flip]
  refine (le_card_falling_div_choose le_rfl h𝒜).trans ?_
  rw [div_le_iff] <;> norm_cast
  -- `falling 0 𝒜 ⊆ {∅}`, so its size is at most `choose n 0 = 1`.
  · simpa only [Nat.sub_self, one_mul, Nat.choose_zero_right, falling] using
      Set.Sized.card_le (sized_falling 0 𝒜)
  · rw [tsub_self, choose_zero_right]
    exact zero_lt_one

end LYM

/-! ### Sperner's theorem -/

/-- **Sperner's theorem**. The size of an antichain in `Finset α` is bounded by the size of the
maximal layer in `Finset α`. This precisely means that `Finset α` is a Sperner order.
-/
theorem IsAntichain.sperner [Fintype α] {𝒜 : Finset (Finset α)}
    (h𝒜 : IsAntichain (· ⊆ ·) (𝒜 : Set (Finset α))) :
    𝒜.card ≤ (Fintype.card α).choose (Fintype.card α / 2) := by
  classical
  -- Reduce to a sum over layers, each weighted by the central binomial coefficient.
  suffices (∑ r ∈ Iic (Fintype.card α),
      ((𝒜 # r).card : ℚ) / (Fintype.card α).choose (Fintype.card α / 2)) ≤ 1 by
    rw [← sum_div, ← Nat.cast_sum, div_le_one] at this
    · simp only [cast_le] at this
      rwa [sum_card_slice] at this
    simp only [cast_pos]
    exact choose_pos (Nat.div_le_self _ _)
  rw [Iic_eq_Icc, ← Ico_succ_right, bot_eq_zero, Ico_zero_eq_range]
  -- Each term grows when the central binomial is replaced by `choose n r`; apply LYM.
  refine (sum_le_sum fun r hr => ?_).trans (sum_card_slice_div_choose_le_one h𝒜)
  rw [mem_range] at hr
  refine div_le_div_of_nonneg_left ?_ ?_ ?_ <;> norm_cast
  · exact Nat.zero_le _
  · exact choose_pos (Nat.lt_succ_iff.1 hr)
  -- `choose n r ≤ choose n (n / 2)`: the middle binomial coefficient is maximal.
  · exact choose_le_middle _ _

end Finset
Combinatorics\SetFamily\Shadow.lean
/- Copyright (c) 2021 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Alena Gusakov, Yaël Dillies -/ import Mathlib.Data.Finset.Grade import Mathlib.Data.Finset.Sups import Mathlib.Logic.Function.Iterate /-! # Shadows This file defines shadows of a set family. The shadow of a set family is the set family of sets we get by removing any element from any set of the original family. If one pictures `Finset α` as a big hypercube (each dimension being membership of a given element), then taking the shadow corresponds to projecting each finset down once in all available directions. ## Main definitions * `Finset.shadow`: The shadow of a set family. Everything we can get by removing a new element from some set. * `Finset.upShadow`: The upper shadow of a set family. Everything we can get by adding an element to some set. ## Notation We define notation in locale `FinsetFamily`: * `∂ 𝒜`: Shadow of `𝒜`. * `∂⁺ 𝒜`: Upper shadow of `𝒜`. We also maintain the convention that `a, b : α` are elements of the ground type, `s, t : Finset α` are finsets, and `𝒜, ℬ : Finset (Finset α)` are finset families. ## References * https://github.com/b-mehta/maths-notes/blob/master/iii/mich/combinatorics.pdf * http://discretemath.imp.fu-berlin.de/DMII-2015-16/kruskal.pdf ## Tags shadow, set family -/ open Finset Nat variable {α : Type*} namespace Finset section Shadow variable [DecidableEq α] {𝒜 : Finset (Finset α)} {s t : Finset α} {a : α} {k r : ℕ} /-- The shadow of a set family `𝒜` is all sets we can get by removing one element from any set in `𝒜`, and the (`k` times) iterated shadow (`shadow^[k]`) is all sets we can get by removing `k` elements from any set in `𝒜`. 
-/
def shadow (𝒜 : Finset (Finset α)) : Finset (Finset α) :=
  -- Union over `s ∈ 𝒜` of all one-element erasures of `s`.
  𝒜.sup fun s => s.image (erase s)

-- Porting note: added `inherit_doc` to calm linter
@[inherit_doc]
scoped[FinsetFamily] notation:max "∂ " => Finset.shadow

-- Porting note: had to open FinsetFamily
open FinsetFamily

/-- The shadow of the empty set is empty. -/
@[simp]
theorem shadow_empty : ∂ (∅ : Finset (Finset α)) = ∅ :=
  rfl

@[simp]
lemma shadow_iterate_empty (k : ℕ) : ∂^[k] (∅ : Finset (Finset α)) = ∅ := by
  induction' k <;> simp [*, shadow_empty]

@[simp]
theorem shadow_singleton_empty : ∂ ({∅} : Finset (Finset α)) = ∅ :=
  rfl

--TODO: Prove `∂ {{a}} = {∅}` quickly using `covers` and `GradeOrder`
/-- The shadow is monotone. -/
@[mono]
theorem shadow_monotone : Monotone (shadow : Finset (Finset α) → Finset (Finset α)) := fun _ _ =>
  sup_mono

/-- `t` is in the shadow of `𝒜` iff there is a `s ∈ 𝒜` from which we can remove one element to
get `t`. -/
lemma mem_shadow_iff : t ∈ ∂ 𝒜 ↔ ∃ s ∈ 𝒜, ∃ a ∈ s, erase s a = t := by
  simp only [shadow, mem_sup, mem_image]

theorem erase_mem_shadow (hs : s ∈ 𝒜) (ha : a ∈ s) : erase s a ∈ ∂ 𝒜 :=
  mem_shadow_iff.2 ⟨s, hs, a, ha, rfl⟩

/-- `t ∈ ∂𝒜` iff `t` is exactly one element less than something from `𝒜`.

See also `Finset.mem_shadow_iff_exists_mem_card_add_one`. -/
lemma mem_shadow_iff_exists_sdiff : t ∈ ∂ 𝒜 ↔ ∃ s ∈ 𝒜, t ⊆ s ∧ (s \ t).card = 1 := by
  simp_rw [mem_shadow_iff, ← covBy_iff_card_sdiff_eq_one, covBy_iff_exists_erase]

/-- `t` is in the shadow of `𝒜` iff we can add an element to it so that the resulting finset is in
`𝒜`. -/
lemma mem_shadow_iff_insert_mem : t ∈ ∂ 𝒜 ↔ ∃ a ∉ t, insert a t ∈ 𝒜 := by
  simp_rw [mem_shadow_iff_exists_sdiff, ← covBy_iff_card_sdiff_eq_one, covBy_iff_exists_insert]
  aesop

/-- `s ∈ ∂ 𝒜` iff `s` is exactly one element less than something from `𝒜`.

See also `Finset.mem_shadow_iff_exists_sdiff`.
-/
lemma mem_shadow_iff_exists_mem_card_add_one :
    t ∈ ∂ 𝒜 ↔ ∃ s ∈ 𝒜, t ⊆ s ∧ s.card = t.card + 1 := by
  -- Rewrite `(s \ t).card = 1` into the cardinality equation via `card_sdiff`.
  refine mem_shadow_iff_exists_sdiff.trans <| exists_congr fun t ↦ and_congr_right fun _ ↦
    and_congr_right fun hst ↦ ?_
  rw [card_sdiff hst, tsub_eq_iff_eq_add_of_le, add_comm]
  exact card_mono hst

lemma mem_shadow_iterate_iff_exists_card :
    t ∈ ∂^[k] 𝒜 ↔ ∃ u : Finset α, u.card = k ∧ Disjoint t u ∧ t ∪ u ∈ 𝒜 := by
  -- Induct on the number of iterations, peeling off one erased element per step.
  induction' k with k ih generalizing t
  · simp
  set_option tactic.skipAssignedInstances false in
  simp only [mem_shadow_iff_insert_mem, ih, Function.iterate_succ_apply', card_eq_succ]
  aesop

/-- `t ∈ ∂^k 𝒜` iff `t` is exactly `k` elements less than something from `𝒜`.

See also `Finset.mem_shadow_iff_exists_mem_card_add`. -/
lemma mem_shadow_iterate_iff_exists_sdiff :
    t ∈ ∂^[k] 𝒜 ↔ ∃ s ∈ 𝒜, t ⊆ s ∧ (s \ t).card = k := by
  rw [mem_shadow_iterate_iff_exists_card]
  constructor
  -- The witness `u` of removed elements is exactly `s \ t` for the superset `s = t ∪ u`.
  · rintro ⟨u, rfl, htu, hsuA⟩
    exact ⟨_, hsuA, subset_union_left, by rw [union_sdiff_cancel_left htu]⟩
  · rintro ⟨s, hs, hts, rfl⟩
    refine ⟨s \ t, rfl, disjoint_sdiff, ?_⟩
    rwa [union_sdiff_self_eq_union, union_eq_right.2 hts]

/-- `t ∈ ∂^k 𝒜` iff `t` is exactly `k` elements less than something in `𝒜`.

See also `Finset.mem_shadow_iterate_iff_exists_sdiff`. -/
lemma mem_shadow_iterate_iff_exists_mem_card_add :
    t ∈ ∂^[k] 𝒜 ↔ ∃ s ∈ 𝒜, t ⊆ s ∧ s.card = t.card + k := by
  refine mem_shadow_iterate_iff_exists_sdiff.trans <| exists_congr fun t ↦ and_congr_right fun _ ↦
    and_congr_right fun hst ↦ ?_
  rw [card_sdiff hst, tsub_eq_iff_eq_add_of_le, add_comm]
  exact card_mono hst

/-- The shadow of a family of `r`-sets is a family of `r - 1`-sets. -/
protected theorem _root_.Set.Sized.shadow (h𝒜 : (𝒜 : Set (Finset α)).Sized r) :
    (∂ 𝒜 : Set (Finset α)).Sized (r - 1) := by
  intro A h
  obtain ⟨A, hA, i, hi, rfl⟩ := mem_shadow_iff.1 h
  rw [card_erase_of_mem hi, h𝒜 hA]

/-- The `k`-th shadow of a family of `r`-sets is a family of `r - k`-sets.
-/
lemma _root_.Set.Sized.shadow_iterate (h𝒜 : (𝒜 : Set (Finset α)).Sized r) :
    (∂^[k] 𝒜 : Set (Finset α)).Sized (r - k) := by
  simp_rw [Set.Sized, mem_coe, mem_shadow_iterate_iff_exists_sdiff]
  rintro t ⟨s, hs, hts, rfl⟩
  rw [card_sdiff hts, ← h𝒜 hs, Nat.sub_sub_self (card_le_card hts)]

theorem sized_shadow_iff (h : ∅ ∉ 𝒜) :
    (∂ 𝒜 : Set (Finset α)).Sized r ↔ (𝒜 : Set (Finset α)).Sized (r + 1) := by
  refine ⟨fun h𝒜 s hs => ?_, Set.Sized.shadow⟩
  -- `∅ ∉ 𝒜` guarantees every `s ∈ 𝒜` has an element to erase, landing in the shadow.
  obtain ⟨a, ha⟩ := nonempty_iff_ne_empty.2 (ne_of_mem_of_not_mem hs h)
  rw [← h𝒜 (erase_mem_shadow hs ha), card_erase_add_one ha]

/-- Being in the shadow of `𝒜` means we have a superset in `𝒜`. -/
lemma exists_subset_of_mem_shadow (hs : t ∈ ∂ 𝒜) : ∃ s ∈ 𝒜, t ⊆ s :=
  let ⟨t, ht, hst⟩ := mem_shadow_iff_exists_mem_card_add_one.1 hs
  ⟨t, ht, hst.1⟩

end Shadow

open FinsetFamily

section UpShadow

variable [DecidableEq α] [Fintype α] {𝒜 : Finset (Finset α)} {s t : Finset α} {a : α} {k r : ℕ}

/-- The upper shadow of a set family `𝒜` is all sets we can get by adding one element to any set in
`𝒜`, and the (`k` times) iterated upper shadow (`upShadow^[k]`) is all sets we can get by adding
`k` elements to any set in `𝒜`. -/
def upShadow (𝒜 : Finset (Finset α)) : Finset (Finset α) :=
  -- Union over `s ∈ 𝒜` of all one-element insertions of elements outside `s`.
  𝒜.sup fun s => sᶜ.image fun a => insert a s

-- Porting note: added `inherit_doc` to calm linter
@[inherit_doc]
scoped[FinsetFamily] notation:max "∂⁺ " => Finset.upShadow

/-- The upper shadow of the empty set is empty. -/
@[simp]
theorem upShadow_empty : ∂⁺ (∅ : Finset (Finset α)) = ∅ :=
  rfl

/-- The upper shadow is monotone. -/
@[mono]
theorem upShadow_monotone : Monotone (upShadow : Finset (Finset α) → Finset (Finset α)) :=
  fun _ _ => sup_mono

/-- `t` is in the upper shadow of `𝒜` iff there is a `s ∈ 𝒜` to which we can add one element to
get `t`.
-/
lemma mem_upShadow_iff : t ∈ ∂⁺ 𝒜 ↔ ∃ s ∈ 𝒜, ∃ a ∉ s, insert a s = t := by
  simp_rw [upShadow, mem_sup, mem_image, mem_compl]

theorem insert_mem_upShadow (hs : s ∈ 𝒜) (ha : a ∉ s) : insert a s ∈ ∂⁺ 𝒜 :=
  mem_upShadow_iff.2 ⟨s, hs, a, ha, rfl⟩

/-- `t` is in the upper shadow of `𝒜` iff `t` is exactly one element more than something from `𝒜`.

See also `Finset.mem_upShadow_iff_exists_mem_card_add_one`. -/
lemma mem_upShadow_iff_exists_sdiff : t ∈ ∂⁺ 𝒜 ↔ ∃ s ∈ 𝒜, s ⊆ t ∧ (t \ s).card = 1 := by
  simp_rw [mem_upShadow_iff, ← covBy_iff_card_sdiff_eq_one, covBy_iff_exists_insert]

/-- `t` is in the upper shadow of `𝒜` iff we can remove an element from it so that the resulting
finset is in `𝒜`. -/
lemma mem_upShadow_iff_erase_mem : t ∈ ∂⁺ 𝒜 ↔ ∃ a, a ∈ t ∧ erase t a ∈ 𝒜 := by
  simp_rw [mem_upShadow_iff_exists_sdiff, ← covBy_iff_card_sdiff_eq_one, covBy_iff_exists_erase]
  aesop

/-- `t` is in the upper shadow of `𝒜` iff `t` is exactly one element more than something from `𝒜`.

See also `Finset.mem_upShadow_iff_exists_sdiff`. -/
lemma mem_upShadow_iff_exists_mem_card_add_one :
    t ∈ ∂⁺ 𝒜 ↔ ∃ s ∈ 𝒜, s ⊆ t ∧ t.card = s.card + 1 := by
  -- Rewrite `(t \ s).card = 1` into the cardinality equation via `card_sdiff`.
  refine mem_upShadow_iff_exists_sdiff.trans <| exists_congr fun t ↦ and_congr_right fun _ ↦
    and_congr_right fun hst ↦ ?_
  rw [card_sdiff hst, tsub_eq_iff_eq_add_of_le, add_comm]
  exact card_mono hst

lemma mem_upShadow_iterate_iff_exists_card :
    t ∈ ∂⁺^[k] 𝒜 ↔ ∃ u : Finset α, u.card = k ∧ u ⊆ t ∧ t \ u ∈ 𝒜 := by
  -- Induct on the number of iterations, peeling off one inserted element per step.
  induction' k with k ih generalizing t
  · simp
  simp only [mem_upShadow_iff_erase_mem, ih, Function.iterate_succ_apply', card_eq_succ,
    subset_erase, erase_sdiff_comm, ← sdiff_insert]
  constructor
  · rintro ⟨a, hat, u, rfl, ⟨hut, hau⟩, htu⟩
    exact ⟨_, ⟨_, _, hau, rfl, rfl⟩, insert_subset hat hut, htu⟩
  · rintro ⟨_, ⟨a, u, hau, rfl, rfl⟩, hut, htu⟩
    rw [insert_subset_iff] at hut
    exact ⟨a, hut.1, _, rfl, ⟨hut.2, hau⟩, htu⟩

/-- `t` is in the upper shadow of `𝒜` iff `t` is exactly `k` elements more than something from `𝒜`.
See also `Finset.mem_upShadow_iff_exists_mem_card_add`. -/
lemma mem_upShadow_iterate_iff_exists_sdiff :
    t ∈ ∂⁺^[k] 𝒜 ↔ ∃ s ∈ 𝒜, s ⊆ t ∧ (t \ s).card = k := by
  rw [mem_upShadow_iterate_iff_exists_card]
  constructor
  -- The witness `u` of inserted elements is exactly `t \ s` for the subset `s = t \ u`.
  · rintro ⟨u, rfl, hut, htu⟩
    exact ⟨_, htu, sdiff_subset, by rw [sdiff_sdiff_eq_self hut]⟩
  · rintro ⟨s, hs, hst, rfl⟩
    exact ⟨_, rfl, sdiff_subset, by rwa [sdiff_sdiff_eq_self hst]⟩

/-- `t ∈ ∂⁺^k 𝒜` iff `t` is exactly `k` elements more than something in `𝒜`.

See also `Finset.mem_upShadow_iterate_iff_exists_sdiff`. -/
lemma mem_upShadow_iterate_iff_exists_mem_card_add :
    t ∈ ∂⁺^[k] 𝒜 ↔ ∃ s ∈ 𝒜, s ⊆ t ∧ t.card = s.card + k := by
  refine mem_upShadow_iterate_iff_exists_sdiff.trans <| exists_congr fun t ↦ and_congr_right
    fun _ ↦ and_congr_right fun hst ↦ ?_
  rw [card_sdiff hst, tsub_eq_iff_eq_add_of_le, add_comm]
  exact card_mono hst

/-- The upper shadow of a family of `r`-sets is a family of `r + 1`-sets. -/
protected lemma _root_.Set.Sized.upShadow (h𝒜 : (𝒜 : Set (Finset α)).Sized r) :
    (∂⁺ 𝒜 : Set (Finset α)).Sized (r + 1) := by
  intro A h
  obtain ⟨A, hA, i, hi, rfl⟩ := mem_upShadow_iff.1 h
  rw [card_insert_of_not_mem hi, h𝒜 hA]

/-- Being in the upper shadow of `𝒜` means we have a subset in `𝒜`. -/
theorem exists_subset_of_mem_upShadow (hs : s ∈ ∂⁺ 𝒜) : ∃ t ∈ 𝒜, t ⊆ s :=
  let ⟨t, ht, hts, _⟩ := mem_upShadow_iff_exists_mem_card_add_one.1 hs
  ⟨t, ht, hts⟩

/-- `s ∈ ∂⁺^k 𝒜` iff `s` is exactly `k` elements more than something in `𝒜`.
-/
theorem mem_upShadow_iff_exists_mem_card_add :
    s ∈ ∂⁺ ^[k] 𝒜 ↔ ∃ t ∈ 𝒜, t ⊆ s ∧ t.card + k = s.card := by
  -- Induct on `k`, generalizing the family so the IH applies after one `∂⁺`.
  induction' k with k ih generalizing 𝒜 s
  · refine ⟨fun hs => ⟨s, hs, Subset.refl _, rfl⟩, ?_⟩
    rintro ⟨t, ht, hst, hcard⟩
    rwa [← eq_of_subset_of_card_le hst hcard.ge]
  simp only [exists_prop, Function.comp_apply, Function.iterate_succ]
  refine ih.trans ?_
  clear ih
  constructor
  -- Forward: unfold one application of `∂⁺` from the intermediate witness.
  · rintro ⟨t, ht, hts, hcardst⟩
    obtain ⟨u, hu, hut, hcardtu⟩ := mem_upShadow_iff_exists_mem_card_add_one.1 ht
    refine ⟨u, hu, hut.trans hts, ?_⟩
    rw [← hcardst, hcardtu, add_right_comm]
    rfl
  -- Backward: interpolate a set `u` with `t ⊆ u ⊆ s` and `u.card = t.card + 1`.
  · rintro ⟨t, ht, hts, hcard⟩
    obtain ⟨u, htu, hus, hu⟩ :=
      Finset.exists_subsuperset_card_eq hts (Nat.le_add_right _ 1) (by omega)
    refine ⟨u, mem_upShadow_iff_exists_mem_card_add_one.2 ⟨t, ht, htu, hu⟩, hus, ?_⟩
    rw [hu, ← hcard, add_right_comm]
    rfl

@[simp]
lemma shadow_compls : ∂ 𝒜ᶜˢ = (∂⁺ 𝒜)ᶜˢ := by
  ext s
  simp only [mem_image, exists_prop, mem_shadow_iff, mem_upShadow_iff, mem_compls]
  -- Complementation swaps erasure and insertion, exchanged via the involutive permutation.
  refine (compl_involutive.toPerm _).exists_congr_left.trans ?_
  simp [← compl_involutive.eq_iff]

@[simp]
lemma upShadow_compls : ∂⁺ 𝒜ᶜˢ = (∂ 𝒜)ᶜˢ := by
  ext s
  simp only [mem_image, exists_prop, mem_shadow_iff, mem_upShadow_iff, mem_compls]
  refine (compl_involutive.toPerm _).exists_congr_left.trans ?_
  simp [← compl_involutive.eq_iff]

end UpShadow

end Finset
Combinatorics\SetFamily\Shatter.lean
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies -/ import Mathlib.Algebra.BigOperators.Group.Finset import Mathlib.Combinatorics.SetFamily.Compression.Down import Mathlib.Order.Interval.Finset.Nat import Mathlib.Order.UpperLower.Basic /-! # Shattering families This file defines the shattering property and VC-dimension of set families. ## Main declarations * `Finset.Shatters`: The shattering property. * `Finset.shatterer`: The set family of sets shattered by a set family. * `Finset.vcDim`: The Vapnik-Chervonenkis dimension. ## TODO * Order-shattering * Strong shattering -/ open scoped FinsetFamily namespace Finset variable {α : Type*} [DecidableEq α] {𝒜 ℬ : Finset (Finset α)} {s t : Finset α} {a : α} {n : ℕ} /-- A set family `𝒜` shatters a set `s` if all subsets of `s` can be obtained as the intersection of `s` and some element of the set family, and we denote this `𝒜.Shatters s`. We also say that `s` is *traced* by `𝒜`. 
-/
def Shatters (𝒜 : Finset (Finset α)) (s : Finset α) : Prop :=
  ∀ ⦃t⦄, t ⊆ s → ∃ u ∈ 𝒜, s ∩ u = t

-- Decidable by checking the trace condition on each of the finitely many subsets of `s`.
instance : DecidablePred 𝒜.Shatters := fun _s ↦ decidableForallOfDecidableSubsets

/-- If `𝒜` shatters `s` and `a ∈ s`, then some `t ∈ 𝒜` traces exactly `{a}` on `s`. -/
lemma Shatters.exists_inter_eq_singleton (hs : Shatters 𝒜 s) (ha : a ∈ s) :
    ∃ t ∈ 𝒜, s ∩ t = {a} :=
  hs <| singleton_subset_iff.2 ha

/-- Shattering is monotone in the family: a larger family shatters at least as much. -/
lemma Shatters.mono_left (h : 𝒜 ⊆ ℬ) (h𝒜 : 𝒜.Shatters s) : ℬ.Shatters s :=
  fun _t ht ↦ let ⟨u, hu, hut⟩ := h𝒜 ht; ⟨u, h hu, hut⟩

/-- Shattering is antitone in the set: a family shattering `s` shatters every subset of `s`. -/
lemma Shatters.mono_right (h : t ⊆ s) (hs : 𝒜.Shatters s) : 𝒜.Shatters t := fun u hu ↦ by
  obtain ⟨v, hv, rfl⟩ := hs (hu.trans h); exact ⟨v, hv, inf_congr_right hu <| inf_le_of_left_le h⟩

/-- A shattered set is contained in some member of the family (take `t = s` in the
definition of `Shatters`). -/
lemma Shatters.exists_superset (h : 𝒜.Shatters s) : ∃ t ∈ 𝒜, s ⊆ t :=
  let ⟨t, ht, hst⟩ := h Subset.rfl; ⟨t, ht, inter_eq_left.1 hst⟩

/-- A family containing every subset of `s` shatters `s`. -/
lemma shatters_of_forall_subset (h : ∀ t, t ⊆ s → t ∈ 𝒜) : 𝒜.Shatters s :=
  fun t ht ↦ ⟨t, h _ ht, inter_eq_right.2 ht⟩

/-- A family that shatters some set is nonempty. -/
protected lemma Shatters.nonempty (h : 𝒜.Shatters s) : 𝒜.Nonempty :=
  let ⟨t, ht, _⟩ := h Subset.rfl; ⟨t, ht⟩

/-- The empty set is shattered by exactly the nonempty families. -/
@[simp] lemma shatters_empty : 𝒜.Shatters ∅ ↔ 𝒜.Nonempty :=
  ⟨Shatters.nonempty, fun ⟨s, hs⟩ t ht ↦ ⟨s, hs, by rwa [empty_inter, eq_comm, ← subset_empty]⟩⟩

/-- When `𝒜` shatters `s`, the subsets of `s` are exactly the traces `s ∩ u` for `u ∈ 𝒜`. -/
protected lemma Shatters.subset_iff (h : 𝒜.Shatters s) : t ⊆ s ↔ ∃ u ∈ 𝒜, s ∩ u = t :=
  ⟨fun ht ↦ h ht, by rintro ⟨u, _, rfl⟩; exact inter_subset_left⟩

/-- `𝒜` shatters `s` iff the traces of `𝒜` on `s` form the full powerset of `s`. -/
lemma shatters_iff : 𝒜.Shatters s ↔ 𝒜.image (fun t ↦ s ∩ t) = s.powerset :=
  ⟨fun h ↦ by ext t; rw [mem_image, mem_powerset, h.subset_iff],
    fun h t ht ↦ by rwa [← mem_powerset, ← h, mem_image] at ht⟩

/-- The family of all finsets shatters every set. -/
lemma univ_shatters [Fintype α] : univ.Shatters s :=
  shatters_of_forall_subset fun _ _ ↦ mem_univ _

/-- Only the family of all finsets shatters `univ`. -/
@[simp] lemma shatters_univ [Fintype α] : 𝒜.Shatters univ ↔ 𝒜 = univ := by
  rw [shatters_iff, powerset_univ]; simp_rw [univ_inter, image_id']

/-- The set family of sets that are shattered by `𝒜`.
-/
def shatterer (𝒜 : Finset (Finset α)) : Finset (Finset α) :=
  -- Every shattered set is contained in some member of `𝒜` (`Shatters.exists_superset`),
  -- so filtering inside `𝒜.biUnion powerset` loses nothing.
  (𝒜.biUnion powerset).filter 𝒜.Shatters

/-- Membership in the shatterer is exactly the shattering property; the `biUnion powerset`
bound used in the definition is automatic from `Shatters.exists_superset`. -/
@[simp] lemma mem_shatterer : s ∈ 𝒜.shatterer ↔ 𝒜.Shatters s := by
  refine mem_filter.trans <| and_iff_right_of_imp fun h ↦ ?_
  simp_rw [mem_biUnion, mem_powerset]
  exact h.exists_superset

/-- Taking the shatterer is monotone. -/
lemma shatterer_mono (h : 𝒜 ⊆ ℬ) : 𝒜.shatterer ⊆ ℬ.shatterer :=
  fun _ ↦ by simpa using Shatters.mono_left h

/-- A downward-closed family is contained in its shatterer. -/
lemma subset_shatterer (h : IsLowerSet (𝒜 : Set (Finset α))) : 𝒜 ⊆ 𝒜.shatterer :=
  fun _s hs ↦ mem_shatterer.2 fun t ht ↦ ⟨t, h ht hs, inter_eq_right.2 ht⟩

/-- The shatterer of any family is downward closed. -/
@[simp] lemma isLowerSet_shatterer (𝒜 : Finset (Finset α)) :
    IsLowerSet (𝒜.shatterer : Set (Finset α)) :=
  fun s t ↦ by simpa using Shatters.mono_right

/-- A family equals its own shatterer iff it is downward closed. -/
@[simp] lemma shatterer_eq : 𝒜.shatterer = 𝒜 ↔ IsLowerSet (𝒜 : Set (Finset α)) := by
  refine ⟨fun h ↦ ?_, fun h ↦ Subset.antisymm (fun s hs ↦ ?_) <| subset_shatterer h⟩
  · rw [← h]
    exact isLowerSet_shatterer _
  · obtain ⟨t, ht, hst⟩ := (mem_shatterer.1 hs).exists_superset
    exact h hst ht

/-- Taking the shatterer is idempotent. -/
@[simp] lemma shatterer_idem : 𝒜.shatterer.shatterer = 𝒜.shatterer := by simp

/-- The shatterer shatters exactly the sets that `𝒜` shatters. -/
@[simp] lemma shatters_shatterer : 𝒜.shatterer.Shatters s ↔ 𝒜.Shatters s := by
  simp_rw [← mem_shatterer, shatterer_idem]

protected alias ⟨_, Shatters.shatterer⟩ := shatters_shatterer

-- If no member of `𝒜` contains `a`, then no set shattered by `𝒜` contains `a`.
private lemma aux (h : ∀ t ∈ 𝒜, a ∉ t) (ht : 𝒜.Shatters t) : a ∉ t := by
  obtain ⟨u, hu, htu⟩ := ht.exists_superset; exact not_mem_mono htu <| h u hu

/-- Pajor's variant of the **Sauer-Shelah lemma**.
-/
lemma card_le_card_shatterer (𝒜 : Finset (Finset α)) : 𝒜.card ≤ 𝒜.shatterer.card := by
  -- Induct over the family, splitting on membership of an element `a`
  -- (`memberFamily_induction_on`): the empty family and `{∅}` are immediate.
  refine memberFamily_induction_on 𝒜 ?_ ?_ ?_
  · simp
  · rfl
  intros a 𝒜 ih₀ ih₁
  -- `ℬ`: sets shattered by both the `a`-member and `a`-non-member subfamilies,
  -- with `a` inserted into each.
  set ℬ : Finset (Finset α) :=
    ((memberSubfamily a 𝒜).shatterer ∩ (nonMemberSubfamily a 𝒜).shatterer).image (insert a)
  -- Inserting `a` is injective here since, by `aux`, no set shattered by
  -- `nonMemberSubfamily a 𝒜` contains `a`.
  have hℬ :
      ℬ.card = ((memberSubfamily a 𝒜).shatterer ∩ (nonMemberSubfamily a 𝒜).shatterer).card := by
    refine card_image_of_injOn <| insert_erase_invOn.2.injOn.mono ?_
    simp only [coe_inter, Set.subset_def, Set.mem_inter_iff, mem_coe, Set.mem_setOf_eq, and_imp,
      mem_shatterer]
    exact fun s _ ↦ aux (fun t ht ↦ (mem_filter.1 ht).2)
  -- Reduce to inclusion-exclusion on the two subfamily shatterers plus `ℬ`.
  rw [← card_memberSubfamily_add_card_nonMemberSubfamily a]
  refine (Nat.add_le_add ih₁ ih₀).trans ?_
  rw [← card_union_add_card_inter, ← hℬ, ← card_union_of_disjoint]
  swap
  -- Disjointness: members of `ℬ` contain `a`, while sets shattered by either
  -- subfamily do not (again by `aux`).
  · simp only [ℬ, disjoint_left, mem_union, mem_shatterer, mem_image, not_exists, not_and]
    rintro _ (hs | hs) s - rfl
    · exact aux (fun t ht ↦ (mem_memberSubfamily.1 ht).2) hs <| mem_insert_self _ _
    · exact aux (fun t ht ↦ (mem_nonMemberSubfamily.1 ht).2) hs <| mem_insert_self _ _
  -- Finally, the whole union embeds into `𝒜.shatterer`.
  refine card_mono <| union_subset (union_subset ?_ <| shatterer_mono <| filter_subset _ _) ?_
  -- Sets shattered by `memberSubfamily a 𝒜` are shattered by `𝒜` (and avoid `a`).
  · simp only [subset_iff, mem_shatterer]
    rintro s hs t ht
    obtain ⟨u, hu, rfl⟩ := hs ht
    rw [mem_memberSubfamily] at hu
    refine ⟨insert a u, hu.1, inter_insert_of_not_mem fun ha ↦ ?_⟩
    obtain ⟨v, hv, hsv⟩ := hs.exists_inter_eq_singleton ha
    rw [mem_memberSubfamily] at hv
    rw [← singleton_subset_iff (a := a), ← hsv] at hv
    exact hv.2 inter_subset_right
  -- Sets in `ℬ` are shattered by `𝒜`: split on whether `a` lies in the target trace `t`.
  · refine forall_image.2 fun s hs ↦ mem_shatterer.2 fun t ht ↦ ?_
    simp only [mem_inter, mem_shatterer] at hs
    rw [subset_insert_iff] at ht
    by_cases ha : a ∈ t
    · obtain ⟨u, hu, hsu⟩ := hs.1 ht
      rw [mem_memberSubfamily] at hu
      refine ⟨_, hu.1, ?_⟩
      rw [← insert_inter_distrib, hsu, insert_erase ha]
    · obtain ⟨u, hu, hsu⟩ := hs.2 ht
      rw [mem_nonMemberSubfamily] at hu
      refine ⟨_, hu.1, ?_⟩
      rwa [insert_inter_of_not_mem hu.2, hsu, erase_eq_self]

/-- Shattering survives undoing a down-compression: if the compressed family `𝓓 a 𝒜`
shatters `s`, then so does `𝒜`. -/
lemma Shatters.of_compression (hs : (𝓓 a 𝒜).Shatters s) : 𝒜.Shatters s := by
  intros t ht
  obtain ⟨u, hu, rfl⟩ := hs ht
  rw [Down.mem_compression] at hu
  obtain hu | hu := hu
  -- `u` was already in `𝒜`: reuse it directly.
  · exact ⟨u, hu.1, rfl⟩
  -- Otherwise `u ∉ 𝒜` but `insert a u ∈ 𝒜`; case on whether `a ∈ s`.
  by_cases ha : a ∈ s
  · obtain ⟨v, hv, hsv⟩ := hs <| insert_subset ha ht
    rw [Down.mem_compression] at hv
    obtain hv | hv := hv
    -- `v ∈ 𝒜` traces `insert a (s ∩ u)`; erase `a` from it to get the right trace.
    · refine ⟨erase v a, hv.2, ?_⟩
      rw [inter_erase, hsv, erase_insert]
      rintro ha
      rw [insert_eq_self.2 (mem_inter.1 ha).2] at hu
      exact hu.1 hu.2
    -- The remaining case contradicts `hv.1 : v ∉ 𝒜` via `hv.2`.
    rw [insert_eq_self.2 <| inter_subset_right (s₁ := s) ?_] at hv
    cases hv.1 hv.2
    rw [hsv]
    exact mem_insert_self _ _
  -- If `a ∉ s`, insert `a` into the witness; it does not change the trace on `s`.
  · refine ⟨insert a u, hu.2, ?_⟩
    rw [inter_insert_of_not_mem ha]

/-- Down-compression can only shrink the shatterer. -/
lemma shatterer_compress_subset_shatterer (a : α) (𝒜 : Finset (Finset α)) :
    (𝓓 a 𝒜).shatterer ⊆ 𝒜.shatterer := by
  simp only [subset_iff, mem_shatterer]; exact fun s hs ↦ hs.of_compression

/-! ### Vapnik-Chervonenkis dimension -/

/-- The Vapnik-Chervonenkis dimension of a set family is the maximal size of a set it shatters. -/
def vcDim (𝒜 : Finset (Finset α)) : ℕ := 𝒜.shatterer.sup card

/-- Any shattered set has size at most the VC-dimension. -/
lemma Shatters.card_le_vcDim (hs : 𝒜.Shatters s) : s.card ≤ 𝒜.vcDim :=
  le_sup <| mem_shatterer.2 hs

/-- Down-compressing decreases the VC-dimension. -/
lemma vcDim_compress_le (a : α) (𝒜 : Finset (Finset α)) : (𝓓 a 𝒜).vcDim ≤ 𝒜.vcDim :=
  sup_mono <| shatterer_compress_subset_shatterer _ _

/-- The **Sauer-Shelah lemma**. -/
lemma card_shatterer_le_sum_vcDim [Fintype α] :
    𝒜.shatterer.card ≤ ∑ k ∈ Iic 𝒜.vcDim, (Fintype.card α).choose k := by
  -- Count shattered sets inside the disjoint union of the `powersetCard k univ`
  -- for `k` up to the VC-dimension.
  simp_rw [← card_univ, ← card_powersetCard]
  refine (card_le_card fun s hs ↦ mem_biUnion.2 ⟨card s, ?_⟩).trans card_biUnion_le
  exact ⟨mem_Iic.2 (mem_shatterer.1 hs).card_le_vcDim, mem_powersetCard_univ.2 rfl⟩

end Finset