source
stringlengths
17
118
lean4
stringlengths
0
335k
.lake/packages/mathlib/Mathlib/LinearAlgebra/SesquilinearForm/Basic.lean
import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.BilinearMap import Mathlib.LinearAlgebra.LinearIndependent.Lemmas /-! # Sesquilinear maps This file provides properties about sesquilinear maps and forms. The maps considered are of the form `M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M`, where `I₁ : R₁ →+* R` and `I₂ : R₂ →+* R` are ring homomorphisms and `M₁` is a module over `R₁`, `M₂` is a module over `R₂` and `M` is a module over `R`. Sesquilinear forms are the special case that `M₁ = M₂`, `M = R₁ = R₂ = R`, and `I₁ = RingHom.id R`. Taking additionally `I₂ = RingHom.id R`, then one obtains bilinear forms. Sesquilinear maps are a special case of the bilinear maps defined in `BilinearMap.lean`, and many basic lemmas about construction and elementary calculations are found there. ## Main declarations * `IsOrtho`: states that two vectors are orthogonal with respect to a sesquilinear map * `IsSymm`, `IsAlt`: states that a sesquilinear form is symmetric and alternating, respectively * `orthogonalBilin` provides the orthogonal complement with respect to a sesquilinear form ## References * <https://en.wikipedia.org/wiki/Sesquilinear_form#Over_arbitrary_rings> ## Tags Sesquilinear form, Sesquilinear map -/ open Module variable {R R₁ R₂ R₃ M M₁ M₂ M₃ Mₗ₁ Mₗ₁' Mₗ₂ Mₗ₂' K K₁ K₂ V V₁ V₂ n : Type*} namespace LinearMap /-! 
### Orthogonal vectors -/ section CommRing -- the `ₗ` subscript variables are for special cases about linear (as opposed to semilinear) maps variable [CommSemiring R] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] [CommSemiring R₂] [AddCommMonoid M₂] [Module R₂ M₂] [AddCommMonoid M] [Module R M] {I₁ : R₁ →+* R} {I₂ : R₂ →+* R} {I₁' : R₁ →+* R} /-- The proposition that two elements of a sesquilinear map space are orthogonal -/ def IsOrtho (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) (x : M₁) (y : M₂) : Prop := B x y = 0 theorem isOrtho_def {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} {x y} : B.IsOrtho x y ↔ B x y = 0 := Iff.rfl theorem isOrtho_zero_left (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) (x) : IsOrtho B (0 : M₁) x := by dsimp only [IsOrtho] rw [map_zero B, zero_apply] theorem isOrtho_zero_right (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) (x) : IsOrtho B x (0 : M₂) := map_zero (B x) theorem isOrtho_flip {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₁'] M} {x y} : B.IsOrtho x y ↔ B.flip.IsOrtho y x := by simp_rw [isOrtho_def, flip_apply] open scoped Function in -- required for scoped `on` notation /-- A set of vectors `v` is orthogonal with respect to some bilinear map `B` if and only if for all `i ≠ j`, `B (v i) (v j) = 0`. 
For orthogonality between two elements, use `BilinForm.isOrtho` -/ def IsOrthoᵢ (B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₁'] M) (v : n → M₁) : Prop := Pairwise (B.IsOrtho on v) theorem isOrthoᵢ_def {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₁'] M} {v : n → M₁} : B.IsOrthoᵢ v ↔ ∀ i j : n, i ≠ j → B (v i) (v j) = 0 := Iff.rfl theorem isOrthoᵢ_flip (B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₁'] M) {v : n → M₁} : B.IsOrthoᵢ v ↔ B.flip.IsOrthoᵢ v := by simp_rw [isOrthoᵢ_def] constructor <;> exact fun h i j hij ↦ h j i hij.symm end CommRing section Field variable [Field K] [AddCommGroup V] [Module K V] [Field K₁] [AddCommGroup V₁] [Module K₁ V₁] [Field K₂] [AddCommGroup V₂] [Module K₂ V₂] {I₁ : K₁ →+* K} {I₂ : K₂ →+* K} {I₁' : K₁ →+* K} {J₁ : K →+* K} {J₂ : K →+* K} -- todo: this also holds for [CommRing R] [IsDomain R] when J₁ is invertible theorem ortho_smul_left {B : V₁ →ₛₗ[I₁] V₂ →ₛₗ[I₂] V} {x y} {a : K₁} (ha : a ≠ 0) : IsOrtho B x y ↔ IsOrtho B (a • x) y := by dsimp only [IsOrtho] constructor <;> intro H · rw [map_smulₛₗ₂, H, smul_zero] · rw [map_smulₛₗ₂, smul_eq_zero] at H rcases H with H | H · rw [map_eq_zero I₁] at H trivial · exact H -- todo: this also holds for [CommRing R] [IsDomain R] when J₂ is invertible theorem ortho_smul_right {B : V₁ →ₛₗ[I₁] V₂ →ₛₗ[I₂] V} {x y} {a : K₂} {ha : a ≠ 0} : IsOrtho B x y ↔ IsOrtho B x (a • y) := by dsimp only [IsOrtho] constructor <;> intro H · rw [map_smulₛₗ, H, smul_zero] · rw [map_smulₛₗ, smul_eq_zero] at H rcases H with H | H · simp only [map_eq_zero] at H exfalso exact ha H · exact H /-- A set of orthogonal vectors `v` with respect to some sesquilinear map `B` is linearly independent if for all `i`, `B (v i) (v i) ≠ 0`. 
-/ theorem linearIndependent_of_isOrthoᵢ {B : V₁ →ₛₗ[I₁] V₁ →ₛₗ[I₁'] V} {v : n → V₁} (hv₁ : B.IsOrthoᵢ v) (hv₂ : ∀ i, ¬B.IsOrtho (v i) (v i)) : LinearIndependent K₁ v := by classical rw [linearIndependent_iff'] intro s w hs i hi have : B (s.sum fun i : n ↦ w i • v i) (v i) = 0 := by rw [hs, map_zero, zero_apply] have hsum : (s.sum fun j : n ↦ I₁ (w j) • B (v j) (v i)) = I₁ (w i) • B (v i) (v i) := by apply Finset.sum_eq_single_of_mem i hi intro j _hj hij rw [isOrthoᵢ_def.1 hv₁ _ _ hij, smul_zero] simp_rw [B.map_sum₂, map_smulₛₗ₂, hsum] at this apply (map_eq_zero I₁).mp exact (smul_eq_zero.mp this).elim _root_.id (hv₂ i · |>.elim) end Field /-! ### Reflexive bilinear maps -/ section Reflexive variable [CommSemiring R] [AddCommMonoid M] [Module R M] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] {I₁ : R₁ →+* R} {I₂ : R₁ →+* R} {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M} /-- The proposition that a sesquilinear map is reflexive -/ def IsRefl (B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M) : Prop := ∀ x y, B x y = 0 → B y x = 0 namespace IsRefl section variable (H : B.IsRefl) include H theorem eq_zero : ∀ {x y}, B x y = 0 → B y x = 0 := fun {x y} ↦ H x y theorem eq_iff {x y} : B x y = 0 ↔ B y x = 0 := ⟨H x y, H y x⟩ theorem ortho_comm {x y} : IsOrtho B x y ↔ IsOrtho B y x := ⟨eq_zero H, eq_zero H⟩ theorem domRestrict (p : Submodule R₁ M₁) : (B.domRestrict₁₂ p p).IsRefl := fun _ _ ↦ by simp_rw [domRestrict₁₂_apply] exact H _ _ end @[simp] theorem flip_isRefl_iff : B.flip.IsRefl ↔ B.IsRefl := ⟨fun h x y H ↦ h y x ((B.flip_apply _ _).trans H), fun h x y ↦ h y x⟩ theorem ker_flip_eq_bot (H : B.IsRefl) (h : LinearMap.ker B = ⊥) : LinearMap.ker B.flip = ⊥ := by refine ker_eq_bot'.mpr fun _ hx ↦ ker_eq_bot'.mp h _ ?_ ext exact H _ _ (LinearMap.congr_fun hx _) theorem ker_eq_bot_iff_ker_flip_eq_bot (H : B.IsRefl) : LinearMap.ker B = ⊥ ↔ LinearMap.ker B.flip = ⊥ := by refine ⟨ker_flip_eq_bot H, fun h ↦ ?_⟩ exact (congr_arg _ B.flip_flip.symm).trans (ker_flip_eq_bot (flip_isRefl_iff.mpr H) h) end IsRefl 
end Reflexive /-! ### Symmetric bilinear forms -/ section Symmetric variable [CommSemiring R] [AddCommMonoid M] [Module R M] {I : R →+* R} {B : M →ₛₗ[I] M →ₗ[R] R} /-- The proposition that a sesquilinear form is symmetric -/ structure IsSymm (B : M →ₛₗ[I] M →ₗ[R] R) : Prop where protected eq : ∀ x y, I (B x y) = B y x theorem isSymm_def {B : M →ₛₗ[I] M →ₗ[R] R} : B.IsSymm ↔ ∀ x y, I (B x y) = B y x := ⟨fun ⟨h⟩ ↦ h, fun h ↦ ⟨h⟩⟩ namespace IsSymm theorem isRefl (H : B.IsSymm) : B.IsRefl := fun x y H1 ↦ by rw [← H.eq] simp [H1] theorem ortho_comm (H : B.IsSymm) {x y} : IsOrtho B x y ↔ IsOrtho B y x := H.isRefl.ortho_comm theorem domRestrict (H : B.IsSymm) (p : Submodule R M) : (B.domRestrict₁₂ p p).IsSymm where eq _ _ := by simp_rw [domRestrict₁₂_apply] exact H.eq _ _ end IsSymm @[simp] theorem isSymm_zero : (0 : M →ₛₗ[I] M →ₗ[R] R).IsSymm := ⟨fun _ _ => map_zero _⟩ protected lemma IsSymm.add {C : M →ₛₗ[I] M →ₗ[R] R} (hB : B.IsSymm) (hC : C.IsSymm) : (B + C).IsSymm where eq x y := by simp [hB.eq, hC.eq] theorem BilinMap.isSymm_iff_eq_flip {N : Type*} [AddCommMonoid N] [Module R N] {B : LinearMap.BilinMap R M N} : (∀ x y, B x y = B y x) ↔ B = B.flip := by simp [LinearMap.ext_iff₂] theorem isSymm_iff_eq_flip {B : LinearMap.BilinForm R M} : B.IsSymm ↔ B = B.flip := isSymm_def.trans BilinMap.isSymm_iff_eq_flip end Symmetric /-! ### Positive semidefinite sesquilinear forms -/ section PositiveSemidefinite variable [CommSemiring R] [AddCommMonoid M] [Module R M] {I₁ I₂ : R →+* R} /-- A sesquilinear form `B` is **nonnegative** if for any `x` we have `0 ≤ B x x`. 
-/ structure IsNonneg [LE R] (B : M →ₛₗ[I₁] M →ₛₗ[I₂] R) where nonneg : ∀ x, 0 ≤ B x x lemma isNonneg_def [LE R] {B : M →ₛₗ[I₁] M →ₛₗ[I₂] R} : B.IsNonneg ↔ ∀ x, 0 ≤ B x x := ⟨fun ⟨h⟩ ↦ h, fun h ↦ ⟨h⟩⟩ @[simp] lemma isNonneg_zero [Preorder R] : IsNonneg (0 : M →ₛₗ[I₁] M →ₛₗ[I₂] R) := ⟨fun _ ↦ le_rfl⟩ protected lemma IsNonneg.add [Preorder R] [AddLeftMono R] {B C : M →ₛₗ[I₁] M →ₛₗ[I₂] R} (hB : B.IsNonneg) (hC : C.IsNonneg) : (B + C).IsNonneg where nonneg x := add_nonneg (hB.nonneg x) (hC.nonneg x) protected lemma IsNonneg.smul [Preorder R] [PosMulMono R] {B : M →ₛₗ[I₁] M →ₛₗ[I₂] R} {c : R} (hB : B.IsNonneg) (hc : 0 ≤ c) : (c • B).IsNonneg where nonneg x := mul_nonneg hc (hB.nonneg x) /-- A sesquilinear form `B` is **positive semidefinite** if it is symmetric and nonnegative. -/ structure IsPosSemidef [LE R] (B : M →ₛₗ[I₁] M →ₗ[R] R) extends isSymm : B.IsSymm, isNonneg : B.IsNonneg lemma isPosSemidef_def [LE R] {B : M →ₛₗ[I₁] M →ₗ[R] R} : B.IsPosSemidef ↔ B.IsSymm ∧ B.IsNonneg := ⟨fun h ↦ ⟨h.isSymm, h.isNonneg⟩, fun ⟨h₁, h₂⟩ ↦ ⟨h₁, h₂⟩⟩ @[simp] lemma isPosSemidef_zero [Preorder R] : IsPosSemidef (0 : M →ₛₗ[I₁] M →ₗ[R] R) where isSymm := isSymm_zero isNonneg := isNonneg_zero protected lemma IsPosSemidef.add [Preorder R] [AddLeftMono R] {B C : M →ₛₗ[I₁] M →ₗ[R] R} (hB : B.IsPosSemidef) (hC : C.IsPosSemidef) : (B + C).IsPosSemidef := isPosSemidef_def.2 ⟨hB.isSymm.add hC.isSymm, hB.isNonneg.add hC.isNonneg⟩ end PositiveSemidefinite /-! 
### Alternating bilinear maps -/ section Alternating section CommSemiring section AddCommMonoid variable [CommSemiring R] [AddCommMonoid M] [Module R M] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] {I₁ : R₁ →+* R} {I₂ : R₁ →+* R} {I : R₁ →+* R} {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M} /-- The proposition that a sesquilinear map is alternating -/ def IsAlt (B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M) : Prop := ∀ x, B x x = 0 variable (H : B.IsAlt) include H theorem IsAlt.self_eq_zero (x : M₁) : B x x = 0 := H x theorem IsAlt.eq_of_add_add_eq_zero [IsCancelAdd M] {a b c : M₁} (hAdd : a + b + c = 0) : B a b = B b c := by have : B a a + B a b + B a c = B a c + B b c + B c c := by simp_rw [← map_add, ← map_add₂, hAdd, map_zero, LinearMap.zero_apply] rw [H, H, zero_add, add_zero, add_comm] at this exact add_left_cancel this end AddCommMonoid section AddCommGroup namespace IsAlt variable [CommSemiring R] [AddCommGroup M] [Module R M] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] {I₁ : R₁ →+* R} {I₂ : R₁ →+* R} {I : R₁ →+* R} {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M} theorem neg (H : B.IsAlt) (x y : M₁) : -B x y = B y x := by have H1 : B (y + x) (y + x) = 0 := self_eq_zero H (y + x) simpa [map_add, self_eq_zero H, add_eq_zero_iff_neg_eq] using H1 theorem isRefl (H : B.IsAlt) : B.IsRefl := by intro x y h rw [← neg H, h, neg_zero] theorem ortho_comm (H : B.IsAlt) {x y} : IsOrtho B x y ↔ IsOrtho B y x := H.isRefl.ortho_comm end IsAlt end AddCommGroup end CommSemiring section Semiring variable [CommRing R] [AddCommGroup M] [Module R M] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] {I : R₁ →+* R} theorem isAlt_iff_eq_neg_flip [NoZeroDivisors R] [CharZero R] {B : M₁ →ₛₗ[I] M₁ →ₛₗ[I] R} : B.IsAlt ↔ B = -B.flip := by constructor <;> intro h · ext simp_rw [neg_apply, flip_apply] exact (h.neg _ _).symm intro x let h' := congr_fun₂ h x x simp only [neg_apply, flip_apply, ← add_eq_zero_iff_eq_neg] at h' exact add_self_eq_zero.mp h' end Semiring end Alternating end LinearMap namespace Submodule /-! 
### The orthogonal complement -/ variable [CommRing R] [CommRing R₁] [AddCommGroup M₁] [Module R₁ M₁] [AddCommGroup M] [Module R M] {I₁ : R₁ →+* R} {I₂ : R₁ →+* R} {B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M} /-- The orthogonal complement of a submodule `N` with respect to some bilinear map is the set of elements `x` which are orthogonal to all elements of `N`; i.e., for all `y` in `N`, `B x y = 0`. Note that for general (neither symmetric nor antisymmetric) bilinear maps this definition has a chirality; in addition to this "left" orthogonal complement one could define a "right" orthogonal complement for which, for all `y` in `N`, `B y x = 0`. This variant definition is not currently provided in mathlib. -/ def orthogonalBilin (N : Submodule R₁ M₁) (B : M₁ →ₛₗ[I₁] M₁ →ₛₗ[I₂] M) : Submodule R₁ M₁ where carrier := { m | ∀ n ∈ N, B.IsOrtho n m } zero_mem' x _ := B.isOrtho_zero_right x add_mem' hx hy n hn := by rw [LinearMap.IsOrtho, map_add, show B n _ = 0 from hx n hn, show B n _ = 0 from hy n hn, zero_add] smul_mem' c x hx n hn := by rw [LinearMap.IsOrtho, LinearMap.map_smulₛₗ, show B n x = 0 from hx n hn, smul_zero] variable {N L : Submodule R₁ M₁} @[simp] theorem mem_orthogonalBilin_iff {m : M₁} : m ∈ N.orthogonalBilin B ↔ ∀ n ∈ N, B.IsOrtho n m := Iff.rfl theorem orthogonalBilin_le (h : N ≤ L) : L.orthogonalBilin B ≤ N.orthogonalBilin B := fun _ hn l hl ↦ hn l (h hl) theorem le_orthogonalBilin_orthogonalBilin (b : B.IsRefl) : N ≤ (N.orthogonalBilin B).orthogonalBilin B := fun n hn _m hm ↦ b _ _ (hm n hn) end Submodule namespace LinearMap section Orthogonal variable [Field K] [AddCommGroup V] [Module K V] [Field K₁] [AddCommGroup V₁] [Module K₁ V₁] [AddCommGroup V₂] [Module K V₂] {J : K →+* K} {J₁ : K₁ →+* K} {J₁' : K₁ →+* K} -- ↓ This lemma only applies in fields as we require `a * b = 0 → a = 0 ∨ b = 0` theorem span_singleton_inf_orthogonal_eq_bot (B : V₁ →ₛₗ[J₁] V₁ →ₛₗ[J₁'] V₂) (x : V₁) (hx : ¬B.IsOrtho x x) : (K₁ ∙ x) ⊓ Submodule.orthogonalBilin (K₁ ∙ x) B = ⊥ := by rw [← 
Finset.coe_singleton] refine eq_bot_iff.2 fun y h ↦ ?_ obtain ⟨μ, -, rfl⟩ := Submodule.mem_span_finset.1 h.1 replace h := h.2 x (by simp [Submodule.mem_span] : x ∈ Submodule.span K₁ ({x} : Finset V₁)) rw [Finset.sum_singleton] at h ⊢ suffices hμzero : μ x = 0 by rw [hμzero, zero_smul, Submodule.mem_bot] rw [isOrtho_def, map_smulₛₗ] at h exact Or.elim (smul_eq_zero.mp h) (fun y ↦ by simpa using y) (fun hfalse ↦ False.elim <| hx hfalse) -- ↓ This lemma only applies in fields since we use the `mul_eq_zero` theorem orthogonal_span_singleton_eq_to_lin_ker {B : V →ₗ[K] V →ₛₗ[J] V₂} (x : V) : Submodule.orthogonalBilin (K ∙ x) B = LinearMap.ker (B x) := by ext y simp_rw [Submodule.mem_orthogonalBilin_iff, LinearMap.mem_ker, Submodule.mem_span_singleton] constructor · exact fun h ↦ h x ⟨1, one_smul _ _⟩ · rintro h _ ⟨z, rfl⟩ rw [isOrtho_def, map_smulₛₗ₂, smul_eq_zero] exact Or.intro_right _ h -- todo: Generalize this to sesquilinear maps theorem span_singleton_sup_orthogonal_eq_top {B : V →ₗ[K] V →ₗ[K] K} {x : V} (hx : ¬B.IsOrtho x x) : (K ∙ x) ⊔ Submodule.orthogonalBilin (N := K ∙ x) (B := B) = ⊤ := by rw [orthogonal_span_singleton_eq_to_lin_ker] exact (B x).span_singleton_sup_ker_eq_top hx -- todo: Generalize this to sesquilinear maps /-- Given a bilinear form `B` and some `x` such that `B x x ≠ 0`, the span of the singleton of `x` is complement to its orthogonal complement. -/ theorem isCompl_span_singleton_orthogonal {B : V →ₗ[K] V →ₗ[K] K} {x : V} (hx : ¬B.IsOrtho x x) : IsCompl (K ∙ x) (Submodule.orthogonalBilin (N := K ∙ x) (B := B)) := { disjoint := disjoint_iff.2 <| span_singleton_inf_orthogonal_eq_bot B x hx codisjoint := codisjoint_iff.2 <| span_singleton_sup_orthogonal_eq_top hx } end Orthogonal /-! 
### Adjoint pairs -/ section AdjointPair section AddCommMonoid variable [CommSemiring R] variable [AddCommMonoid M] [Module R M] variable [AddCommMonoid M₁] [Module R M₁] variable [AddCommMonoid M₂] [Module R M₂] variable [AddCommMonoid M₃] [Module R M₃] variable {I : R →+* R} variable {B F : M →ₗ[R] M →ₛₗ[I] M₃} {B' : M₁ →ₗ[R] M₁ →ₛₗ[I] M₃} {B'' : M₂ →ₗ[R] M₂ →ₛₗ[I] M₃} variable {f f' : M →ₗ[R] M₁} {g g' : M₁ →ₗ[R] M} variable (B B' f g) /-- Given a pair of modules equipped with bilinear maps, this is the condition for a pair of maps between them to be mutually adjoint. -/ def IsAdjointPair (f : M → M₁) (g : M₁ → M) := ∀ x y, B' (f x) y = B x (g y) variable {B B' f g} theorem isAdjointPair_iff_comp_eq_compl₂ : IsAdjointPair B B' f g ↔ B'.comp f = B.compl₂ g := by constructor <;> intro h · ext x y rw [comp_apply, compl₂_apply] exact h x y · intro _ _ rw [← compl₂_apply, ← comp_apply, h] theorem isAdjointPair_zero : IsAdjointPair B B' 0 0 := fun _ _ ↦ by simp only [Pi.zero_apply, map_zero, zero_apply] theorem isAdjointPair_id : IsAdjointPair B B (_root_.id : M → M) (_root_.id : M → M) := fun _ _ ↦ rfl theorem isAdjointPair_one : IsAdjointPair B B (1 : Module.End R M) (1 : Module.End R M) := isAdjointPair_id theorem IsAdjointPair.add {f f' : M → M₁} {g g' : M₁ → M} (h : IsAdjointPair B B' f g) (h' : IsAdjointPair B B' f' g') : IsAdjointPair B B' (f + f') (g + g') := fun x _ ↦ by rw [Pi.add_apply, Pi.add_apply, B'.map_add₂, (B x).map_add, h, h'] theorem IsAdjointPair.comp {f : M → M₁} {g : M₁ → M} {f' : M₁ → M₂} {g' : M₂ → M₁} (h : IsAdjointPair B B' f g) (h' : IsAdjointPair B' B'' f' g') : IsAdjointPair B B'' (f' ∘ f) (g ∘ g') := fun _ _ ↦ by rw [Function.comp_def, Function.comp_def, h', h] theorem IsAdjointPair.mul {f g f' g' : Module.End R M} (h : IsAdjointPair B B f g) (h' : IsAdjointPair B B f' g') : IsAdjointPair B B (f * f') (g' * g) := h'.comp h end AddCommMonoid section AddCommGroup variable [CommRing R] variable [AddCommGroup M] [Module R M] variable 
[AddCommGroup M₁] [Module R M₁] variable [AddCommGroup M₂] [Module R M₂] variable {B F : M →ₗ[R] M →ₗ[R] M₂} {B' : M₁ →ₗ[R] M₁ →ₗ[R] M₂} variable {f f' : M → M₁} {g g' : M₁ → M} theorem IsAdjointPair.sub (h : IsAdjointPair B B' f g) (h' : IsAdjointPair B B' f' g') : IsAdjointPair B B' (f - f') (g - g') := fun x _ ↦ by rw [Pi.sub_apply, Pi.sub_apply, B'.map_sub₂, (B x).map_sub, h, h'] theorem IsAdjointPair.smul (c : R) (h : IsAdjointPair B B' f g) : IsAdjointPair B B' (c • f) (c • g) := fun _ _ ↦ by simp [h _] end AddCommGroup section OrthogonalMap variable {R M : Type*} [CommRing R] [AddCommGroup M] [Module R M] (B : LinearMap.BilinForm R M) (f : M → M) /-- A linear transformation `f` is orthogonal with respect to a bilinear form `B` if `B` is bi-invariant with respect to `f`. -/ def IsOrthogonal : Prop := ∀ x y, B (f x) (f y) = B x y variable {B f} @[simp] lemma _root_.LinearEquiv.isAdjointPair_symm_iff {f : M ≃ M} : LinearMap.IsAdjointPair B B f f.symm ↔ B.IsOrthogonal f := ⟨fun hf x y ↦ by simpa using hf x (f y), fun hf x y ↦ by simpa using hf x (f.symm y)⟩ lemma isOrthogonal_of_forall_apply_same {F : Type*} [FunLike F M M] [LinearMapClass F R M M] (f : F) (h : IsLeftRegular (2 : R)) (hB : B.IsSymm) (hf : ∀ x, B (f x) (f x) = B x x) : B.IsOrthogonal f := by intro x y suffices 2 * B (f x) (f y) = 2 * B x y from h this have := hf (x + y) simp only [map_add, LinearMap.add_apply, hf x, hf y, show B y x = B x y from hB.eq y x] at this rw [show B (f y) (f x) = B (f x) (f y) from hB.eq (f y) (f x)] at this simp only [add_assoc, add_right_inj] at this simp only [← add_assoc, add_left_inj] at this simpa only [← two_mul] using this end OrthogonalMap end AdjointPair /-! 
### Self-adjoint pairs -/ section SelfadjointPair section AddCommMonoid variable [CommSemiring R] variable [AddCommMonoid M] [Module R M] variable [AddCommMonoid M₁] [Module R M₁] variable {I : R →+* R} variable (B F : M →ₗ[R] M →ₛₗ[I] M₁) /-- The condition for an endomorphism to be "self-adjoint" with respect to a pair of bilinear maps on the underlying module. In the case that these two maps are identical, this is the usual concept of self adjointness. In the case that one of the maps is the negation of the other, this is the usual concept of skew adjointness. -/ def IsPairSelfAdjoint (f : M → M) := IsAdjointPair B F f f /-- An endomorphism of a module is self-adjoint with respect to a bilinear map if it serves as an adjoint for itself. -/ protected def IsSelfAdjoint (f : M → M) := IsAdjointPair B B f f end AddCommMonoid section AddCommGroup variable [CommRing R] variable [AddCommGroup M] [Module R M] [AddCommGroup M₁] [Module R M₁] variable [AddCommGroup M₂] [Module R M₂] (B F : M →ₗ[R] M →ₗ[R] M₂) /-- The set of pair-self-adjoint endomorphisms are a submodule of the type of all endomorphisms. -/ def isPairSelfAdjointSubmodule : Submodule R (Module.End R M) where carrier := { f | IsPairSelfAdjoint B F f } zero_mem' := isAdjointPair_zero add_mem' hf hg := hf.add hg smul_mem' c _ h := h.smul c /-- An endomorphism of a module is skew-adjoint with respect to a bilinear map if its negation serves as an adjoint. -/ def IsSkewAdjoint (f : M → M) := IsAdjointPair B B f (-f) /-- The set of self-adjoint endomorphisms of a module with bilinear map is a submodule. (In fact it is a Jordan subalgebra.) -/ def selfAdjointSubmodule := isPairSelfAdjointSubmodule B B /-- The set of skew-adjoint endomorphisms of a module with bilinear map is a submodule. (In fact it is a Lie subalgebra.) 
-/ def skewAdjointSubmodule := isPairSelfAdjointSubmodule (-B) B variable {B F} @[simp] theorem mem_isPairSelfAdjointSubmodule (f : Module.End R M) : f ∈ isPairSelfAdjointSubmodule B F ↔ IsPairSelfAdjoint B F f := Iff.rfl theorem isPairSelfAdjoint_equiv (e : M₁ ≃ₗ[R] M) (f : Module.End R M) : IsPairSelfAdjoint B F f ↔ IsPairSelfAdjoint (B.compl₁₂ e e) (F.compl₁₂ e e) (e.symm.conj f) := by have hₗ : (F.compl₁₂ (↑e : M₁ →ₗ[R] M) (↑e : M₁ →ₗ[R] M)).comp (e.symm.conj f) = (F.comp f).compl₁₂ (↑e : M₁ →ₗ[R] M) (↑e : M₁ →ₗ[R] M) := by ext simp only [LinearEquiv.symm_conj_apply, coe_comp, LinearEquiv.coe_coe, compl₁₂_apply, LinearEquiv.apply_symm_apply, Function.comp_apply] have hᵣ : (B.compl₁₂ (↑e : M₁ →ₗ[R] M) (↑e : M₁ →ₗ[R] M)).compl₂ (e.symm.conj f) = (B.compl₂ f).compl₁₂ (↑e : M₁ →ₗ[R] M) (↑e : M₁ →ₗ[R] M) := by ext simp only [LinearEquiv.symm_conj_apply, compl₂_apply, coe_comp, LinearEquiv.coe_coe, compl₁₂_apply, LinearEquiv.apply_symm_apply, Function.comp_apply] have he : Function.Surjective (⇑(↑e : M₁ →ₗ[R] M) : M₁ → M) := e.surjective simp_rw [IsPairSelfAdjoint, isAdjointPair_iff_comp_eq_compl₂, hₗ, hᵣ, compl₁₂_inj he he] theorem isSkewAdjoint_iff_neg_self_adjoint (f : M → M) : B.IsSkewAdjoint f ↔ IsAdjointPair (-B) B f f := show (∀ x y, B (f x) y = B x ((-f) y)) ↔ ∀ x y, B (f x) y = (-B) x (f y) by simp @[simp] theorem mem_selfAdjointSubmodule (f : Module.End R M) : f ∈ B.selfAdjointSubmodule ↔ B.IsSelfAdjoint f := Iff.rfl @[simp] theorem mem_skewAdjointSubmodule (f : Module.End R M) : f ∈ B.skewAdjointSubmodule ↔ B.IsSkewAdjoint f := by rw [isSkewAdjoint_iff_neg_self_adjoint] exact Iff.rfl end AddCommGroup end SelfadjointPair /-! 
### Nondegenerate bilinear maps -/ section Nondegenerate section CommSemiring variable [CommSemiring R] [AddCommMonoid M] [Module R M] [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] [CommSemiring R₂] [AddCommMonoid M₂] [Module R₂ M₂] {I₁ : R₁ →+* R} {I₂ : R₂ →+* R} {I₁' : R₁ →+* R} /-- A bilinear map is called left-separating if the only element that is left-orthogonal to every other element is `0`; i.e., for every nonzero `x` in `M₁`, there exists `y` in `M₂` with `B x y ≠ 0`. -/ def SeparatingLeft (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) : Prop := ∀ x : M₁, (∀ y : M₂, B x y = 0) → x = 0 variable (M₁ M₂ I₁ I₂) /-- In a non-trivial module, zero is not non-degenerate. -/ theorem not_separatingLeft_zero [Nontrivial M₁] : ¬(0 : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M).SeparatingLeft := let ⟨m, hm⟩ := exists_ne (0 : M₁) fun h ↦ hm (h m fun _n ↦ rfl) variable {M₁ M₂ I₁ I₂} theorem SeparatingLeft.ne_zero [Nontrivial M₁] {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} (h : B.SeparatingLeft) : B ≠ 0 := fun h0 ↦ not_separatingLeft_zero M₁ M₂ I₁ I₂ <| h0 ▸ h section Linear variable [AddCommMonoid Mₗ₁] [AddCommMonoid Mₗ₂] [AddCommMonoid Mₗ₁'] [AddCommMonoid Mₗ₂'] variable [Module R Mₗ₁] [Module R Mₗ₂] [Module R Mₗ₁'] [Module R Mₗ₂'] variable {B : Mₗ₁ →ₗ[R] Mₗ₂ →ₗ[R] M} (e₁ : Mₗ₁ ≃ₗ[R] Mₗ₁') (e₂ : Mₗ₂ ≃ₗ[R] Mₗ₂') theorem SeparatingLeft.congr (h : B.SeparatingLeft) : (e₁.arrowCongr (e₂.arrowCongr (LinearEquiv.refl R M)) B).SeparatingLeft := by intro x hx rw [← e₁.symm.map_eq_zero_iff] refine h (e₁.symm x) fun y ↦ ?_ specialize hx (e₂ y) simp only [LinearEquiv.arrowCongr_apply, LinearEquiv.symm_apply_apply, LinearEquiv.map_eq_zero_iff] at hx exact hx @[simp] theorem separatingLeft_congr_iff : (e₁.arrowCongr (e₂.arrowCongr (LinearEquiv.refl R M)) B).SeparatingLeft ↔ B.SeparatingLeft := ⟨fun h ↦ by convert h.congr e₁.symm e₂.symm ext x y simp, SeparatingLeft.congr e₁ e₂⟩ end Linear /-- A bilinear map is called right-separating if the only element that is right-orthogonal to every other element is `0`; i.e., for every 
nonzero `y` in `M₂`, there exists `x` in `M₁` with `B x y ≠ 0`. -/ def SeparatingRight (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) : Prop := ∀ y : M₂, (∀ x : M₁, B x y = 0) → y = 0 /-- A bilinear map is called non-degenerate if it is left-separating and right-separating. -/ def Nondegenerate (B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M) : Prop := SeparatingLeft B ∧ SeparatingRight B @[simp] theorem flip_separatingRight {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.flip.SeparatingRight ↔ B.SeparatingLeft := ⟨fun hB x hy ↦ hB x hy, fun hB x hy ↦ hB x hy⟩ @[simp] theorem flip_separatingLeft {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.flip.SeparatingLeft ↔ SeparatingRight B := by rw [← flip_separatingRight, flip_flip] @[simp] theorem flip_nondegenerate {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.flip.Nondegenerate ↔ B.Nondegenerate := Iff.trans and_comm (and_congr flip_separatingRight flip_separatingLeft) theorem separatingLeft_iff_linear_nontrivial {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.SeparatingLeft ↔ ∀ x : M₁, B x = 0 → x = 0 := by constructor <;> intro h x hB · simpa only [hB, zero_apply, eq_self_iff_true, forall_const] using h x have h' : B x = 0 := by ext rw [zero_apply] exact hB _ exact h x h' theorem separatingRight_iff_linear_flip_nontrivial {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.SeparatingRight ↔ ∀ y : M₂, B.flip y = 0 → y = 0 := by rw [← flip_separatingLeft, separatingLeft_iff_linear_nontrivial] /-- A bilinear map is left-separating if and only if it has a trivial kernel. -/ theorem separatingLeft_iff_ker_eq_bot {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.SeparatingLeft ↔ LinearMap.ker B = ⊥ := Iff.trans separatingLeft_iff_linear_nontrivial LinearMap.ker_eq_bot'.symm /-- A bilinear map is right-separating if and only if its flip has a trivial kernel. 
-/ theorem separatingRight_iff_flip_ker_eq_bot {B : M₁ →ₛₗ[I₁] M₂ →ₛₗ[I₂] M} : B.SeparatingRight ↔ LinearMap.ker B.flip = ⊥ := by rw [← flip_separatingLeft, separatingLeft_iff_ker_eq_bot] end CommSemiring section CommRing variable [CommRing R] [AddCommGroup M] [Module R M] [AddCommGroup M₁] [Module R M₁] {I I' : R →+* R} theorem IsRefl.nondegenerate_iff_separatingLeft {B : M →ₗ[R] M →ₗ[R] M₁} (hB : B.IsRefl) : B.Nondegenerate ↔ B.SeparatingLeft := by refine ⟨fun h ↦ h.1, fun hB' ↦ ⟨hB', ?_⟩⟩ rw [separatingRight_iff_flip_ker_eq_bot, hB.ker_eq_bot_iff_ker_flip_eq_bot.mp] rwa [← separatingLeft_iff_ker_eq_bot] theorem IsRefl.nondegenerate_iff_separatingRight {B : M →ₗ[R] M →ₗ[R] M₁} (hB : B.IsRefl) : B.Nondegenerate ↔ B.SeparatingRight := by refine ⟨fun h ↦ h.2, fun hB' ↦ ⟨?_, hB'⟩⟩ rw [separatingLeft_iff_ker_eq_bot, hB.ker_eq_bot_iff_ker_flip_eq_bot.mpr] rwa [← separatingRight_iff_flip_ker_eq_bot] lemma disjoint_ker_of_nondegenerate_restrict {B : M →ₗ[R] M →ₗ[R] M₁} {W : Submodule R M} (hW : (B.domRestrict₁₂ W W).Nondegenerate) : Disjoint W (LinearMap.ker B) := by refine Submodule.disjoint_def.mpr fun x hx hx' ↦ ?_ let x' : W := ⟨x, hx⟩ suffices x' = 0 by simpa [x'] apply hW.1 x' simp_rw [Subtype.forall, domRestrict₁₂_apply] intro y hy rw [mem_ker] at hx' simp [x', hx'] lemma IsSymm.nondegenerate_restrict_of_isCompl_ker {B : M →ₗ[R] M →ₗ[R] R} (hB : B.IsSymm) {W : Submodule R M} (hW : IsCompl W (LinearMap.ker B)) : (B.domRestrict₁₂ W W).Nondegenerate := by have hB' : (B.domRestrict₁₂ W W).IsRefl := fun x y ↦ hB.isRefl (W.subtype x) (W.subtype y) rw [LinearMap.IsRefl.nondegenerate_iff_separatingLeft hB'] intro ⟨x, hx⟩ hx' simp only [Submodule.mk_eq_zero] replace hx' : ∀ y ∈ W, B x y = 0 := by simpa [Subtype.forall] using hx' replace hx' : x ∈ W ⊓ ker B := by refine ⟨hx, ?_⟩ ext y obtain ⟨u, hu, v, hv, rfl⟩ : ∃ u ∈ W, ∃ v ∈ ker B, u + v = y := by rw [← Submodule.mem_sup, hW.sup_eq_top]; exact Submodule.mem_top suffices B x u = 0 by rw [mem_ker] at hv; simpa [← hB.eq v, 
hv] exact hx' u hu simpa [hW.inf_eq_bot] using hx' /-- The restriction of a reflexive bilinear map `B` onto a submodule `W` is nondegenerate if `W` has trivial intersection with its orthogonal complement, that is `Disjoint W (W.orthogonalBilin B)`. -/ theorem nondegenerate_restrict_of_disjoint_orthogonal {B : M →ₗ[R] M →ₗ[R] M₁} (hB : B.IsRefl) {W : Submodule R M} (hW : Disjoint W (W.orthogonalBilin B)) : (B.domRestrict₁₂ W W).Nondegenerate := by rw [(hB.domRestrict W).nondegenerate_iff_separatingLeft] rintro ⟨x, hx⟩ b₁ rw [Submodule.mk_eq_zero, ← Submodule.mem_bot R] refine hW.le_bot ⟨hx, fun y hy ↦ ?_⟩ specialize b₁ ⟨y, hy⟩ simp_rw [domRestrict₁₂_apply] at b₁ rw [hB.ortho_comm] exact b₁ /-- An orthogonal basis with respect to a left-separating bilinear map has no self-orthogonal elements. -/ theorem IsOrthoᵢ.not_isOrtho_basis_self_of_separatingLeft [Nontrivial R] {B : M →ₛₗ[I] M →ₛₗ[I'] M₁} {v : Basis n R M} (h : B.IsOrthoᵢ v) (hB : B.SeparatingLeft) (i : n) : ¬B.IsOrtho (v i) (v i) := by intro ho refine v.ne_zero i (hB (v i) fun m ↦ ?_) obtain ⟨vi, rfl⟩ := v.repr.symm.surjective m rw [Basis.repr_symm_apply, Finsupp.linearCombination_apply, Finsupp.sum, map_sum] apply Finset.sum_eq_zero rintro j - rw [map_smulₛₗ] suffices B (v i) (v j) = 0 by rw [this, smul_zero] obtain rfl | hij := eq_or_ne i j · exact ho · exact h hij /-- An orthogonal basis with respect to a right-separating bilinear map has no self-orthogonal elements. -/ theorem IsOrthoᵢ.not_isOrtho_basis_self_of_separatingRight [Nontrivial R] {B : M →ₛₗ[I] M →ₛₗ[I'] M₁} {v : Basis n R M} (h : B.IsOrthoᵢ v) (hB : B.SeparatingRight) (i : n) : ¬B.IsOrtho (v i) (v i) := by rw [isOrthoᵢ_flip] at h rw [isOrtho_flip] exact h.not_isOrtho_basis_self_of_separatingLeft (flip_separatingLeft.mpr hB) i /-- Given an orthogonal basis with respect to a bilinear map, the bilinear map is left-separating if the basis has no elements which are self-orthogonal. 
-/ theorem IsOrthoᵢ.separatingLeft_of_not_isOrtho_basis_self [NoZeroSMulDivisors R M₁] {B : M →ₗ[R] M →ₗ[R] M₁} (v : Basis n R M) (hO : B.IsOrthoᵢ v) (h : ∀ i, ¬B.IsOrtho (v i) (v i)) : B.SeparatingLeft := by intro m hB obtain ⟨vi, rfl⟩ := v.repr.symm.surjective m rw [LinearEquiv.map_eq_zero_iff] ext i rw [Finsupp.zero_apply] specialize hB (v i) simp_rw [Basis.repr_symm_apply, Finsupp.linearCombination_apply, Finsupp.sum, map_sum₂, map_smulₛₗ₂] at hB rw [Finset.sum_eq_single i] at hB · exact (smul_eq_zero.mp hB).elim _root_.id (h i).elim · intro j _hj hij replace hij : B (v j) (v i) = 0 := hO hij rw [hij, RingHom.id_apply, smul_zero] · intro hi replace hi : vi i = 0 := Finsupp.notMem_support_iff.mp hi rw [hi, RingHom.id_apply, zero_smul] /-- Given an orthogonal basis with respect to a bilinear map, the bilinear map is right-separating if the basis has no elements which are self-orthogonal. -/ theorem IsOrthoᵢ.separatingRight_iff_not_isOrtho_basis_self [NoZeroSMulDivisors R M₁] {B : M →ₗ[R] M →ₗ[R] M₁} (v : Basis n R M) (hO : B.IsOrthoᵢ v) (h : ∀ i, ¬B.IsOrtho (v i) (v i)) : B.SeparatingRight := by rw [isOrthoᵢ_flip] at hO rw [← flip_separatingLeft] refine IsOrthoᵢ.separatingLeft_of_not_isOrtho_basis_self v hO fun i ↦ ?_ rw [isOrtho_flip] exact h i /-- Given an orthogonal basis with respect to a bilinear map, the bilinear map is nondegenerate if the basis has no elements which are self-orthogonal. 
-/ theorem IsOrthoᵢ.nondegenerate_of_not_isOrtho_basis_self [NoZeroSMulDivisors R M₁] {B : M →ₗ[R] M →ₗ[R] M₁} (v : Basis n R M) (hO : B.IsOrthoᵢ v) (h : ∀ i, ¬B.IsOrtho (v i) (v i)) : B.Nondegenerate := ⟨IsOrthoᵢ.separatingLeft_of_not_isOrtho_basis_self v hO h, IsOrthoᵢ.separatingRight_iff_not_isOrtho_basis_self v hO h⟩ end CommRing end Nondegenerate namespace BilinForm lemma apply_smul_sub_smul_sub_eq [CommRing R] [AddCommGroup M] [Module R M] (B : LinearMap.BilinForm R M) (x y : M) : B ((B x y) • x - (B x x) • y) ((B x y) • x - (B x x) • y) = (B x x) * ((B x x) * (B y y) - (B x y) * (B y x)) := by simp only [map_sub, map_smul, sub_apply, smul_apply, smul_eq_mul, mul_sub, mul_comm (B x y) (B x x), mul_left_comm (B x y) (B x x)] abel variable [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] [AddCommGroup M] [Module R M] (B : LinearMap.BilinForm R M) /-- The **Cauchy-Schwarz inequality** for positive semidefinite forms. -/ lemma apply_mul_apply_le_of_forall_zero_le (hs : ∀ x, 0 ≤ B x x) (x y : M) : (B x y) * (B y x) ≤ (B x x) * (B y y) := by have aux (x y : M) : 0 ≤ (B x x) * ((B x x) * (B y y) - (B x y) * (B y x)) := by rw [← apply_smul_sub_smul_sub_eq B x y] exact hs (B x y • x - B x x • y) rcases lt_or_ge 0 (B x x) with hx | hx · exact sub_nonneg.mp <| nonneg_of_mul_nonneg_right (aux x y) hx · replace hx : B x x = 0 := le_antisymm hx (hs x) rcases lt_or_ge 0 (B y y) with hy | hy · rw [mul_comm (B x y), mul_comm (B x x)] exact sub_nonneg.mp <| nonneg_of_mul_nonneg_right (aux y x) hy · replace hy : B y y = 0 := le_antisymm hy (hs y) suffices B x y = - B y x by simpa [this, hx, hy] using mul_self_nonneg (B y x) rw [eq_neg_iff_add_eq_zero] apply le_antisymm · simpa [hx, hy, le_neg_iff_add_nonpos_left] using hs (x - y) · simpa [hx, hy] using hs (x + y) /-- The **Cauchy-Schwarz inequality** for positive semidefinite symmetric forms. 
-/ lemma apply_sq_le_of_symm (hs : ∀ x, 0 ≤ B x x) (hB : B.IsSymm) (x y : M) : (B x y) ^ 2 ≤ (B x x) * (B y y) := by rw [show (B x y) ^ 2 = (B x y) * (B y x) by rw [sq, ← hB.eq, RingHom.id_apply]] exact apply_mul_apply_le_of_forall_zero_le B hs x y /-- The equality case of **Cauchy-Schwarz**. -/ lemma not_linearIndependent_of_apply_mul_apply_eq (hp : ∀ x, x ≠ 0 → 0 < B x x) (x y : M) (he : (B x y) * (B y x) = (B x x) * (B y y)) : ¬ LinearIndependent R ![x, y] := by have hz : (B x y) • x - (B x x) • y = 0 := by by_contra hc exact (ne_of_lt (hp ((B x) y • x - (B x) x • y) hc)).symm <| (apply_smul_sub_smul_sub_eq B x y).symm ▸ (mul_eq_zero_of_right ((B x) x) (sub_eq_zero_of_eq he.symm)) by_contra hL by_cases hx : x = 0 · simpa [hx] using LinearIndependent.ne_zero 0 hL · have h := sub_eq_zero.mpr (sub_eq_zero.mp hz).symm rw [sub_eq_add_neg, ← neg_smul, add_comm] at h exact (Ne.symm (ne_of_lt (hp x hx))) (LinearIndependent.eq_zero_of_pair hL h).2 /-- Strict **Cauchy-Schwarz** is equivalent to linear independence for positive definite forms. -/ lemma apply_mul_apply_lt_iff_linearIndependent [NoZeroSMulDivisors R M] (hp : ∀ x, x ≠ 0 → 0 < B x x) (x y : M) : (B x y) * (B y x) < (B x x) * (B y y) ↔ LinearIndependent R ![x, y] := by have hle : ∀ z, 0 ≤ B z z := by intro z by_cases hz : z = 0 · simp [hz] exact le_of_lt (hp z hz) constructor · contrapose! 
intro h rw [LinearIndependent.pair_iff] at h push_neg at h obtain ⟨r, s, hl, h0⟩ := h by_cases hr : r = 0; · simp_all by_cases hs : s = 0; · simp_all suffices (B (r • x) (r • x)) * (B (s • y) (s • y)) = (B (r • x) (s • y)) * (B (s • y) (r • x)) by simp only [map_smul, smul_apply, smul_eq_mul] at this rw [show r * (r * (B x) x) * (s * (s * (B y) y)) = (r * r * s * s) * ((B x) x * (B y) y) by ring, show s * (r * (B x) y) * (r * (s * (B y) x)) = (r * r * s * s) * ((B x) y * (B y) x) by ring] at this have hrs : r * r * s * s ≠ 0 := by simp [hr, hs] exact le_of_eq <| mul_right_injective₀ hrs this simp [show s • y = - r • x by rwa [neg_smul, ← add_eq_zero_iff_eq_neg']] · contrapose! intro h refine not_linearIndependent_of_apply_mul_apply_eq B hp x y (le_antisymm (apply_mul_apply_le_of_forall_zero_le B hle x y) h) /-- Strict **Cauchy-Schwarz** is equivalent to linear independence for positive definite symmetric forms. -/ lemma apply_sq_lt_iff_linearIndependent_of_symm [NoZeroSMulDivisors R M] (hp : ∀ x, x ≠ 0 → 0 < B x x) (hB : B.IsSymm) (x y : M) : (B x y) ^ 2 < (B x x) * (B y y) ↔ LinearIndependent R ![x, y] := by rw [show (B x y) ^ 2 = (B x y) * (B y x) by rw [sq, ← hB.eq, RingHom.id_apply]] exact apply_mul_apply_lt_iff_linearIndependent B hp x y lemma apply_apply_same_eq_zero_iff (hs : ∀ x, 0 ≤ B x x) (hB : B.IsSymm) {x : M} : B x x = 0 ↔ x ∈ LinearMap.ker B := by rw [LinearMap.mem_ker] refine ⟨fun h ↦ ?_, fun h ↦ by simp [h]⟩ ext y have := B.apply_sq_le_of_symm hs hB x y simp only [h, zero_mul] at this exact eq_zero_of_pow_eq_zero <| le_antisymm this (sq_nonneg (B x y)) lemma nondegenerate_iff (hs : ∀ x, 0 ≤ B x x) (hB : B.IsSymm) : B.Nondegenerate ↔ ∀ x, B x x = 0 ↔ x = 0 := by simp_rw [hB.isRefl.nondegenerate_iff_separatingLeft, separatingLeft_iff_ker_eq_bot, Submodule.eq_bot_iff, B.apply_apply_same_eq_zero_iff hs hB, mem_ker] exact forall_congr' fun x ↦ by aesop /-- A convenience variant of `LinearMap.BilinForm.nondegenerate_iff` characterising nondegeneracy as 
positive definiteness. -/ lemma nondegenerate_iff' (hs : ∀ x, 0 ≤ B x x) (hB : B.IsSymm) : B.Nondegenerate ↔ ∀ x, x ≠ 0 → 0 < B x x := by rw [B.nondegenerate_iff hs hB, ← not_iff_not] push_neg exact exists_congr fun x ↦ ⟨by aesop, fun ⟨h₀, h⟩ ↦ Or.inl ⟨le_antisymm h (hs x), h₀⟩⟩ lemma nondegenerate_restrict_iff_disjoint_ker (hs : ∀ x, 0 ≤ B x x) (hB : B.IsSymm) {W : Submodule R M} : (B.domRestrict₁₂ W W).Nondegenerate ↔ Disjoint W (LinearMap.ker B) := by refine ⟨disjoint_ker_of_nondegenerate_restrict, fun hW ↦ ?_⟩ have hB' : (B.domRestrict₁₂ W W).IsRefl := fun x y ↦ hB.isRefl (W.subtype x) (W.subtype y) rw [IsRefl.nondegenerate_iff_separatingLeft hB'] intro ⟨x, hx⟩ h simp_rw [Subtype.forall, domRestrict₁₂_apply] at h specialize h x hx rw [B.apply_apply_same_eq_zero_iff hs hB] at h have key : x ∈ W ⊓ LinearMap.ker B := ⟨hx, h⟩ simpa [hW.eq_bot] using key end BilinForm end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/TensorAlgebra/ToTensorPower.lean
import Mathlib.LinearAlgebra.TensorAlgebra.Basic import Mathlib.LinearAlgebra.TensorPower.Basic /-! # Tensor algebras as direct sums of tensor powers In this file we show that `TensorAlgebra R M` is isomorphic to a direct sum of tensor powers, as `TensorAlgebra.equivDirectSum`. -/ open scoped DirectSum TensorProduct variable {R M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] namespace TensorPower /-- The canonical embedding from a tensor power to the tensor algebra -/ def toTensorAlgebra {n} : ⨂[R]^n M →ₗ[R] TensorAlgebra R M := PiTensorProduct.lift (TensorAlgebra.tprod R M n) @[simp] theorem toTensorAlgebra_tprod {n} (x : Fin n → M) : TensorPower.toTensorAlgebra (PiTensorProduct.tprod R x) = TensorAlgebra.tprod R M n x := PiTensorProduct.lift.tprod _ @[simp] theorem toTensorAlgebra_gOne : TensorPower.toTensorAlgebra (@GradedMonoid.GOne.one _ (fun n => ⨂[R]^n M) _ _) = 1 := by simp [GradedMonoid.GOne.one, TensorPower.toTensorAlgebra_tprod] @[simp] theorem toTensorAlgebra_gMul {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) : TensorPower.toTensorAlgebra (@GradedMonoid.GMul.mul _ (fun n => ⨂[R]^n M) _ _ _ _ a b) = TensorPower.toTensorAlgebra a * TensorPower.toTensorAlgebra b := by -- change `a` and `b` to `tprod R a` and `tprod R b` rw [TensorPower.gMul_eq_coe_linearMap, ← LinearMap.compr₂_apply, ← @LinearMap.mul_apply' R, ← LinearMap.compl₂_apply, ← LinearMap.comp_apply] refine LinearMap.congr_fun (LinearMap.congr_fun ?_ a) b clear! 
a b ext (a b) simp only [LinearMap.compMultilinearMap_apply, LinearMap.compr₂_apply, ← gMul_def, TensorProduct.mk_apply, LinearEquiv.coe_coe, tprod_mul_tprod, toTensorAlgebra_tprod, TensorAlgebra.tprod_apply, LinearMap.comp_apply, LinearMap.compl₂_apply] refine Eq.trans ?_ List.prod_append congr rw [List.ofFn_comp' _ (TensorAlgebra.ι R), List.ofFn_comp' _ (TensorAlgebra.ι R), List.ofFn_comp' _ (TensorAlgebra.ι R), ← List.map_append, List.ofFn_fin_append] @[simp] theorem toTensorAlgebra_galgebra_toFun (r : R) : TensorPower.toTensorAlgebra (DirectSum.GAlgebra.toFun (R := R) (A := fun n => ⨂[R]^n M) r) = algebraMap _ _ r := by rw [TensorPower.galgebra_toFun_def, TensorPower.algebraMap₀_eq_smul_one, LinearMap.map_smul, TensorPower.toTensorAlgebra_gOne, Algebra.algebraMap_eq_smul_one] end TensorPower namespace TensorAlgebra /-- The canonical map from a direct sum of tensor powers to the tensor algebra. -/ def ofDirectSum : (⨁ n, ⨂[R]^n M) →ₐ[R] TensorAlgebra R M := DirectSum.toAlgebra _ _ (fun _ => TensorPower.toTensorAlgebra) TensorPower.toTensorAlgebra_gOne (fun {_ _} => TensorPower.toTensorAlgebra_gMul) @[simp] theorem ofDirectSum_of_tprod {n} (x : Fin n → M) : ofDirectSum (DirectSum.of _ n (PiTensorProduct.tprod R x)) = tprod R M n x := (DirectSum.toAddMonoid_of (fun _ ↦ LinearMap.toAddMonoidHom TensorPower.toTensorAlgebra) _ _).trans (TensorPower.toTensorAlgebra_tprod _) /-- The canonical map from the tensor algebra to a direct sum of tensor powers. 
-/ def toDirectSum : TensorAlgebra R M →ₐ[R] ⨁ n, ⨂[R]^n M := TensorAlgebra.lift R <| DirectSum.lof R ℕ (fun n => ⨂[R]^n M) _ ∘ₗ (LinearEquiv.symm <| PiTensorProduct.subsingletonEquiv (0 : Fin 1) : M ≃ₗ[R] _).toLinearMap @[simp] theorem toDirectSum_ι (x : M) : toDirectSum (ι R x) = DirectSum.of (fun n => ⨂[R]^n M) _ (PiTensorProduct.tprod R fun _ : Fin 1 => x) := TensorAlgebra.lift_ι_apply _ _ theorem ofDirectSum_comp_toDirectSum : ofDirectSum.comp toDirectSum = AlgHom.id R (TensorAlgebra R M) := by ext simp [tprod_apply] @[simp] theorem ofDirectSum_toDirectSum (x : TensorAlgebra R M) : ofDirectSum (TensorAlgebra.toDirectSum x) = x := AlgHom.congr_fun ofDirectSum_comp_toDirectSum x @[simp] theorem mk_reindex_cast {n m : ℕ} (h : n = m) (x : ⨂[R]^n M) : GradedMonoid.mk (A := fun i => (⨂[R]^i) M) m (PiTensorProduct.reindex R (fun _ ↦ M) (Equiv.cast <| congr_arg Fin h) x) = GradedMonoid.mk n x := Eq.symm (PiTensorProduct.gradedMonoid_eq_of_reindex_cast h rfl) @[simp] theorem mk_reindex_fin_cast {n m : ℕ} (h : n = m) (x : ⨂[R]^n M) : GradedMonoid.mk (A := fun i => (⨂[R]^i) M) m (PiTensorProduct.reindex R (fun _ ↦ M) (finCongr h) x) = GradedMonoid.mk n x := by rw [finCongr_eq_equivCast, mk_reindex_cast h] /-- The product of tensor products made of a single vector is the same as a single product of all the vectors. 
-/ theorem _root_.TensorPower.list_prod_gradedMonoid_mk_single (n : ℕ) (x : Fin n → M) : ((List.finRange n).map fun a => (GradedMonoid.mk _ (PiTensorProduct.tprod R fun _ : Fin 1 => x a) : GradedMonoid fun n => ⨂[R]^n M)).prod = GradedMonoid.mk n (PiTensorProduct.tprod R x) := by refine Fin.consInduction ?_ ?_ x <;> clear x · rw [List.finRange_zero, List.map_nil, List.prod_nil] rfl · intro n x₀ x ih rw [List.finRange_succ, List.map_cons, List.prod_cons, List.map_map] simp_rw [Function.comp_def, Fin.cons_zero, Fin.cons_succ] rw [ih, GradedMonoid.mk_mul_mk, TensorPower.tprod_mul_tprod] refine TensorPower.gradedMonoid_eq_of_cast (add_comm _ _) ?_ dsimp only [GradedMonoid.mk] rw [TensorPower.cast_tprod] simp_rw [Fin.append_left_eq_cons, Function.comp_def] congr 1 with i theorem toDirectSum_tensorPower_tprod {n} (x : Fin n → M) : toDirectSum (tprod R M n x) = DirectSum.of _ n (PiTensorProduct.tprod R x) := by rw [tprod_apply, map_list_prod, List.map_ofFn] simp_rw [Function.comp_def, toDirectSum_ι] rw [DirectSum.list_prod_ofFn_of_eq_dProd] apply DirectSum.of_eq_of_gradedMonoid_eq rw [GradedMonoid.mk_list_dProd] rw [TensorPower.list_prod_gradedMonoid_mk_single] theorem toDirectSum_comp_ofDirectSum : toDirectSum.comp ofDirectSum = AlgHom.id R (⨁ n, ⨂[R]^n M) := by ext simp [DirectSum.lof_eq_of, -tprod_apply, toDirectSum_tensorPower_tprod] @[simp] theorem toDirectSum_ofDirectSum (x : ⨁ n, ⨂[R]^n M) : TensorAlgebra.toDirectSum (ofDirectSum x) = x := AlgHom.congr_fun toDirectSum_comp_ofDirectSum x /-- The tensor algebra is isomorphic to a direct sum of tensor powers. -/ @[simps!] def equivDirectSum : TensorAlgebra R M ≃ₐ[R] ⨁ n, ⨂[R]^n M := AlgEquiv.ofAlgHom toDirectSum ofDirectSum toDirectSum_comp_ofDirectSum ofDirectSum_comp_toDirectSum end TensorAlgebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/TensorAlgebra/Basic.lean
import Mathlib.Algebra.FreeAlgebra import Mathlib.Algebra.RingQuot import Mathlib.Algebra.TrivSqZeroExt import Mathlib.Algebra.Algebra.Operations import Mathlib.LinearAlgebra.Multilinear.Basic /-! # Tensor Algebras Given a commutative semiring `R`, and an `R`-module `M`, we construct the tensor algebra of `M`. This is the free `R`-algebra generated (`R`-linearly) by the module `M`. ## Notation 1. `TensorAlgebra R M` is the tensor algebra itself. It is endowed with an R-algebra structure. 2. `TensorAlgebra.ι R` is the canonical R-linear map `M → TensorAlgebra R M`. 3. Given a linear map `f : M → A` to an R-algebra `A`, `lift R f` is the lift of `f` to an `R`-algebra morphism `TensorAlgebra R M → A`. ## Theorems 1. `ι_comp_lift` states that the composition `(lift R f) ∘ (ι R)` is identical to `f`. 2. `lift_unique` states that whenever an R-algebra morphism `g : TensorAlgebra R M → A` is given whose composition with `ι R` is `f`, then one has `g = lift R f`. 3. `hom_ext` is a variant of `lift_unique` in the form of an extensionality theorem. 4. `lift_comp_ι` is a combination of `ι_comp_lift` and `lift_unique`. It states that the lift of the composition of an algebra morphism with `ι` is the algebra morphism itself. ## Implementation details As noted above, the tensor algebra of `M` is constructed as the free `R`-algebra generated by `M`, modulo the additional relations making the inclusion of `M` into an `R`-linear map. -/ variable (R : Type*) [CommSemiring R] variable (M : Type*) [AddCommMonoid M] [Module R M] namespace TensorAlgebra /-- An inductively defined relation on `Pre R M` used to force the initial algebra structure on the associated quotient. 
-/ inductive Rel : FreeAlgebra R M → FreeAlgebra R M → Prop -- force `ι` to be linear | add {a b : M} : Rel (FreeAlgebra.ι R (a + b)) (FreeAlgebra.ι R a + FreeAlgebra.ι R b) | smul {r : R} {a : M} : Rel (FreeAlgebra.ι R (r • a)) (algebraMap R (FreeAlgebra R M) r * FreeAlgebra.ι R a) end TensorAlgebra /-- The tensor algebra of the module `M` over the commutative semiring `R`. -/ def TensorAlgebra := RingQuot (TensorAlgebra.Rel R M) deriving Inhabited, Semiring -- `IsScalarTower` is not needed, but the instance isn't really canonical without it. @[nolint unusedArguments] instance instAlgebra {R A M} [CommSemiring R] [AddCommMonoid M] [CommSemiring A] [Algebra R A] [Module R M] [Module A M] [IsScalarTower R A M] : Algebra R (TensorAlgebra A M) := RingQuot.instAlgebra _ -- verify there is no diamond -- but doesn't work at `reducible_and_instances` https://github.com/leanprover-community/mathlib4/issues/10906 example : (Semiring.toNatAlgebra : Algebra ℕ (TensorAlgebra R M)) = instAlgebra := rfl instance {R S A M} [CommSemiring R] [CommSemiring S] [AddCommMonoid M] [CommSemiring A] [Algebra R A] [Algebra S A] [Module R M] [Module S M] [Module A M] [IsScalarTower R A M] [IsScalarTower S A M] : SMulCommClass R S (TensorAlgebra A M) := RingQuot.instSMulCommClass _ instance {R S A M} [CommSemiring R] [CommSemiring S] [AddCommMonoid M] [CommSemiring A] [SMul R S] [Algebra R A] [Algebra S A] [Module R M] [Module S M] [Module A M] [IsScalarTower R A M] [IsScalarTower S A M] [IsScalarTower R S A] : IsScalarTower R S (TensorAlgebra A M) := RingQuot.instIsScalarTower _ namespace TensorAlgebra instance {S : Type*} [CommRing S] [Module S M] : Ring (TensorAlgebra S M) := RingQuot.instRing (Rel S M) -- verify there is no diamond -- but doesn't work at `reducible_and_instances` https://github.com/leanprover-community/mathlib4/issues/10906 variable (S M : Type) [CommRing S] [AddCommGroup M] [Module S M] in example : (Ring.toIntAlgebra _ : Algebra ℤ (TensorAlgebra S M)) = instAlgebra := 
rfl variable {M} /-- The canonical linear map `M →ₗ[R] TensorAlgebra R M`. -/ irreducible_def ι : M →ₗ[R] TensorAlgebra R M := { toFun := fun m => RingQuot.mkAlgHom R _ (FreeAlgebra.ι R m) map_add' := fun x y => by rw [← map_add (RingQuot.mkAlgHom R (Rel R M))] exact RingQuot.mkAlgHom_rel R Rel.add map_smul' := fun r x => by rw [← map_smul (RingQuot.mkAlgHom R (Rel R M))] exact RingQuot.mkAlgHom_rel R Rel.smul } theorem ringQuot_mkAlgHom_freeAlgebra_ι_eq_ι (m : M) : RingQuot.mkAlgHom R (Rel R M) (FreeAlgebra.ι R m) = ι R m := by rw [ι] rfl /-- Given a linear map `f : M → A` where `A` is an `R`-algebra, `lift R f` is the unique lift of `f` to a morphism of `R`-algebras `TensorAlgebra R M → A`. -/ @[simps symm_apply] def lift {A : Type*} [Semiring A] [Algebra R A] : (M →ₗ[R] A) ≃ (TensorAlgebra R M →ₐ[R] A) := { toFun := RingQuot.liftAlgHom R ∘ fun f => ⟨FreeAlgebra.lift R (⇑f), fun x y (h : Rel R M x y) => by induction h <;> simp only [Algebra.smul_def, FreeAlgebra.lift_ι_apply, LinearMap.map_smulₛₗ, RingHom.id_apply, map_mul, AlgHom.commutes, map_add]⟩ invFun := fun F => F.toLinearMap.comp (ι R) left_inv := fun f => by rw [ι] ext1 x exact (RingQuot.liftAlgHom_mkAlgHom_apply _ _ _ _).trans (FreeAlgebra.lift_ι_apply f x) right_inv := fun F => RingQuot.ringQuot_ext' _ _ _ <| FreeAlgebra.hom_ext <| funext fun x => by rw [ι] exact (RingQuot.liftAlgHom_mkAlgHom_apply _ _ _ _).trans (FreeAlgebra.lift_ι_apply _ _) } variable {R} @[simp] theorem ι_comp_lift {A : Type*} [Semiring A] [Algebra R A] (f : M →ₗ[R] A) : (lift R f).toLinearMap.comp (ι R) = f := by convert (lift R).symm_apply_apply f @[simp] theorem lift_ι_apply {A : Type*} [Semiring A] [Algebra R A] (f : M →ₗ[R] A) (x) : lift R f (ι R x) = f x := by conv_rhs => rw [← ι_comp_lift f] rfl @[simp] theorem lift_unique {A : Type*} [Semiring A] [Algebra R A] (f : M →ₗ[R] A) (g : TensorAlgebra R M →ₐ[R] A) : g.toLinearMap.comp (ι R) = f ↔ g = lift R f := by rw [← (lift R).symm_apply_eq] simp only [lift, 
Equiv.coe_fn_symm_mk] -- Marking `TensorAlgebra` irreducible makes `Ring` instances inaccessible on quotients. -- https://leanprover.zulipchat.com/#narrow/stream/113488-general/topic/algebra.2Esemiring_to_ring.20breaks.20semimodule.20typeclass.20lookup/near/212580241 -- For now, we avoid this by not marking it irreducible. @[simp] theorem lift_comp_ι {A : Type*} [Semiring A] [Algebra R A] (g : TensorAlgebra R M →ₐ[R] A) : lift R (g.toLinearMap.comp (ι R)) = g := by rw [← lift_symm_apply] exact (lift R).apply_symm_apply g /-- See note [partially-applied ext lemmas]. -/ @[ext] theorem hom_ext {A : Type*} [Semiring A] [Algebra R A] {f g : TensorAlgebra R M →ₐ[R] A} (w : f.toLinearMap.comp (ι R) = g.toLinearMap.comp (ι R)) : f = g := by rw [← lift_symm_apply, ← lift_symm_apply] at w exact (lift R).symm.injective w -- This proof closely follows `FreeAlgebra.induction` /-- If `C` holds for the `algebraMap` of `r : R` into `TensorAlgebra R M`, the `ι` of `x : M`, and is preserved under addition and multiplication, then it holds for all of `TensorAlgebra R M`. 
-/ @[elab_as_elim] theorem induction {C : TensorAlgebra R M → Prop} (algebraMap : ∀ r, C (algebraMap R (TensorAlgebra R M) r)) (ι : ∀ x, C (ι R x)) (mul : ∀ a b, C a → C b → C (a * b)) (add : ∀ a b, C a → C b → C (a + b)) (a : TensorAlgebra R M) : C a := by -- the arguments are enough to construct a subalgebra, and a mapping into it from M let s : Subalgebra R (TensorAlgebra R M) := { carrier := C mul_mem' := @mul add_mem' := @add algebraMap_mem' := algebraMap } let of : M →ₗ[R] s := (TensorAlgebra.ι R).codRestrict (Subalgebra.toSubmodule s) ι -- the mapping through the subalgebra is the identity have of_id : AlgHom.id R (TensorAlgebra R M) = s.val.comp (lift R of) := by ext simp only [AlgHom.toLinearMap_id, LinearMap.id_comp, AlgHom.comp_toLinearMap, LinearMap.coe_comp, Function.comp_apply, AlgHom.toLinearMap_apply, lift_ι_apply, Subalgebra.coe_val] erw [LinearMap.codRestrict_apply] -- finding a proof is finding an element of the subalgebra rw [← AlgHom.id_apply (R := R) a, of_id] exact Subtype.prop (lift R of a) @[simp] theorem adjoin_range_ι : Algebra.adjoin R (Set.range (ι R (M := M))) = ⊤ := by refine top_unique fun x hx => ?_; clear hx induction x using induction with | algebraMap => exact algebraMap_mem _ _ | add x y hx hy => exact add_mem hx hy | mul x y hx hy => exact mul_mem hx hy | ι x => exact Algebra.subset_adjoin (Set.mem_range_self _) @[simp] theorem range_lift {A : Type*} [Semiring A] [Algebra R A] (f : M →ₗ[R] A) : (lift R f).range = Algebra.adjoin R (Set.range f) := by simp_rw [← Algebra.map_top, ← adjoin_range_ι, AlgHom.map_adjoin, ← Set.range_comp, Function.comp_def, lift_ι_apply] /-- The left-inverse of `algebraMap`. 
-/ def algebraMapInv : TensorAlgebra R M →ₐ[R] R := lift R (0 : M →ₗ[R] R) variable (M) theorem algebraMap_leftInverse : Function.LeftInverse algebraMapInv (algebraMap R <| TensorAlgebra R M) := fun x => by simp [algebraMapInv] @[simp] theorem algebraMap_inj (x y : R) : algebraMap R (TensorAlgebra R M) x = algebraMap R (TensorAlgebra R M) y ↔ x = y := (algebraMap_leftInverse M).injective.eq_iff @[simp] theorem algebraMap_eq_zero_iff (x : R) : algebraMap R (TensorAlgebra R M) x = 0 ↔ x = 0 := map_eq_zero_iff (algebraMap _ _) (algebraMap_leftInverse _).injective @[simp] theorem algebraMap_eq_one_iff (x : R) : algebraMap R (TensorAlgebra R M) x = 1 ↔ x = 1 := map_eq_one_iff (algebraMap _ _) (algebraMap_leftInverse _).injective /-- A `TensorAlgebra` over a nontrivial semiring is nontrivial. -/ instance [Nontrivial R] : Nontrivial (TensorAlgebra R M) := (algebraMap_leftInverse M).injective.nontrivial variable {M} /-- The canonical map from `TensorAlgebra R M` into `TrivSqZeroExt R M` that sends `TensorAlgebra.ι` to `TrivSqZeroExt.inr`. -/ def toTrivSqZeroExt [Module Rᵐᵒᵖ M] [IsCentralScalar R M] : TensorAlgebra R M →ₐ[R] TrivSqZeroExt R M := lift R (TrivSqZeroExt.inrHom R M) @[simp] theorem toTrivSqZeroExt_ι (x : M) [Module Rᵐᵒᵖ M] [IsCentralScalar R M] : toTrivSqZeroExt (ι R x) = TrivSqZeroExt.inr x := lift_ι_apply _ _ /-- The left-inverse of `ι`. As an implementation detail, we implement this using `TrivSqZeroExt` which has a suitable algebra structure. 
-/ def ιInv : TensorAlgebra R M →ₗ[R] M := by letI : Module Rᵐᵒᵖ M := Module.compHom _ ((RingHom.id R).fromOpposite mul_comm) haveI : IsCentralScalar R M := ⟨fun r m => rfl⟩ exact (TrivSqZeroExt.sndHom R M).comp toTrivSqZeroExt.toLinearMap theorem ι_leftInverse : Function.LeftInverse ιInv (ι R : M → TensorAlgebra R M) := fun x ↦ by simp [ιInv] variable (R) @[simp] theorem ι_inj (x y : M) : ι R x = ι R y ↔ x = y := ι_leftInverse.injective.eq_iff @[simp] theorem ι_eq_zero_iff (x : M) : ι R x = 0 ↔ x = 0 := by rw [← ι_inj R x 0, LinearMap.map_zero] variable {R} @[simp] theorem ι_eq_algebraMap_iff (x : M) (r : R) : ι R x = algebraMap R _ r ↔ x = 0 ∧ r = 0 := by refine ⟨fun h => ?_, ?_⟩ · letI : Module Rᵐᵒᵖ M := Module.compHom _ ((RingHom.id R).fromOpposite mul_comm) haveI : IsCentralScalar R M := ⟨fun r m => rfl⟩ have hf0 : toTrivSqZeroExt (ι R x) = (0, x) := lift_ι_apply _ _ rw [h, AlgHom.commutes] at hf0 have : r = 0 ∧ 0 = x := Prod.ext_iff.1 hf0 exact this.symm.imp_left Eq.symm · rintro ⟨rfl, rfl⟩ rw [LinearMap.map_zero, RingHom.map_zero] @[simp] theorem ι_ne_one [Nontrivial R] (x : M) : ι R x ≠ 1 := by rw [← (algebraMap R (TensorAlgebra R M)).map_one, Ne, ι_eq_algebraMap_iff] exact one_ne_zero ∘ And.right /-- The generators of the tensor algebra are disjoint from its scalars. -/ theorem ι_range_disjoint_one : Disjoint (LinearMap.range (ι R : M →ₗ[R] TensorAlgebra R M)) (1 : Submodule R (TensorAlgebra R M)) := by rw [Submodule.disjoint_def, Submodule.one_eq_range] rintro _ ⟨x, hx⟩ ⟨r, rfl⟩ rw [Algebra.linearMap_apply, ι_eq_algebraMap_iff] at hx rw [hx.2, map_zero] variable (R M) /-- Construct a product of `n` elements of the module within the tensor algebra. See also `PiTensorProduct.tprod`. 
-/ def tprod (n : ℕ) : MultilinearMap R (fun _ : Fin n => M) (TensorAlgebra R M) := (MultilinearMap.mkPiAlgebraFin R n (TensorAlgebra R M)).compLinearMap fun _ => ι R @[simp] theorem tprod_apply {n : ℕ} (x : Fin n → M) : tprod R M n x = (List.ofFn fun i => ι R (x i)).prod := rfl variable {R M} end TensorAlgebra namespace FreeAlgebra variable {R M} /-- The canonical image of the `FreeAlgebra` in the `TensorAlgebra`, which maps `FreeAlgebra.ι R x` to `TensorAlgebra.ι R x`. -/ def toTensor : FreeAlgebra R M →ₐ[R] TensorAlgebra R M := FreeAlgebra.lift R (TensorAlgebra.ι R) @[simp] theorem toTensor_ι (m : M) : FreeAlgebra.toTensor (FreeAlgebra.ι R m) = TensorAlgebra.ι R m := by simp [toTensor] end FreeAlgebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/TensorAlgebra/Grading.lean
import Mathlib.LinearAlgebra.TensorAlgebra.Basic import Mathlib.RingTheory.GradedAlgebra.Basic /-! # Results about the grading structure of the tensor algebra The main result is `TensorAlgebra.gradedAlgebra`, which says that the tensor algebra is a ℕ-graded algebra. -/ namespace TensorAlgebra variable {R M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] open scoped DirectSum variable (R M) /-- A version of `TensorAlgebra.ι` that maps directly into the graded structure. This is primarily an auxiliary construction used to provide `TensorAlgebra.gradedAlgebra`. -/ nonrec def GradedAlgebra.ι : M →ₗ[R] ⨁ i : ℕ, ↥(LinearMap.range (ι R : M →ₗ[_] _) ^ i) := DirectSum.lof R ℕ (fun i => ↥(LinearMap.range (ι R : M →ₗ[_] _) ^ i)) 1 ∘ₗ (ι R).codRestrict _ fun m => by simpa only [pow_one] using LinearMap.mem_range_self _ m theorem GradedAlgebra.ι_apply (m : M) : GradedAlgebra.ι R M m = DirectSum.of (fun (i : ℕ) => ↥(LinearMap.range (TensorAlgebra.ι R : M →ₗ[_] _) ^ i)) 1 ⟨TensorAlgebra.ι R m, by simpa only [pow_one] using LinearMap.mem_range_self _ m⟩ := rfl variable {R M} /-- The tensor algebra is graded by the powers of the submodule `(TensorAlgebra.ι R).range`. 
-/ instance gradedAlgebra : GradedAlgebra ((LinearMap.range (ι R : M →ₗ[R] TensorAlgebra R M) ^ ·) : ℕ → Submodule R _) := GradedAlgebra.ofAlgHom _ (lift R <| GradedAlgebra.ι R M) (by ext m dsimp only [LinearMap.comp_apply, AlgHom.toLinearMap_apply, AlgHom.comp_apply, AlgHom.id_apply] rw [lift_ι_apply, GradedAlgebra.ι_apply R M, DirectSum.coeAlgHom_of, Subtype.coe_mk]) fun i x => by obtain ⟨x, hx⟩ := x dsimp only [Subtype.coe_mk, DirectSum.lof_eq_of] induction hx using Submodule.pow_induction_on_left' with | algebraMap r => rw [AlgHom.commutes, DirectSum.algebraMap_apply]; rfl | add x y i hx hy ihx ihy => rw [map_add, ihx, ihy, ← AddMonoidHom.map_add] rfl | mem_mul m hm i x hx ih => obtain ⟨_, rfl⟩ := hm rw [map_mul, ih, lift_ι_apply, GradedAlgebra.ι_apply R M, DirectSum.of_mul_of] exact DirectSum.of_eq_of_gradedMonoid_eq (Sigma.subtype_ext (add_comm _ _) rfl) end TensorAlgebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/TensorAlgebra/Basis.lean
import Mathlib.LinearAlgebra.TensorAlgebra.Basic import Mathlib.LinearAlgebra.FreeAlgebra /-! # A basis for `TensorAlgebra R M` ## Main definitions * `TensorAlgebra.equivMonoidAlgebra b : TensorAlgebra R M ≃ₐ[R] FreeAlgebra R κ`: the isomorphism given by a basis `b : Basis κ R M`. * `Basis.tensorAlgebra b : Basis (FreeMonoid κ) R (TensorAlgebra R M)`: the basis on the tensor algebra given by a basis `b : Basis κ R M`. ## Main results * `TensorAlgebra.instFreeModule`: the tensor algebra over `M` is free when `M` is * `TensorAlgebra.rank_eq` -/ open Module namespace TensorAlgebra universe uκ uR uM variable {κ : Type uκ} {R : Type uR} {M : Type uM} section CommSemiring variable [CommSemiring R] [AddCommMonoid M] [Module R M] /-- A basis provides an algebra isomorphism with the free algebra, replacing each basis vector with its index. -/ noncomputable def equivFreeAlgebra (b : Basis κ R M) : TensorAlgebra R M ≃ₐ[R] FreeAlgebra R κ := AlgEquiv.ofAlgHom (TensorAlgebra.lift _ (Finsupp.linearCombination _ (FreeAlgebra.ι _) ∘ₗ b.repr.toLinearMap)) (FreeAlgebra.lift _ (ι R ∘ b)) (by ext; simp) (hom_ext <| b.ext fun i => by simp) @[simp] lemma equivFreeAlgebra_ι_apply (b : Basis κ R M) (i : κ) : equivFreeAlgebra b (ι R (b i)) = FreeAlgebra.ι R i := (TensorAlgebra.lift_ι_apply _ _).trans <| by simp @[simp] lemma equivFreeAlgebra_symm_ι (b : Basis κ R M) (i : κ) : (equivFreeAlgebra b).symm (FreeAlgebra.ι R i) = ι R (b i) := (equivFreeAlgebra b).toEquiv.symm_apply_eq.mpr <| equivFreeAlgebra_ι_apply b i |>.symm /-- A basis on `M` can be lifted to a basis on `TensorAlgebra R M` -/ @[simps! repr_apply] noncomputable def _root_.Module.Basis.tensorAlgebra (b : Basis κ R M) : Basis (FreeMonoid κ) R (TensorAlgebra R M) := (FreeAlgebra.basisFreeMonoid R κ).map <| (equivFreeAlgebra b).symm.toLinearEquiv /-- `TensorAlgebra R M` is free when `M` is. 
-/ instance instModuleFree [Module.Free R M] : Module.Free R (TensorAlgebra R M) := let ⟨⟨_κ, b⟩⟩ := Module.Free.exists_basis (R := R) (M := M) .of_basis b.tensorAlgebra /-- The `TensorAlgebra` of a free module over a commutative semiring with no zero-divisors has no zero-divisors. -/ instance instNoZeroDivisors [NoZeroDivisors R] [Module.Free R M] : NoZeroDivisors (TensorAlgebra R M) := have ⟨⟨_, b⟩⟩ := ‹Module.Free R M› (equivFreeAlgebra b).toMulEquiv.noZeroDivisors end CommSemiring section CommRing variable [CommRing R] [AddCommGroup M] [Module R M] /-- The `TensorAlgebra` of a free module over an integral domain is a domain. -/ instance instIsDomain [IsDomain R] [Module.Free R M] : IsDomain (TensorAlgebra R M) := NoZeroDivisors.to_isDomain _ attribute [pp_with_univ] Cardinal.lift open Cardinal in lemma rank_eq [Nontrivial R] [Module.Free R M] : Module.rank R (TensorAlgebra R M) = Cardinal.lift.{uR} (sum fun n ↦ Module.rank R M ^ n) := by let ⟨⟨κ, b⟩⟩ := Module.Free.exists_basis (R := R) (M := M) rw [(equivFreeAlgebra b).toLinearEquiv.rank_eq, FreeAlgebra.rank_eq, mk_list_eq_sum_pow, Basis.mk_eq_rank'' b] end CommRing end TensorAlgebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/PerfectPairing/Matrix.lean
import Mathlib.LinearAlgebra.PerfectPairing.Basic import Mathlib.LinearAlgebra.Matrix.Dual import Mathlib.LinearAlgebra.Matrix.ToLinearEquiv /-! # Perfect pairings and matrices The file contains results connecting perfect pairings and matrices. ## Main definitions * `Matrix.toPerfectPairing`: regard an invertible matrix as a perfect pairing. -/ namespace Matrix variable {R n : Type*} [CommRing R] [Fintype n] [DecidableEq n] (A : Matrix n n R) (h : Invertible A) set_option linter.deprecated false in /-- We may regard an invertible matrix as a perfect pairing. -/ @[deprecated "No replacement" (since := "2025-08-16")] def toPerfectPairing : PerfectPairing R (n → R) (n → R) := ((A.toLinearEquiv' h).trans (dotProductEquiv R n)).toPerfectPairing set_option linter.deprecated false in @[deprecated "No replacement" (since := "2025-08-16")] lemma toPerfectPairing_apply_apply (v w : n → R) : A.toPerfectPairing h v w = A *ᵥ v ⬝ᵥ w := rfl end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/PerfectPairing/Restrict.lean
import Mathlib.LinearAlgebra.PerfectPairing.Basic import Mathlib.LinearAlgebra.Matrix.Basis import Mathlib.LinearAlgebra.Matrix.BaseChange /-! # Restriction to submodules and restriction of scalars for perfect pairings. We provide API for restricting perfect pairings to submodules and for restricting their scalars. ## Main definitions * `PerfectPairing.restrict`: restriction of a perfect pairing to submodules. * `PerfectPairing.restrictScalars`: restriction of scalars for a perfect pairing taking values in a subring. * `PerfectPairing.restrictScalarsField`: simultaneously restrict both the domains and scalars of a perfect pairing with coefficients in a field. -/ open Function Module Set open Submodule (span subset_span) noncomputable section namespace LinearMap section CommRing variable {R M N : Type*} [CommRing R] [AddCommGroup M] [Module R M] [AddCommGroup N] [Module R N] (p : M →ₗ[R] N →ₗ[R] R) [p.IsPerfPair] section Restrict variable {M' N' : Type*} [AddCommGroup M'] [Module R M'] [AddCommGroup N'] [Module R N'] (i : M' →ₗ[R] M) (j : N' →ₗ[R] N) (hi : Injective i) (hj : Injective j) (hij : p.IsPerfectCompl (LinearMap.range i) (LinearMap.range j)) include hi hj hij private lemma restrict_aux : Bijective (p.compl₁₂ i j) := by refine ⟨LinearMap.ker_eq_bot.mp <| eq_bot_iff.mpr fun m hm ↦ ?_, fun f ↦ ?_⟩ · replace hm : i m ∈ (LinearMap.range j).dualAnnihilator.map p.toPerfPair.symm := by simp only [Submodule.mem_map, Submodule.mem_dualAnnihilator] refine ⟨p.toPerfPair (i m), ?_, LinearEquiv.symm_apply_apply _ _⟩ rintro - ⟨n, rfl⟩ simpa using LinearMap.congr_fun hm n suffices i m ∈ (⊥ : Submodule R M) by simpa [hi] using this simpa only [← hij.isCompl_left.inf_eq_bot, Submodule.mem_inf] using ⟨LinearMap.mem_range_self i m, hm⟩ · set F : Module.Dual R N := f ∘ₗ j.linearProjOfIsCompl _ hj hij.isCompl_right with hF have hF (n : N') : F (j n) = f n := by simp [hF] set m : M := p.toPerfPair.symm F with hm obtain ⟨-, y, ⟨m₀, rfl⟩, hy, hm'⟩ := 
Submodule.codisjoint_iff_exists_add_eq.mp hij.isCompl_left.codisjoint m refine ⟨m₀, LinearMap.ext fun n ↦ ?_⟩ replace hy : (p y) (j n) = 0 := by simp only [Submodule.mem_map, Submodule.mem_dualAnnihilator] at hy obtain ⟨g, hg, rfl⟩ := hy simpa using hg _ (LinearMap.mem_range_self j n) rw [hm, ← LinearEquiv.symm_apply_eq, map_add, LinearEquiv.symm_symm] at hm' simpa [← hF, ← LinearMap.congr_fun hm' (j n)] /-- The restriction of a perfect pairing to submodules is a perfect pairing. -/ lemma IsPerfPair.restrict : (p.compl₁₂ i j).IsPerfPair where bijective_left := p.restrict_aux i j hi hj hij bijective_right := p.flip.restrict_aux j i hj hi hij.flip set_option linter.deprecated false in /-- The restriction of a perfect pairing to submodules (expressed as injections to provide definitional control). -/ @[deprecated IsPerfPair.restrict (since := "2025-05-28")] def _root_.PerfectPairing.restrict : PerfectPairing R M' N' where toLinearMap := p.compl₁₂ i j bijective_left := p.restrict_aux i j hi hj hij bijective_right := p.flip.restrict_aux j i hj hi hij.flip end Restrict section RestrictScalars variable {S M' N' : Type*} [CommRing S] [Algebra S R] [Module S M] [Module S N] [IsScalarTower S R M] [IsScalarTower S R N] [NoZeroSMulDivisors S R] [Nontrivial R] [AddCommGroup M'] [Module S M'] [AddCommGroup N'] [Module S N'] (i : M' →ₗ[S] M) (j : N' →ₗ[S] N) private lemma restrictScalars_injective_aux (hi : Injective i) (hN : span R (LinearMap.range j : Set N) = ⊤) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap S R).range) : Injective ((LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap S R) (FaithfulSMul.algebraMap_injective S R) p hp)) := by let f := LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap S R) (FaithfulSMul.algebraMap_injective S R) p hp rw [← LinearMap.ker_eq_bot] refine (Submodule.eq_bot_iff _).mpr fun x (hx : f x = 0) ↦ ?_ replace hx (n : N) : p (i x) n = 0 := by have hn : n ∈ span R (LinearMap.range j : Set N) := hN ▸ Submodule.mem_top induction hn using 
Submodule.span_induction with | mem z hz => obtain ⟨n', rfl⟩ := hz simpa [f] using LinearMap.congr_fun hx n' | zero => simp | add => rw [map_add]; aesop | smul => rw [map_smul]; aesop rw [← i.map_eq_zero_iff hi, ← p.map_eq_zero_iff p.toPerfPair.injective] ext n simpa using hx n private lemma restrictScalars_surjective_aux (h : ∀ g : Module.Dual S N', ∃ m, (p.toPerfPair (i m)).restrictScalars S ∘ₗ j = Algebra.linearMap S R ∘ₗ g) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap S R).range) : Surjective ((LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap S R) (FaithfulSMul.algebraMap_injective S R) p hp)) := by rw [← LinearMap.range_eq_top] refine Submodule.eq_top_iff'.mpr fun g : Module.Dual S N' ↦ ?_ obtain ⟨m, hm⟩ := h g refine ⟨m, ?_⟩ ext n apply FaithfulSMul.algebraMap_injective S R change Algebra.linearMap S R _ = _ simpa using LinearMap.congr_fun hm n /-- Restricting a perfect pairing to a subring of the scalars results in a perfect pairing. -/ lemma IsPerfPair.restrictScalars (hi : Injective i) (hj : Injective j) (hM : span R (LinearMap.range i : Set M) = ⊤) (hN : span R (LinearMap.range j : Set N) = ⊤) (h₁ : ∀ g : Module.Dual S N', ∃ m, (p.toPerfPair (i m)).restrictScalars S ∘ₗ j = Algebra.linearMap S R ∘ₗ g) (h₂ : ∀ g : Module.Dual S M', ∃ n, (p.flip.toPerfPair (j n)).restrictScalars S ∘ₗ i = Algebra.linearMap S R ∘ₗ g) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap S R).range) : (LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap S R) (FaithfulSMul.algebraMap_injective S R) p hp).IsPerfPair where bijective_left := ⟨p.restrictScalars_injective_aux i j hi hN hp, p.restrictScalars_surjective_aux i j h₁ hp⟩ bijective_right := ⟨p.flip.restrictScalars_injective_aux j i hj hM fun m n ↦ hp n m, p.flip.restrictScalars_surjective_aux j i h₂ fun m n ↦ hp n m⟩ set_option linter.deprecated false in /-- Restriction of scalars for a perfect pairing taking values in a subring. 
-/ @[deprecated IsPerfPair.restrictScalars (since := "2025-05-28")] def _root_.PerfectPairing.restrictScalars (hi : Injective i) (hj : Injective j) (hM : span R (LinearMap.range i : Set M) = ⊤) (hN : span R (LinearMap.range j : Set N) = ⊤) (h₁ : ∀ g : Module.Dual S N', ∃ m, (p.toPerfPair (i m)).restrictScalars S ∘ₗ j = Algebra.linearMap S R ∘ₗ g) (h₂ : ∀ g : Module.Dual S M', ∃ n, (p.flip.toPerfPair (j n)).restrictScalars S ∘ₗ i = Algebra.linearMap S R ∘ₗ g) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap S R).range) : PerfectPairing S M' N' := { toLinearMap := LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap S R) (FaithfulSMul.algebraMap_injective S R) p hp bijective_left := ⟨p.restrictScalars_injective_aux i j hi hN hp, p.restrictScalars_surjective_aux i j h₁ hp⟩ bijective_right := ⟨p.flip.restrictScalars_injective_aux j i hj hM (fun m n ↦ hp n m), p.flip.restrictScalars_surjective_aux j i h₂ (fun m n ↦ hp n m)⟩} end RestrictScalars end CommRing section Field variable {K L M N : Type*} [Field K] [Field L] [Algebra K L] [AddCommGroup M] [AddCommGroup N] [Module L M] [Module L N] [Module K M] [Module K N] [IsScalarTower K L M] (p : M →ₗ[L] N →ₗ[L] L) [p.IsPerfPair] /-- If a perfect pairing over a field `L` takes values in a subfield `K` along two `K`-subspaces whose `L` span is full, then these subspaces induce a `K`-structure in the sense of [*Algebra I*, Bourbaki : Chapter II, §8.1 Definition 1][bourbaki1989]. 
-/ lemma exists_basis_basis_of_span_eq_top_of_mem_algebraMap (M' : Submodule K M) (N' : Submodule K N) (hM : span L (M' : Set M) = ⊤) (hN : span L (N' : Set N) = ⊤) (hp : ∀ᵉ (x ∈ M') (y ∈ N'), p x y ∈ (algebraMap K L).range) : ∃ (n : ℕ) (b : Basis (Fin n) L M) (b' : Basis (Fin n) K M'), ∀ i, b i = b' i := by classical have : IsReflexive L M := .of_isPerfPair p have : IsReflexive L N := .of_isPerfPair p.flip obtain ⟨v, hv₁, hv₂, hv₃⟩ := exists_linearIndependent L (M' : Set M) rw [hM] at hv₂ let b : Basis _ L M := Basis.mk hv₃ <| by rw [← hv₂, Subtype.range_coe_subtype, Set.setOf_mem_eq] have : Fintype v := Set.Finite.fintype <| Module.Finite.finite_basis b set v' : v → M' := fun i ↦ ⟨i, hv₁ (Subtype.coe_prop i)⟩ have hv' : LinearIndependent K v' := by replace hv₃ := hv₃.restrict_scalars (R := K) <| by simp_rw [← Algebra.algebraMap_eq_smul_one] exact FaithfulSMul.algebraMap_injective K L rw [show ((↑) : v → M) = M'.subtype ∘ v' by ext; simp [v']] at hv₃ exact hv₃.of_comp suffices span K (Set.range v') = ⊤ by let e := (Module.Finite.finite_basis b).equivFin let b' : Basis _ K M' := Basis.mk hv' (by rw [this]) exact ⟨_, b.reindex e, b'.reindex e, fun i ↦ by simp [b, b', v']⟩ suffices span K v = M' by apply Submodule.map_injective_of_injective M'.injective_subtype rw [Submodule.map_span, ← Set.image_univ, Set.image_image] simpa [v'] refine le_antisymm (Submodule.span_le.mpr hv₁) fun m hm ↦ ?_ obtain ⟨w, hw₁, hw₂, hw₃⟩ := exists_linearIndependent L (N' : Set N) rw [hN] at hw₂ let bN : Basis _ L N := Basis.mk hw₃ <| by rw [← hw₂, Subtype.range_coe_subtype, Set.setOf_mem_eq] have : Fintype w := Set.Finite.fintype <| Module.Finite.finite_basis bN have e : v ≃ w := Fintype.equivOfCardEq <| by rw [← Module.finrank_eq_card_basis b, ← Module.finrank_eq_card_basis bN, Module.finrank_of_isPerfPair p] let bM := bN.dualBasis.map p.toPerfPair.symm have hbM (j : w) (x : M) (hx : x ∈ M') : bM.repr x j = p x (j : N) := by simp [bM, bN] have hj (j : w) : bM.repr m j ∈ (algebraMap K 
L).range := (hbM _ _ hm) ▸ hp m hm j (hw₁ j.2) replace hp (i : w) (j : v) : (bN.dualBasis.map p.toPerfPair.symm).toMatrix b i j ∈ (algebraMap K L).fieldRange := by simp only [Basis.toMatrix, Basis.map_repr, LinearEquiv.symm_symm, LinearEquiv.trans_apply, Basis.dualBasis_repr] exact hp (b j) (by simpa [b] using hv₁ j.2) (bN i) (by simpa [bN] using hw₁ i.2) have hA (i j) : b.toMatrix bM i j ∈ (algebraMap K L).range := Matrix.mem_subfield_of_mul_eq_one_of_mem_subfield_left e _ (by simp [bM]) hp i j have h_span : span K v = span K (Set.range b) := by simp [b] rw [h_span, Basis.mem_span_iff_repr_mem, ← Basis.toMatrix_mulVec_repr bM b m] exact fun i ↦ Subring.sum_mem _ fun j _ ↦ Subring.mul_mem _ (hA i j) (hj j) variable {M' N' : Type*} [AddCommGroup M'] [AddCommGroup N'] [Module K M'] [Module K N'] [IsScalarTower K L N] (i : M' →ₗ[K] M) (j : N' →ₗ[K] N) (hi : Injective i) (hj : Injective j) include hi hj in /-- An auxiliary definition used only to simplify the construction of the more general definition `PerfectPairing.restrictScalarsField`. -/ private lemma restrictScalars_field_aux (hM : span L (LinearMap.range i : Set M) = ⊤) (hN : span L (LinearMap.range j : Set N) = ⊤) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap K L).range) : (LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap K L) (FaithfulSMul.algebraMap_injective K L) p hp).IsPerfPair := by suffices FiniteDimensional K M' from .of_injective (p.restrictScalars_injective_aux i j hi hN hp) (p.flip.restrictScalars_injective_aux j i hj hM (fun m n ↦ hp n m)) obtain ⟨n, -, b', -⟩ := p.exists_basis_basis_of_span_eq_top_of_mem_algebraMap _ _ hM hN <| by rintro - ⟨m, rfl⟩ - ⟨n, rfl⟩ exact hp m n have : FiniteDimensional K (LinearMap.range i) := b'.finiteDimensional_of_finite exact Finite.equiv (LinearEquiv.ofInjective i hi).symm include hi hj in /-- Simultaneously restrict both the domains and scalars of a perfect pairing with coefficients in a field. 
-/ lemma IsPerfPair.restrictScalars_of_field (hij : p.IsPerfectCompl (span L <| LinearMap.range i) (span L <| LinearMap.range j)) (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap K L).range) : (LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap K L) (FaithfulSMul.algebraMap_injective K L) p hp).IsPerfPair := by have : (p.compl₁₂ (span L <| .range i).subtype (span L <| .range j).subtype).IsPerfPair := .restrict _ _ _ (by simp) (by simp) (by simpa) exact restrictScalars_field_aux (p.compl₁₂ (span L <| .range i).subtype (span L <| .range j).subtype) ((LinearMap.range i).inclusionSpan L ∘ₗ i.rangeRestrict) ((LinearMap.range j).inclusionSpan L ∘ₗ j.rangeRestrict) (((LinearMap.range i).injective_inclusionSpan L).comp (by simpa)) (((LinearMap.range j).injective_inclusionSpan L).comp (by simpa)) (by rw [LinearMap.range_comp_of_range_eq_top _ (LinearMap.range_rangeRestrict _)] exact (LinearMap.range i).span_range_inclusionSpan L) (by rw [LinearMap.range_comp_of_range_eq_top _ (LinearMap.range_rangeRestrict _)] exact (LinearMap.range j).span_range_inclusionSpan L) fun x y ↦ LinearMap.BilinMap.apply_apply_mem_of_mem_span (LinearMap.range <| Algebra.linearMap K L) (range i) (range j) ((LinearMap.restrictScalarsₗ K L _ _ _).comp (p.restrictScalars K)) (by simpa) (i x) (j y) (subset_span <| by simp) (subset_span <| by simp) omit [p.IsPerfPair] in @[simp] lemma restrictScalarsField_apply_apply (hp : ∀ m n, p (i m) (j n) ∈ (algebraMap K L).range) (x : M') (y : N') : algebraMap K L (LinearMap.restrictScalarsRange₂ i j (Algebra.linearMap K L) (FaithfulSMul.algebraMap_injective K L) p hp x y) = p (i x) (j y) := LinearMap.restrictScalarsRange₂_apply i j (Algebra.linearMap K L) (FaithfulSMul.algebraMap_injective K L) p hp x y end Field end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/PerfectPairing/Basic.lean
import Mathlib.LinearAlgebra.Dual.Lemmas /-! # Perfect pairings This file defines perfect pairings of modules. A perfect pairing of two (left) modules may be defined either as: 1. A bilinear map `M × N → R` such that the induced maps `M → Dual R N` and `N → Dual R M` are both bijective. It follows from this that both `M` and `N` are reflexive modules. 2. A linear equivalence `N ≃ Dual R M` for which `M` is reflexive. (It then follows that `N` is reflexive.) In this file we provide a definition `IsPerfPair` corresponding to 1 above, together with logic to connect 1 and 2. -/ open Function Module namespace LinearMap variable {R K M M' N N' : Type*} [AddCommGroup M] [AddCommGroup N] [AddCommGroup M'] [AddCommGroup N'] section CommRing variable [CommRing R] [Module R M] [Module R M'] [Module R N] [Module R N'] {p : M →ₗ[R] N →ₗ[R] R} {x : M} {y : N} /-- For a ring `R` and two modules `M` and `N`, a perfect pairing is a bilinear map `M × N → R` that is bijective in both arguments. -/ @[ext] class IsPerfPair (p : M →ₗ[R] N →ₗ[R] R) where bijective_left (p) : Bijective p bijective_right (p) : Bijective p.flip /-- Given a perfect pairing between `M` and `N`, we may interchange the roles of `M` and `N`. -/ protected lemma IsPerfPair.flip (hp : p.IsPerfPair) : p.flip.IsPerfPair where bijective_left := IsPerfPair.bijective_right p bijective_right := IsPerfPair.bijective_left p variable [p.IsPerfPair] /-- Given a perfect pairing between `M` and `N`, we may interchange the roles of `M` and `N`. -/ instance flip.instIsPerfPair : p.flip.IsPerfPair := .flip ‹_› variable (p) /-- Turn a perfect pairing between `M` and `N` into an isomorphism between `M` and the dual of `N`.
-/ noncomputable def toPerfPair : M ≃ₗ[R] Dual R N := .ofBijective { toFun := _, map_add' x y := by simp, map_smul' r x := by simp } <| IsPerfPair.bijective_left p @[simp] lemma toLinearMap_toPerfPair (x : M) : p.toPerfPair x = p x := rfl @[simp] lemma toPerfPair_apply (x : M) (y : N) : p.toPerfPair x y = p x y := rfl @[simp] lemma apply_symm_toPerfPair_self (f : Dual R N) : p (p.toPerfPair.symm f) = f := p.toPerfPair.apply_symm_apply f @[simp] lemma apply_toPerfPair_flip (f : Dual R M) (x : M) : p x (p.flip.toPerfPair.symm f) = f x := congr($(p.flip.apply_symm_toPerfPair_self ..) x) include p in lemma _root_.Module.IsReflexive.of_isPerfPair : IsReflexive R M where bijective_dual_eval' := by convert (p.toPerfPair.trans p.flip.toPerfPair.dualMap.symm).bijective ext x f simp include p in lemma _root_.Module.finrank_of_isPerfPair [Module.Finite R M] [Module.Free R M] : finrank R M = finrank R N := ((Module.Free.chooseBasis R M).toDualEquiv.trans p.flip.toPerfPair.symm).finrank_eq /-- A reflexive module has a perfect pairing with its dual. -/ protected instance IsPerfPair.id [IsReflexive R M] : IsPerfPair (.id (R := R) (M := Dual R M)) where bijective_left := bijective_id bijective_right := bijective_dual_eval R M /-- A reflexive module has a perfect pairing with its dual. 
-/ instance IsPerfPair.dualEval [IsReflexive R M] : IsPerfPair (Dual.eval R M) := .flip .id instance IsPerfPair.compl₁₂ (eM : M' ≃ₗ[R] M) (eN : N' ≃ₗ[R] N) : (p.compl₁₂ eM eN : M' →ₗ[R] N' →ₗ[R] R).IsPerfPair := ⟨((LinearEquiv.congrLeft R R eN).symm.bijective.comp (IsPerfPair.bijective_left p)).comp eM.bijective, ((LinearEquiv.congrLeft R R eM).symm.bijective.comp (IsPerfPair.bijective_right p)).comp eN.bijective⟩ lemma IsPerfPair.congr (eM : M' ≃ₗ[R] M) (eN : N' ≃ₗ[R] N) (q : M' →ₗ[R] N' →ₗ[R] R) (H : q.compl₁₂ eM.symm eN.symm = p) : q.IsPerfPair := by obtain rfl : q = p.compl₁₂ eM eN := by subst H; ext; simp infer_instance lemma IsPerfPair.of_bijective (p : M →ₗ[R] N →ₗ[R] R) [IsReflexive R N] (h : Bijective p) : IsPerfPair p := inferInstanceAs ((LinearMap.id (R := R) (M := Dual R N)).compl₁₂ (LinearEquiv.ofBijective p h : M →ₗ[R] N →ₗ[R] R) (LinearEquiv.refl R N : N →ₗ[R] N)).IsPerfPair end CommRing section Field variable [Field K] [Module K M] [Module K N] {p : M →ₗ[K] N →ₗ[K] K} {x : M} {y : N} /-- If the coefficients are a field, and one of the spaces is finite-dimensional, it is sufficient to check only injectivity instead of bijectivity of the bilinear pairing. -/ lemma IsPerfPair.of_injective [FiniteDimensional K M] (h : Injective p) (h' : Injective p.flip) : p.IsPerfPair where bijective_left := ⟨h, by rwa [← p.flip_injective_iff₁]⟩ bijective_right := ⟨h', by have : FiniteDimensional K N := FiniteDimensional.of_injective p.flip h' rwa [← p.flip.flip_injective_iff₁, LinearMap.flip_flip]⟩ /-- If the coefficients are a field, and one of the spaces is finite-dimensional, it is sufficient to check only injectivity instead of bijectivity of the bilinear pairing. 
-/ lemma IsPerfPair.of_injective' [FiniteDimensional K N] (h : Injective p) (h' : Injective p.flip) : p.IsPerfPair := .flip <| .of_injective h' h end Field end LinearMap noncomputable section variable (R M N : Type*) [CommRing R] [AddCommGroup M] [Module R M] [AddCommGroup N] [Module R N] /-- A perfect pairing of two (left) modules over a commutative ring. -/ @[deprecated LinearMap.IsPerfPair (since := "2025-05-27")] structure PerfectPairing extends M →ₗ[R] N →ₗ[R] R where bijective_left : Bijective toLinearMap bijective_right : Bijective toLinearMap.flip /-- The underlying bilinear map of a perfect pairing. -/ add_decl_doc PerfectPairing.toLinearMap variable {R M N} namespace PerfectPairing set_option linter.deprecated false in /-- If the coefficients are a field, and one of the spaces is finite-dimensional, it is sufficient to check only injectivity instead of bijectivity of the bilinear form. -/ @[deprecated LinearMap.IsPerfPair.of_injective (since := "2025-05-27")] def mkOfInjective {K V W : Type*} [Field K] [AddCommGroup V] [Module K V] [AddCommGroup W] [Module K W] [FiniteDimensional K V] (B : V →ₗ[K] W →ₗ[K] K) (h : Injective B) (h' : Injective B.flip) : PerfectPairing K V W where toLinearMap := B bijective_left := ⟨h, by rwa [← B.flip_injective_iff₁]⟩ bijective_right := ⟨h', by have : FiniteDimensional K W := FiniteDimensional.of_injective B.flip h' rwa [← B.flip.flip_injective_iff₁, LinearMap.flip_flip]⟩ set_option linter.deprecated false in /-- If the coefficients are a field, and one of the spaces is finite-dimensional, it is sufficient to check only injectivity instead of bijectivity of the bilinear form. 
-/ @[deprecated LinearMap.IsPerfPair.of_injective' (since := "2025-05-27")] def mkOfInjective' {K V W : Type*} [Field K] [AddCommGroup V] [Module K V] [AddCommGroup W] [Module K W] [FiniteDimensional K W] (B : V →ₗ[K] W →ₗ[K] K) (h : Injective B) (h' : Injective B.flip) : PerfectPairing K V W where toLinearMap := B bijective_left := ⟨h, by have : FiniteDimensional K V := FiniteDimensional.of_injective B h rwa [← B.flip_injective_iff₁]⟩ bijective_right := ⟨h', by rwa [← B.flip.flip_injective_iff₁, LinearMap.flip_flip]⟩ set_option linter.deprecated false in @[deprecated "No replacement" (since := "2025-05-27")] instance instFunLike : FunLike (PerfectPairing R M N) M (N →ₗ[R] R) where coe f := f.toLinearMap coe_injective' x y h := by cases x; cases y; simpa using h set_option linter.deprecated false in @[deprecated "No replacement" (since := "2025-05-27")] lemma toLinearMap_apply (p : PerfectPairing R M N) (x : M) : p.toLinearMap x = p x := rfl set_option linter.deprecated false in @[deprecated "No replacement" (since := "2025-05-27")] lemma mk_apply_apply {f : M →ₗ[R] N →ₗ[R] R} {hl} {hr} {x : M} : (⟨f, hl, hr⟩ : PerfectPairing R M N) x = f x := rfl set_option linter.deprecated false variable (p : PerfectPairing R M N) /-- Given a perfect pairing between `M` and `N`, we may interchange the roles of `M` and `N`. -/ @[deprecated LinearMap.IsPerfPair.flip (since := "2025-05-27")] protected def flip : PerfectPairing R N M where toLinearMap := p.toLinearMap.flip bijective_left := p.bijective_right bijective_right := p.bijective_left @[deprecated "No replacement" (since := "2025-05-27")] lemma flip_apply_apply {x : M} {y : N} : p.flip y x = p x y := rfl @[deprecated "No replacement" (since := "2025-05-27")] lemma flip_flip : p.flip.flip = p := rfl /-- The linear equivalence from `M` to `Dual R N` induced by a perfect pairing. 
-/ @[deprecated LinearMap.toPerfPair (since := "2025-05-27")] def toDualLeft : M ≃ₗ[R] Dual R N := LinearEquiv.ofBijective p.toLinearMap p.bijective_left @[deprecated "No replacement" (since := "2025-05-27")] theorem toDualLeft_apply (a : M) : p.toDualLeft a = p a := rfl @[deprecated "No replacement" (since := "2025-05-27")] theorem apply_toDualLeft_symm_apply (f : Dual R N) (x : N) : p (p.toDualLeft.symm f) x = f x := by have h := LinearEquiv.apply_symm_apply p.toDualLeft f rw [toDualLeft_apply] at h exact congrFun (congrArg DFunLike.coe h) x /-- The linear equivalence from `N` to `Dual R M` induced by a perfect pairing. -/ @[deprecated LinearMap.toPerfPair (since := "2025-05-27")] def toDualRight : N ≃ₗ[R] Dual R M := toDualLeft p.flip @[deprecated "No replacement" (since := "2025-05-27")] theorem toDualRight_apply (a : N) : p.toDualRight a = p.flip a := rfl @[deprecated "No replacement" (since := "2025-05-27")] theorem apply_apply_toDualRight_symm (x : M) (f : Dual R M) : (p x) (p.toDualRight.symm f) = f x := by have h := LinearEquiv.apply_symm_apply p.toDualRight f rw [toDualRight_apply] at h exact congrFun (congrArg DFunLike.coe h) x @[deprecated "No replacement" (since := "2025-05-27")] theorem toDualLeft_of_toDualRight_symm (x : M) (f : Dual R M) : (p.toDualLeft x) (p.toDualRight.symm f) = f x := by rw [@toDualLeft_apply] exact apply_apply_toDualRight_symm p x f @[deprecated "No replacement" (since := "2025-05-27")] theorem toDualRight_symm_toDualLeft (x : M) : p.toDualRight.symm.dualMap (p.toDualLeft x) = Dual.eval R M x := by ext f simp only [LinearEquiv.dualMap_apply, Dual.eval_apply] exact toDualLeft_of_toDualRight_symm p x f @[deprecated "No replacement" (since := "2025-05-27")] theorem toDualRight_symm_comp_toDualLeft : p.toDualRight.symm.dualMap ∘ₗ (p.toDualLeft : M →ₗ[R] Dual R N) = Dual.eval R M := by ext1 x exact p.toDualRight_symm_toDualLeft x @[deprecated "No replacement" (since := "2025-05-27")] theorem bijective_toDualRight_symm_toDualLeft : 
Bijective (fun x => p.toDualRight.symm.dualMap (p.toDualLeft x)) := Bijective.comp (LinearEquiv.bijective p.toDualRight.symm.dualMap) (LinearEquiv.bijective p.toDualLeft) include p in @[deprecated Module.IsReflexive.of_isPerfPair (since := "2025-05-27")] theorem reflexive_left : IsReflexive R M where bijective_dual_eval' := by rw [← p.toDualRight_symm_comp_toDualLeft] exact p.bijective_toDualRight_symm_toDualLeft include p in @[deprecated Module.IsReflexive.of_isPerfPair (since := "2025-05-27")] theorem reflexive_right : IsReflexive R N := p.flip.reflexive_left @[deprecated "No replacement" (since := "2025-05-27")] instance : EquivLike (PerfectPairing R M N) M (Dual R N) where coe p := p.toDualLeft inv p := p.toDualLeft.symm left_inv p x := LinearEquiv.symm_apply_apply _ _ right_inv p x := LinearEquiv.apply_symm_apply _ _ coe_injective' p q h h' := by cases p cases q simp only [mk.injEq] ext m n simp only [DFunLike.coe_fn_eq] at h exact LinearMap.congr_fun (LinearEquiv.congr_fun h m) n @[deprecated "No replacement" (since := "2025-05-27")] instance : LinearEquivClass (PerfectPairing R M N) R M (Dual R N) where map_add p m₁ m₂ := p.toLinearMap.map_add m₁ m₂ map_smulₛₗ p t m := p.toLinearMap.map_smul t m include p in @[deprecated Module.finrank_of_isPerfPair (since := "2025-05-27")] theorem finrank_eq [Module.Finite R M] [Module.Free R M] : finrank R M = finrank R N := ((Module.Free.chooseBasis R M).toDualEquiv.trans p.toDualRight.symm).finrank_eq end PerfectPairing namespace LinearMap variable {p : M →ₗ[R] N →ₗ[R] R} [p.IsPerfPair] variable (p) in /-- Given a perfect pairing `p` between `M` and `N`, we say a pair of submodules `U` in `M` and `V` in `N` are perfectly complementary w.r.t. `p` if their dual annihilators are complementary, using `p` to identify `M` and `N` with dual spaces. 
-/ structure IsPerfectCompl (U : Submodule R M) (V : Submodule R N) : Prop where isCompl_left : IsCompl U (V.dualAnnihilator.map p.toPerfPair.symm) isCompl_right : IsCompl V (U.dualAnnihilator.map p.flip.toPerfPair.symm) namespace IsPerfectCompl variable {U : Submodule R M} {V : Submodule R N} protected lemma flip (h : p.IsPerfectCompl U V) : p.flip.IsPerfectCompl V U where isCompl_left := h.isCompl_right isCompl_right := h.isCompl_left @[simp] protected lemma flip_iff : p.flip.IsPerfectCompl V U ↔ p.IsPerfectCompl U V := ⟨fun h ↦ h.flip, fun h ↦ h.flip⟩ @[simp] lemma left_top_iff : p.IsPerfectCompl ⊤ V ↔ V = ⊤ := by refine ⟨fun h ↦ ?_, fun h ↦ ?_⟩ · exact eq_top_of_isCompl_bot <| by simpa using h.isCompl_right · rw [h] exact { isCompl_left := by simpa using isCompl_top_bot isCompl_right := by simpa using isCompl_top_bot } @[simp] lemma right_top_iff : p.IsPerfectCompl U ⊤ ↔ U = ⊤ := by rw [← IsPerfectCompl.flip_iff] exact left_top_iff end IsPerfectCompl end LinearMap variable [IsReflexive R M] set_option linter.deprecated false in /-- A reflexive module has a perfect pairing with its dual. -/ @[deprecated LinearMap.IsPerfPair.id (since := "2025-05-27")] def IsReflexive.toPerfectPairingDual : PerfectPairing R (Dual R M) M where toLinearMap := .id bijective_left := bijective_id bijective_right := bijective_dual_eval R M set_option linter.deprecated false in @[deprecated "No replacement" (since := "2025-05-27")] lemma IsReflexive.toPerfectPairingDual_apply {f : Dual R M} {x : M} : IsReflexive.toPerfectPairingDual (R := R) f x = f x := rfl variable (e : N ≃ₗ[R] Dual R M) namespace LinearEquiv /-- For a reflexive module `M`, an equivalence `N ≃ₗ[R] Dual R M` naturally yields an equivalence `M ≃ₗ[R] Dual R N`. Such equivalences are known as perfect pairings. 
-/ def flip : M ≃ₗ[R] Dual R N := (evalEquiv R M).trans e.dualMap @[simp] lemma coe_toLinearMap_flip : e.flip = (↑e : N →ₗ[R] Dual R M).flip := rfl @[simp] lemma flip_apply (m : M) (n : N) : e.flip m n = e n m := rfl lemma symm_flip : e.flip.symm = e.symm.dualMap.trans (evalEquiv R M).symm := rfl lemma trans_dualMap_symm_flip : e.trans e.flip.symm.dualMap = Dual.eval R N := by ext; simp [symm_flip] include e in /-- If `N` is in perfect pairing with `M`, then it is reflexive. -/ lemma isReflexive_of_equiv_dual_of_isReflexive : IsReflexive R N := by constructor rw [← trans_dualMap_symm_flip e] exact LinearEquiv.bijective _ @[simp] lemma flip_flip (h : IsReflexive R N := isReflexive_of_equiv_dual_of_isReflexive e) : e.flip.flip = e := by ext; rfl instance : e.toLinearMap.IsPerfPair where bijective_left := e.bijective bijective_right := e.flip.bijective set_option linter.deprecated false in /-- If `M` is reflexive then a linear equivalence `N ≃ Dual R M` is a perfect pairing. -/ @[deprecated "No replacement" (since := "2025-05-27")] def toPerfectPairing : PerfectPairing R N M where toLinearMap := e bijective_left := e.bijective bijective_right := e.flip.bijective end LinearEquiv set_option linter.deprecated false in /-- A perfect pairing induces a perfect pairing between dual spaces. 
-/ @[deprecated "No replacement" (since := "2025-05-27")] def PerfectPairing.dual (p : PerfectPairing R M N) : PerfectPairing R (Dual R M) (Dual R N) := let _i := p.reflexive_right (p.toDualRight.symm.trans (evalEquiv R N)).toPerfectPairing namespace Submodule open LinearEquiv @[simp] lemma dualCoannihilator_map_linearEquiv_flip (p : Submodule R M) : (p.map e.flip).dualCoannihilator = p.dualAnnihilator.map e.symm := by ext; simp [LinearEquiv.symm_apply_eq, Submodule.mem_dualCoannihilator] @[simp] lemma map_dualAnnihilator_linearEquiv_flip_symm (p : Submodule R N) : p.dualAnnihilator.map e.flip.symm = (p.map e).dualCoannihilator := by have : IsReflexive R N := e.isReflexive_of_equiv_dual_of_isReflexive rw [← dualCoannihilator_map_linearEquiv_flip, flip_flip] @[simp] lemma map_dualCoannihilator_linearEquiv_flip (p : Submodule R (Dual R M)) : p.dualCoannihilator.map e.flip = (p.map e.symm).dualAnnihilator := by have : IsReflexive R N := e.isReflexive_of_equiv_dual_of_isReflexive suffices (p.map e.symm).dualAnnihilator.map e.flip.symm = (p.dualCoannihilator.map e.flip).map e.flip.symm by exact (Submodule.map_injective_of_injective e.flip.symm.injective this).symm erw [← dualCoannihilator_map_linearEquiv_flip, flip_flip, ← map_comp, ← map_comp] simp [-coe_toLinearMap_flip] @[simp] lemma dualAnnihilator_map_linearEquiv_flip_symm (p : Submodule R (Dual R N)) : (p.map e.flip.symm).dualAnnihilator = p.dualCoannihilator.map e := by have : IsReflexive R N := e.isReflexive_of_equiv_dual_of_isReflexive rw [← map_dualCoannihilator_linearEquiv_flip, flip_flip] end Submodule
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Kronecker.lean
import Mathlib.Data.Matrix.Basic import Mathlib.Data.Matrix.Block import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.LinearAlgebra.Matrix.Trace import Mathlib.LinearAlgebra.TensorProduct.Basic import Mathlib.LinearAlgebra.TensorProduct.Associator import Mathlib.RingTheory.TensorProduct.Basic /-! # Kronecker product of matrices This defines the [Kronecker product](https://en.wikipedia.org/wiki/Kronecker_product). ## Main definitions * `Matrix.kroneckerMap`: A generalization of the Kronecker product: given a map `f : α → β → γ` and matrices `A` and `B` with coefficients in `α` and `β`, respectively, it is defined as the matrix with coefficients in `γ` such that `kroneckerMap f A B (i₁, i₂) (j₁, j₂) = f (A i₁ j₁) (B i₁ j₂)`. * `Matrix.kroneckerMapBilinear`: when `f` is bilinear, so is `kroneckerMap f`. ## Specializations * `Matrix.kronecker`: An alias of `kroneckerMap (*)`. Prefer using the notation. * `Matrix.kroneckerBilinear`: `Matrix.kronecker` is bilinear * `Matrix.kroneckerTMul`: An alias of `kroneckerMap (⊗ₜ)`. Prefer using the notation. * `Matrix.kroneckerTMulBilinear`: `Matrix.kroneckerTMul` is bilinear ## Notation These require `open Kronecker`: * `A ⊗ₖ B` for `kroneckerMap (*) A B`. Lemmas about this notation use the token `kronecker`. * `A ⊗ₖₜ B` and `A ⊗ₖₜ[R] B` for `kroneckerMap (⊗ₜ) A B`. Lemmas about this notation use the token `kroneckerTMul`. -/ namespace Matrix open scoped RightActions variable {R S α α' β β' γ γ' : Type*} variable {l m n p : Type*} {q r : Type*} {l' m' n' p' : Type*} section KroneckerMap /-- Produce a matrix with `f` applied to every pair of elements from `A` and `B`. 
-/ def kroneckerMap (f : α → β → γ) (A : Matrix l m α) (B : Matrix n p β) : Matrix (l × n) (m × p) γ := of fun (i : l × n) (j : m × p) => f (A i.1 j.1) (B i.2 j.2) -- TODO: set as an equation lemma for `kroneckerMap`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem kroneckerMap_apply (f : α → β → γ) (A : Matrix l m α) (B : Matrix n p β) (i j) : kroneckerMap f A B i j = f (A i.1 j.1) (B i.2 j.2) := rfl theorem kroneckerMap_transpose (f : α → β → γ) (A : Matrix l m α) (B : Matrix n p β) : kroneckerMap f Aᵀ Bᵀ = (kroneckerMap f A B)ᵀ := ext fun _ _ => rfl theorem kroneckerMap_map_left (f : α' → β → γ) (g : α → α') (A : Matrix l m α) (B : Matrix n p β) : kroneckerMap f (A.map g) B = kroneckerMap (fun a b => f (g a) b) A B := ext fun _ _ => rfl theorem kroneckerMap_map_right (f : α → β' → γ) (g : β → β') (A : Matrix l m α) (B : Matrix n p β) : kroneckerMap f A (B.map g) = kroneckerMap (fun a b => f a (g b)) A B := ext fun _ _ => rfl theorem kroneckerMap_map (f : α → β → γ) (g : γ → γ') (A : Matrix l m α) (B : Matrix n p β) : (kroneckerMap f A B).map g = kroneckerMap (fun a b => g (f a b)) A B := ext fun _ _ => rfl @[simp] theorem kroneckerMap_zero_left [Zero α] [Zero γ] (f : α → β → γ) (hf : ∀ b, f 0 b = 0) (B : Matrix n p β) : kroneckerMap f (0 : Matrix l m α) B = 0 := ext fun _ _ => hf _ @[simp] theorem kroneckerMap_zero_right [Zero β] [Zero γ] (f : α → β → γ) (hf : ∀ a, f a 0 = 0) (A : Matrix l m α) : kroneckerMap f A (0 : Matrix n p β) = 0 := ext fun _ _ => hf _ theorem kroneckerMap_add_left [Add α] [Add γ] (f : α → β → γ) (hf : ∀ a₁ a₂ b, f (a₁ + a₂) b = f a₁ b + f a₂ b) (A₁ A₂ : Matrix l m α) (B : Matrix n p β) : kroneckerMap f (A₁ + A₂) B = kroneckerMap f A₁ B + kroneckerMap f A₂ B := ext fun _ _ => hf _ _ _ theorem kroneckerMap_add_right [Add β] [Add γ] (f : α → β → γ) (hf : ∀ a b₁ b₂, f a (b₁ + b₂) = f a b₁ + f a b₂) (A : Matrix l m α) (B₁ B₂ : Matrix n p β) : kroneckerMap f A (B₁ + B₂) = kroneckerMap f A B₁ + kroneckerMap f A B₂ 
:= ext fun _ _ => hf _ _ _ theorem kroneckerMap_smul_left [SMul R α] [SMul R γ] (f : α → β → γ) (r : R) (hf : ∀ a b, f (r • a) b = r • f a b) (A : Matrix l m α) (B : Matrix n p β) : kroneckerMap f (r • A) B = r • kroneckerMap f A B := ext fun _ _ => hf _ _ theorem kroneckerMap_smul_right [SMul R β] [SMul R γ] (f : α → β → γ) (r : R) (hf : ∀ a b, f a (r • b) = r • f a b) (A : Matrix l m α) (B : Matrix n p β) : kroneckerMap f A (r • B) = r • kroneckerMap f A B := ext fun _ _ => hf _ _ theorem kroneckerMap_single_single [Zero α] [Zero β] [Zero γ] [DecidableEq l] [DecidableEq m] [DecidableEq n] [DecidableEq p] (i₁ : l) (j₁ : m) (i₂ : n) (j₂ : p) (f : α → β → γ) (hf₁ : ∀ b, f 0 b = 0) (hf₂ : ∀ a, f a 0 = 0) (a : α) (b : β) : kroneckerMap f (single i₁ j₁ a) (single i₂ j₂ b) = single (i₁, i₂) (j₁, j₂) (f a b) := by ext ⟨i₁', i₂'⟩ ⟨j₁', j₂'⟩ dsimp [single] aesop @[deprecated (since := "2025-05-05")] alias kroneckerMap_stdBasisMatrix_stdBasisMatrix := kroneckerMap_single_single theorem kroneckerMap_diagonal_diagonal [Zero α] [Zero β] [Zero γ] [DecidableEq m] [DecidableEq n] (f : α → β → γ) (hf₁ : ∀ b, f 0 b = 0) (hf₂ : ∀ a, f a 0 = 0) (a : m → α) (b : n → β) : kroneckerMap f (diagonal a) (diagonal b) = diagonal fun mn => f (a mn.1) (b mn.2) := by ext ⟨i₁, i₂⟩ ⟨j₁, j₂⟩ simp [diagonal, apply_ite f, ite_and, ite_apply, apply_ite (f (a i₁)), hf₁, hf₂] theorem kroneckerMap_diagonal_right [Zero β] [Zero γ] [DecidableEq n] (f : α → β → γ) (hf : ∀ a, f a 0 = 0) (A : Matrix l m α) (b : n → β) : kroneckerMap f A (diagonal b) = blockDiagonal fun i => A.map fun a => f a (b i) := by ext ⟨i₁, i₂⟩ ⟨j₁, j₂⟩ simp [diagonal, blockDiagonal, apply_ite (f (A i₁ j₁)), hf] theorem kroneckerMap_diagonal_left [Zero α] [Zero γ] [DecidableEq l] (f : α → β → γ) (hf : ∀ b, f 0 b = 0) (a : l → α) (B : Matrix m n β) : kroneckerMap f (diagonal a) B = Matrix.reindex (Equiv.prodComm _ _) (Equiv.prodComm _ _) (blockDiagonal fun i => B.map fun b => f (a i) b) := by ext ⟨i₁, i₂⟩ ⟨j₁, j₂⟩ simp [diagonal, 
blockDiagonal, apply_ite f, ite_apply, hf] @[simp] theorem kroneckerMap_one_one [Zero α] [Zero β] [Zero γ] [One α] [One β] [One γ] [DecidableEq m] [DecidableEq n] (f : α → β → γ) (hf₁ : ∀ b, f 0 b = 0) (hf₂ : ∀ a, f a 0 = 0) (hf₃ : f 1 1 = 1) : kroneckerMap f (1 : Matrix m m α) (1 : Matrix n n β) = 1 := (kroneckerMap_diagonal_diagonal _ hf₁ hf₂ _ _).trans <| by simp only [hf₃, diagonal_one] theorem kroneckerMap_reindex (f : α → β → γ) (el : l ≃ l') (em : m ≃ m') (en : n ≃ n') (ep : p ≃ p') (M : Matrix l m α) (N : Matrix n p β) : kroneckerMap f (reindex el em M) (reindex en ep N) = reindex (el.prodCongr en) (em.prodCongr ep) (kroneckerMap f M N) := by ext ⟨i, i'⟩ ⟨j, j'⟩ rfl theorem kroneckerMap_reindex_left (f : α → β → γ) (el : l ≃ l') (em : m ≃ m') (M : Matrix l m α) (N : Matrix n n' β) : kroneckerMap f (Matrix.reindex el em M) N = reindex (el.prodCongr (Equiv.refl _)) (em.prodCongr (Equiv.refl _)) (kroneckerMap f M N) := kroneckerMap_reindex _ _ _ (Equiv.refl _) (Equiv.refl _) _ _ theorem kroneckerMap_reindex_right (f : α → β → γ) (em : m ≃ m') (en : n ≃ n') (M : Matrix l l' α) (N : Matrix m n β) : kroneckerMap f M (reindex em en N) = reindex ((Equiv.refl _).prodCongr em) ((Equiv.refl _).prodCongr en) (kroneckerMap f M N) := kroneckerMap_reindex _ (Equiv.refl _) (Equiv.refl _) _ _ _ _ theorem kroneckerMap_assoc {δ ξ ω ω' : Type*} (f : α → β → γ) (g : γ → δ → ω) (f' : α → ξ → ω') (g' : β → δ → ξ) (A : Matrix l m α) (B : Matrix n p β) (D : Matrix q r δ) (φ : ω ≃ ω') (hφ : ∀ a b d, φ (g (f a b) d) = f' a (g' b d)) : (reindex (Equiv.prodAssoc l n q) (Equiv.prodAssoc m p r)).trans (Equiv.mapMatrix φ) (kroneckerMap g (kroneckerMap f A B) D) = kroneckerMap f' A (kroneckerMap g' B D) := ext fun _ _ => hφ _ _ _ theorem kroneckerMap_assoc₁ {δ ξ ω : Type*} (f : α → β → γ) (g : γ → δ → ω) (f' : α → ξ → ω) (g' : β → δ → ξ) (A : Matrix l m α) (B : Matrix n p β) (D : Matrix q r δ) (h : ∀ a b d, g (f a b) d = f' a (g' b d)) : reindex (Equiv.prodAssoc l n q) (Equiv.prodAssoc m p 
r) (kroneckerMap g (kroneckerMap f A B) D) = kroneckerMap f' A (kroneckerMap g' B D) := ext fun _ _ => h _ _ _ /-- When `f` is bilinear then `Matrix.kroneckerMap f` is also bilinear. -/ @[simps!] def kroneckerMapBilinear [Semiring S] [Semiring R] [AddCommMonoid α] [AddCommMonoid β] [AddCommMonoid γ] [Module R α] [Module R γ] [Module S β] [Module S γ] [SMulCommClass S R γ] (f : α →ₗ[R] β →ₗ[S] γ) : Matrix l m α →ₗ[R] Matrix n p β →ₗ[S] Matrix (l × n) (m × p) γ := LinearMap.mk₂' R S (kroneckerMap fun r s => f r s) (kroneckerMap_add_left _ <| f.map_add₂) (fun _ => kroneckerMap_smul_left _ _ <| f.map_smul₂ _) (kroneckerMap_add_right _ fun a => (f a).map_add) fun r => kroneckerMap_smul_right _ _ fun a => (f a).map_smul r /-- `Matrix.kroneckerMapBilinear` commutes with `*` if `f` does. This is primarily used with `R = ℕ` to prove `Matrix.mul_kronecker_mul`. -/ theorem kroneckerMapBilinear_mul_mul [Semiring S] [Semiring R] [Fintype m] [Fintype m'] [NonUnitalNonAssocSemiring α] [NonUnitalNonAssocSemiring β] [NonUnitalNonAssocSemiring γ] [Module R α] [Module R γ] [Module S β] [Module S γ] [SMulCommClass S R γ] (f : α →ₗ[R] β →ₗ[S] γ) (h_comm : ∀ a b a' b', f (a * b) (a' * b') = f a a' * f b b') (A : Matrix l m α) (B : Matrix m n α) (A' : Matrix l' m' β) (B' : Matrix m' n' β) : kroneckerMapBilinear f (A * B) (A' * B') = kroneckerMapBilinear f A A' * kroneckerMapBilinear f B B' := by ext ⟨i, i'⟩ ⟨j, j'⟩ simp only [kroneckerMapBilinear_apply_apply, mul_apply, ← Finset.univ_product_univ, Finset.sum_product, kroneckerMap_apply] simp_rw [map_sum f, LinearMap.sum_apply, map_sum, h_comm] /-- `trace` distributes over `Matrix.kroneckerMapBilinear`. This is primarily used with `R = ℕ` to prove `Matrix.trace_kronecker`. 
-/ theorem trace_kroneckerMapBilinear [Semiring S] [Semiring R] [Fintype m] [Fintype n] [AddCommMonoid α] [AddCommMonoid β] [AddCommMonoid γ] [Module R α] [Module R γ] [Module S β] [Module S γ] [SMulCommClass S R γ] (f : α →ₗ[R] β →ₗ[S] γ) (A : Matrix m m α) (B : Matrix n n β) : trace (kroneckerMapBilinear f A B) = f (trace A) (trace B) := by simp_rw [Matrix.trace, Matrix.diag, kroneckerMapBilinear_apply_apply, LinearMap.map_sum₂, map_sum, ← Finset.univ_product_univ, Finset.sum_product, kroneckerMap_apply] /-- `determinant` of `Matrix.kroneckerMapBilinear`. This is primarily used with `R = ℕ` to prove `Matrix.det_kronecker`. -/ theorem det_kroneckerMapBilinear [Semiring S] [Semiring R] [Fintype m] [Fintype n] [DecidableEq m] [DecidableEq n] [NonAssocSemiring α] [NonAssocSemiring β] [CommRing γ] [Module R α] [Module S β] [Module R γ] [Module S γ] [SMulCommClass S R γ] (f : α →ₗ[R] β →ₗ[S] γ) (h_comm : ∀ a b a' b', f (a * b) (a' * b') = f a a' * f b b') (A : Matrix m m α) (B : Matrix n n β) : det (kroneckerMapBilinear f A B) = det (A.map fun a => f a 1) ^ Fintype.card n * det (B.map fun b => f 1 b) ^ Fintype.card m := calc det (kroneckerMapBilinear f A B) = det (kroneckerMapBilinear f A 1 * kroneckerMapBilinear f 1 B) := by rw [← kroneckerMapBilinear_mul_mul f h_comm, Matrix.mul_one, Matrix.one_mul] _ = det (blockDiagonal fun (_ : n) => A.map fun a => f a 1) * det (blockDiagonal fun (_ : m) => B.map fun b => f 1 b) := by rw [det_mul, ← diagonal_one, ← diagonal_one, kroneckerMapBilinear_apply_apply, kroneckerMap_diagonal_right _ fun _ => _, kroneckerMapBilinear_apply_apply, kroneckerMap_diagonal_left _ fun _ => _, det_reindex_self] · intro; exact LinearMap.map_zero₂ _ _ · intro; exact map_zero _ _ = _ := by simp_rw [det_blockDiagonal, Finset.prod_const, Finset.card_univ] end KroneckerMap /-! ### Specialization to `Matrix.kroneckerMap (*)` -/ section Kronecker open Matrix /-- The Kronecker product. This is just a shorthand for `kroneckerMap (*)`. 
Prefer the notation `⊗ₖ` rather than this definition. -/ @[simp] def kronecker [Mul α] : Matrix l m α → Matrix n p α → Matrix (l × n) (m × p) α := kroneckerMap (· * ·) @[inherit_doc Matrix.kroneckerMap] scoped[Kronecker] infixl:100 " ⊗ₖ " => Matrix.kroneckerMap (· * ·) open Kronecker @[simp] theorem kronecker_apply [Mul α] (A : Matrix l m α) (B : Matrix n p α) (i₁ i₂ j₁ j₂) : (A ⊗ₖ B) (i₁, i₂) (j₁, j₂) = A i₁ j₁ * B i₂ j₂ := rfl /-- `Matrix.kronecker` as a bilinear map. -/ def kroneckerBilinear [CommSemiring R] [Semiring α] [Algebra R α] : Matrix l m α →ₗ[R] Matrix n p α →ₗ[R] Matrix (l × n) (m × p) α := kroneckerMapBilinear (Algebra.lmul R α) /-! What follows is a copy, in order, of every `Matrix.kroneckerMap` lemma above that has hypotheses which can be filled by properties of `*`. -/ theorem zero_kronecker [MulZeroClass α] (B : Matrix n p α) : (0 : Matrix l m α) ⊗ₖ B = 0 := kroneckerMap_zero_left _ zero_mul B theorem kronecker_zero [MulZeroClass α] (A : Matrix l m α) : A ⊗ₖ (0 : Matrix n p α) = 0 := kroneckerMap_zero_right _ mul_zero A theorem add_kronecker [Distrib α] (A₁ A₂ : Matrix l m α) (B : Matrix n p α) : (A₁ + A₂) ⊗ₖ B = A₁ ⊗ₖ B + A₂ ⊗ₖ B := kroneckerMap_add_left _ add_mul _ _ _ theorem kronecker_add [Distrib α] (A : Matrix l m α) (B₁ B₂ : Matrix n p α) : A ⊗ₖ (B₁ + B₂) = A ⊗ₖ B₁ + A ⊗ₖ B₂ := kroneckerMap_add_right _ mul_add _ _ _ theorem smul_kronecker [Mul α] [SMul R α] [IsScalarTower R α α] (r : R) (A : Matrix l m α) (B : Matrix n p α) : (r • A) ⊗ₖ B = r • A ⊗ₖ B := kroneckerMap_smul_left _ _ (fun _ _ => smul_mul_assoc _ _ _) _ _ theorem kronecker_smul [Mul α] [SMul R α] [SMulCommClass R α α] (r : R) (A : Matrix l m α) (B : Matrix n p α) : A ⊗ₖ (r • B) = r • A ⊗ₖ B := kroneckerMap_smul_right _ _ (fun _ _ => mul_smul_comm _ _ _) _ _ theorem single_kronecker_single [MulZeroClass α] [DecidableEq l] [DecidableEq m] [DecidableEq n] [DecidableEq p] (ia : l) (ja : m) (ib : n) (jb : p) (a b : α) : single ia ja a ⊗ₖ single ib jb b = single (ia, ib) (ja, jb) (a 
* b) := kroneckerMap_single_single _ _ _ _ _ zero_mul mul_zero _ _ @[deprecated (since := "2025-05-05")] alias stdBasisMatrix_kronecker_stdBasisMatrix := single_kronecker_single theorem diagonal_kronecker_diagonal [MulZeroClass α] [DecidableEq m] [DecidableEq n] (a : m → α) (b : n → α) : diagonal a ⊗ₖ diagonal b = diagonal fun mn => a mn.1 * b mn.2 := kroneckerMap_diagonal_diagonal _ zero_mul mul_zero _ _ theorem kronecker_diagonal [MulZeroClass α] [DecidableEq n] (A : Matrix l m α) (b : n → α) : A ⊗ₖ diagonal b = blockDiagonal fun i => A <• b i := kroneckerMap_diagonal_right _ mul_zero _ _ theorem diagonal_kronecker [MulZeroClass α] [DecidableEq l] (a : l → α) (B : Matrix m n α) : diagonal a ⊗ₖ B = Matrix.reindex (Equiv.prodComm _ _) (Equiv.prodComm _ _) (blockDiagonal fun i => a i • B) := kroneckerMap_diagonal_left _ zero_mul _ _ @[simp] theorem natCast_kronecker_natCast [NonAssocSemiring α] [DecidableEq m] [DecidableEq n] (a b : ℕ) : (a : Matrix m m α) ⊗ₖ (b : Matrix n n α) = ↑(a * b) := (diagonal_kronecker_diagonal _ _).trans <| by simp_rw [← Nat.cast_mul]; rfl theorem kronecker_natCast [NonAssocSemiring α] [DecidableEq n] (A : Matrix l m α) (b : ℕ) : A ⊗ₖ (b : Matrix n n α) = blockDiagonal fun _ => b • A := kronecker_diagonal _ _ |>.trans <| by congr! 2 ext simp [(Nat.cast_commute b _).eq] theorem natCast_kronecker [NonAssocSemiring α] [DecidableEq l] (a : ℕ) (B : Matrix m n α) : (a : Matrix l l α) ⊗ₖ B = Matrix.reindex (Equiv.prodComm _ _) (Equiv.prodComm _ _) (blockDiagonal fun _ => a • B) := diagonal_kronecker _ _ |>.trans <| by congr! 
2 ext simp [(Nat.cast_commute a _).eq] theorem kronecker_ofNat [NonAssocSemiring α] [DecidableEq n] (A : Matrix l m α) (b : ℕ) [b.AtLeastTwo] : A ⊗ₖ (ofNat(b) : Matrix n n α) = blockDiagonal fun _ => A <• (ofNat(b) : α) := kronecker_diagonal _ _ theorem ofNat_kronecker [NonAssocSemiring α] [DecidableEq l] (a : ℕ) [a.AtLeastTwo] (B : Matrix m n α) : (ofNat(a) : Matrix l l α) ⊗ₖ B = Matrix.reindex (.prodComm _ _) (.prodComm _ _) (blockDiagonal fun _ => (ofNat(a) : α) • B) := diagonal_kronecker _ _ theorem one_kronecker_one [MulZeroOneClass α] [DecidableEq m] [DecidableEq n] : (1 : Matrix m m α) ⊗ₖ (1 : Matrix n n α) = 1 := kroneckerMap_one_one _ zero_mul mul_zero (one_mul _) theorem kronecker_one [MulZeroOneClass α] [DecidableEq n] (A : Matrix l m α) : A ⊗ₖ (1 : Matrix n n α) = blockDiagonal fun _ => A := (kronecker_diagonal _ _).trans <| congr_arg _ <| funext fun _ => Matrix.ext fun _ _ => mul_one _ theorem one_kronecker [MulZeroOneClass α] [DecidableEq l] (B : Matrix m n α) : (1 : Matrix l l α) ⊗ₖ B = Matrix.reindex (Equiv.prodComm _ _) (Equiv.prodComm _ _) (blockDiagonal fun _ => B) := (diagonal_kronecker _ _).trans <| congr_arg _ <| congr_arg _ <| funext fun _ => Matrix.ext fun _ _ => one_mul _ theorem mul_kronecker_mul [Fintype m] [Fintype m'] [CommSemiring α] (A : Matrix l m α) (B : Matrix m n α) (A' : Matrix l' m' α) (B' : Matrix m' n' α) : (A * B) ⊗ₖ (A' * B') = A ⊗ₖ A' * B ⊗ₖ B' := kroneckerMapBilinear_mul_mul (Algebra.lmul ℕ α).toLinearMap mul_mul_mul_comm A B A' B' -- simp-normal form is `kronecker_assoc'` theorem kronecker_assoc [Semigroup α] (A : Matrix l m α) (B : Matrix n p α) (C : Matrix q r α) : reindex (Equiv.prodAssoc l n q) (Equiv.prodAssoc m p r) (A ⊗ₖ B ⊗ₖ C) = A ⊗ₖ (B ⊗ₖ C) := kroneckerMap_assoc₁ _ _ _ _ A B C mul_assoc @[simp] theorem kronecker_assoc' [Semigroup α] (A : Matrix l m α) (B : Matrix n p α) (C : Matrix q r α) : submatrix (A ⊗ₖ B ⊗ₖ C) (Equiv.prodAssoc l n q).symm (Equiv.prodAssoc m p r).symm = A ⊗ₖ (B ⊗ₖ C) := kroneckerMap_assoc₁ _ 
_ _ _ A B C mul_assoc theorem trace_kronecker [Fintype m] [Fintype n] [Semiring α] (A : Matrix m m α) (B : Matrix n n α) : trace (A ⊗ₖ B) = trace A * trace B := trace_kroneckerMapBilinear (Algebra.lmul ℕ α).toLinearMap _ _ theorem det_kronecker [Fintype m] [Fintype n] [DecidableEq m] [DecidableEq n] [CommRing R] (A : Matrix m m R) (B : Matrix n n R) : det (A ⊗ₖ B) = det A ^ Fintype.card n * det B ^ Fintype.card m := by refine (det_kroneckerMapBilinear (Algebra.lmul ℕ R).toLinearMap mul_mul_mul_comm _ _).trans ?_ congr 3 · ext i j exact mul_one _ · ext i j exact one_mul _ theorem conjTranspose_kronecker [CommMagma R] [StarMul R] (x : Matrix l m R) (y : Matrix n p R) : (x ⊗ₖ y)ᴴ = xᴴ ⊗ₖ yᴴ := by ext; simp theorem conjTranspose_kronecker' [Mul R] [StarMul R] (x : Matrix l m R) (y : Matrix n p R) : (x ⊗ₖ y)ᴴ = (yᴴ ⊗ₖ xᴴ).submatrix Prod.swap Prod.swap := by ext; simp end Kronecker /-! ### Specialization to `Matrix.kroneckerMap (⊗ₜ)` -/ section KroneckerTmul variable (R) open TensorProduct open Matrix TensorProduct section Module variable [CommSemiring R] variable [AddCommMonoid α] [AddCommMonoid β] [AddCommMonoid γ] variable [Module R α] [Module R β] [Module R γ] /-- The Kronecker tensor product. This is just a shorthand for `kroneckerMap (⊗ₜ)`. Prefer the notation `⊗ₖₜ` rather than this definition. -/ @[simp] def kroneckerTMul : Matrix l m α → Matrix n p β → Matrix (l × n) (m × p) (α ⊗[R] β) := kroneckerMap (· ⊗ₜ ·) @[inherit_doc kroneckerTMul] scoped[Kronecker] infixl:100 " ⊗ₖₜ " => Matrix.kroneckerMap (· ⊗ₜ ·) @[inherit_doc kroneckerTMul] scoped[Kronecker] notation:100 x " ⊗ₖₜ[" R "] " y:100 => Matrix.kroneckerMap (TensorProduct.tmul R) x y open Kronecker @[simp] theorem kroneckerTMul_apply (A : Matrix l m α) (B : Matrix n p β) (i₁ i₂ j₁ j₂) : (A ⊗ₖₜ B) (i₁, i₂) (j₁, j₂) = A i₁ j₁ ⊗ₜ[R] B i₂ j₂ := rfl variable (S) in /-- `Matrix.kronecker` as a bilinear map. 
-/ def kroneckerTMulBilinear [Semiring S] [Module S α] [SMulCommClass R S α] : Matrix l m α →ₗ[S] Matrix n p β →ₗ[R] Matrix (l × n) (m × p) (α ⊗[R] β) := kroneckerMapBilinear (AlgebraTensorModule.mk _ _ α β) @[simp] theorem kroneckerTMulBilinear_apply [Semiring S] [Module S α] [SMulCommClass R S α] (A : Matrix l m α) (B : Matrix n p β) : kroneckerTMulBilinear R S A B = A ⊗ₖₜ[R] B := rfl /-! What follows is a copy, in order, of every `Matrix.kroneckerMap` lemma above that has hypotheses which can be filled by properties of `⊗ₜ`. -/ theorem zero_kroneckerTMul (B : Matrix n p β) : (0 : Matrix l m α) ⊗ₖₜ[R] B = 0 := kroneckerMap_zero_left _ (zero_tmul α) B theorem kroneckerTMul_zero (A : Matrix l m α) : A ⊗ₖₜ[R] (0 : Matrix n p β) = 0 := kroneckerMap_zero_right _ (tmul_zero β) A theorem add_kroneckerTMul (A₁ A₂ : Matrix l m α) (B : Matrix n p α) : (A₁ + A₂) ⊗ₖₜ[R] B = A₁ ⊗ₖₜ B + A₂ ⊗ₖₜ B := kroneckerMap_add_left _ add_tmul _ _ _ theorem kroneckerTMul_add (A : Matrix l m α) (B₁ B₂ : Matrix n p β) : A ⊗ₖₜ[R] (B₁ + B₂) = A ⊗ₖₜ B₁ + A ⊗ₖₜ B₂ := kroneckerMap_add_right _ tmul_add _ _ _ theorem smul_kroneckerTMul [Monoid S] [DistribMulAction S α] [SMulCommClass R S α] (r : S) (A : Matrix l m α) (B : Matrix n p β) : (r • A) ⊗ₖₜ[R] B = r • A ⊗ₖₜ[R] B := kroneckerMap_smul_left _ _ (fun _ _ => smul_tmul' _ _ _) _ _ theorem kroneckerTMul_smul [Monoid S] [DistribMulAction S α] [DistribMulAction S β] [SMul S R] [SMulCommClass R S α] [IsScalarTower S R α] [IsScalarTower S R β] (r : S) (A : Matrix l m α) (B : Matrix n p β) : A ⊗ₖₜ[R] (r • B) = r • A ⊗ₖₜ[R] B := kroneckerMap_smul_right _ _ (fun _ _ => tmul_smul _ _ _) _ _ theorem single_kroneckerTMul_single [DecidableEq l] [DecidableEq m] [DecidableEq n] [DecidableEq p] (i₁ : l) (j₁ : m) (i₂ : n) (j₂ : p) (a : α) (b : β) : single i₁ j₁ a ⊗ₖₜ[R] single i₂ j₂ b = single (i₁, i₂) (j₁, j₂) (a ⊗ₜ b) := kroneckerMap_single_single _ _ _ _ _ (zero_tmul _) (tmul_zero _) _ _ @[deprecated (since := "2025-05-05")] alias 
stdBasisMatrix_kroneckerTMul_stdBasisMatrix := single_kroneckerTMul_single theorem diagonal_kroneckerTMul_diagonal [DecidableEq m] [DecidableEq n] (a : m → α) (b : n → β) : diagonal a ⊗ₖₜ[R] diagonal b = diagonal fun mn => a mn.1 ⊗ₜ b mn.2 := kroneckerMap_diagonal_diagonal _ (zero_tmul _) (tmul_zero _) _ _ theorem kroneckerTMul_diagonal [DecidableEq n] (A : Matrix l m α) (b : n → β) : A ⊗ₖₜ[R] diagonal b = blockDiagonal fun i => A.map fun a => a ⊗ₜ[R] b i := kroneckerMap_diagonal_right _ (tmul_zero _) _ _ theorem diagonal_kroneckerTMul [DecidableEq l] (a : l → α) (B : Matrix m n β) : diagonal a ⊗ₖₜ[R] B = Matrix.reindex (Equiv.prodComm _ _) (Equiv.prodComm _ _) (blockDiagonal fun i => B.map fun b => a i ⊗ₜ[R] b) := kroneckerMap_diagonal_left _ (zero_tmul _) _ _ -- simp-normal form is `kroneckerTMul_assoc'` theorem kroneckerTMul_assoc (A : Matrix l m α) (B : Matrix n p β) (C : Matrix q r γ) : reindex (Equiv.prodAssoc l n q) (Equiv.prodAssoc m p r) (((A ⊗ₖₜ[R] B) ⊗ₖₜ[R] C).map (TensorProduct.assoc R α β γ)) = A ⊗ₖₜ[R] B ⊗ₖₜ[R] C := ext fun _ _ => assoc_tmul _ _ _ @[simp] theorem kroneckerTMul_assoc' (A : Matrix l m α) (B : Matrix n p β) (C : Matrix q r γ) : submatrix (((A ⊗ₖₜ[R] B) ⊗ₖₜ[R] C).map (TensorProduct.assoc R α β γ)) (Equiv.prodAssoc l n q).symm (Equiv.prodAssoc m p r).symm = A ⊗ₖₜ[R] B ⊗ₖₜ[R] C := ext fun _ _ => assoc_tmul _ _ _ theorem trace_kroneckerTMul [Fintype m] [Fintype n] (A : Matrix m m α) (B : Matrix n n β) : trace (A ⊗ₖₜ[R] B) = trace A ⊗ₜ[R] trace B := trace_kroneckerMapBilinear (TensorProduct.mk R α β) _ _ theorem conjTranspose_kroneckerTMul [StarRing R] [StarAddMonoid α] [StarAddMonoid β] [StarModule R α] [StarModule R β] (x : Matrix l m α) (y : Matrix n p β) : (x ⊗ₖₜ[R] y)ᴴ = xᴴ ⊗ₖₜ[R] yᴴ := by ext; simp end Module section Algebra open Kronecker open Algebra.TensorProduct section Semiring variable [CommSemiring R] @[simp] theorem one_kroneckerTMul_one [AddCommMonoidWithOne α] [AddCommMonoidWithOne β] [Module R α] [Module R β] [DecidableEq m] 
[DecidableEq n] : (1 : Matrix m m α) ⊗ₖₜ[R] (1 : Matrix n n β) = 1 := kroneckerMap_one_one _ (zero_tmul _) (tmul_zero _) rfl unseal mul in theorem mul_kroneckerTMul_mul [NonUnitalSemiring α] [NonUnitalSemiring β] [Module R α] [Module R β] [IsScalarTower R α α] [SMulCommClass R α α] [IsScalarTower R β β] [SMulCommClass R β β] [Fintype m] [Fintype m'] (A : Matrix l m α) (B : Matrix m n α) (A' : Matrix l' m' β) (B' : Matrix m' n' β) : (A * B) ⊗ₖₜ[R] (A' * B') = A ⊗ₖₜ[R] A' * B ⊗ₖₜ[R] B' := kroneckerMapBilinear_mul_mul (TensorProduct.mk R α β) tmul_mul_tmul A B A' B' end Semiring section CommRing variable [CommRing R] [CommRing α] [CommRing β] [Algebra R α] [Algebra R β] unseal mul in theorem det_kroneckerTMul [Fintype m] [Fintype n] [DecidableEq m] [DecidableEq n] (A : Matrix m m α) (B : Matrix n n β) : det (A ⊗ₖₜ[R] B) = (det A ^ Fintype.card n) ⊗ₜ[R] (det B ^ Fintype.card m) := by refine (det_kroneckerMapBilinear (TensorProduct.mk R α β) tmul_mul_tmul _ _).trans ?_ simp -eta only [mk_apply, ← includeLeft_apply (S := R), ← includeRight_apply] simp only [← AlgHom.mapMatrix_apply, ← AlgHom.map_det] simp only [includeLeft_apply, includeRight_apply, tmul_pow, tmul_mul_tmul, one_pow, _root_.mul_one, _root_.one_mul] end CommRing end Algebra -- insert lemmas specific to `kroneckerTMul` below this line end KroneckerTmul end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/ConjTranspose.lean
import Mathlib.Algebra.BigOperators.GroupWithZero.Action import Mathlib.Algebra.BigOperators.Ring.Finset import Mathlib.Algebra.BigOperators.RingEquiv import Mathlib.Algebra.Module.Pi import Mathlib.Algebra.Star.BigOperators import Mathlib.Algebra.Star.Module import Mathlib.Data.Fintype.BigOperators import Mathlib.Data.Matrix.Basis import Mathlib.Data.Matrix.Mul /-! # Matrices over star rings. ## Notation The scope `Matrix` gives the following notation: * `ᴴ` for `Matrix.conjTranspose` -/ universe u u' v w variable {l m n o : Type*} {m' : o → Type*} {n' : o → Type*} variable {R : Type*} {S : Type*} {α : Type v} {β : Type w} {γ : Type*} namespace Matrix /-- The conjugate transpose of a matrix defined in term of `star`. -/ def conjTranspose [Star α] (M : Matrix m n α) : Matrix n m α := M.transpose.map star @[inherit_doc] scoped postfix:1024 "ᴴ" => Matrix.conjTranspose @[simp] lemma conjTranspose_single [DecidableEq n] [DecidableEq m] [AddMonoid α] [StarAddMonoid α] (i : m) (j : n) (a : α) : (single i j a)ᴴ = single j i (star a) := by change (single i j a).transpose.map starAddEquiv = single j i (star a) simp @[deprecated (since := "2025-05-05")] alias conjTranspose_stdBasisMatrix := conjTranspose_single section Diagonal variable [DecidableEq n] @[simp] theorem diagonal_conjTranspose [AddMonoid α] [StarAddMonoid α] (v : n → α) : (diagonal v)ᴴ = diagonal (star v) := by rw [conjTranspose, diagonal_transpose, diagonal_map (star_zero _)] rfl end Diagonal section Diag @[simp] theorem diag_conjTranspose [Star α] (A : Matrix n n α) : diag Aᴴ = star (diag A) := rfl end Diag section DotProduct variable [Fintype m] [Fintype n] section StarRing variable [NonUnitalSemiring α] [StarRing α] (v w : m → α) theorem star_dotProduct_star : star v ⬝ᵥ star w = star (w ⬝ᵥ v) := by simp [dotProduct] theorem star_dotProduct : star v ⬝ᵥ w = star (star w ⬝ᵥ v) := by simp [dotProduct] theorem dotProduct_star : v ⬝ᵥ star w = star (w ⬝ᵥ star v) := by simp [dotProduct] end StarRing end DotProduct 
section NonUnitalSemiring variable [NonUnitalSemiring α] theorem star_mulVec [Fintype n] [StarRing α] (M : Matrix m n α) (v : n → α) : star (M *ᵥ v) = star v ᵥ* Mᴴ := funext fun _ => (star_dotProduct_star _ _).symm theorem star_vecMul [Fintype m] [StarRing α] (M : Matrix m n α) (v : m → α) : star (v ᵥ* M) = Mᴴ *ᵥ star v := funext fun _ => (star_dotProduct_star _ _).symm theorem mulVec_conjTranspose [Fintype m] [StarRing α] (A : Matrix m n α) (x : m → α) : Aᴴ *ᵥ x = star (star x ᵥ* A) := funext fun _ => star_dotProduct _ _ theorem vecMul_conjTranspose [Fintype n] [StarRing α] (A : Matrix m n α) (x : n → α) : x ᵥ* Aᴴ = star (A *ᵥ star x) := funext fun _ => dotProduct_star _ _ end NonUnitalSemiring @[simp] theorem conjTranspose_vecMulVec [Mul α] [StarMul α] (w : m → α) (v : n → α) : (vecMulVec w v)ᴴ = vecMulVec (star v) (star w) := ext fun _ _ => star_mul _ _ section ConjTranspose open Matrix /-- Tell `simp` what the entries are in a conjugate transposed matrix. Compare with `mul_apply`, `diagonal_apply_eq`, etc. 
-/ @[simp] theorem conjTranspose_apply [Star α] (M : Matrix m n α) (i j) : M.conjTranspose j i = star (M i j) := rfl @[simp] theorem conjTranspose_conjTranspose [InvolutiveStar α] (M : Matrix m n α) : Mᴴᴴ = M := Matrix.ext <| by simp theorem conjTranspose_transpose [Star α] (M : Matrix m n α) : Mᴴᵀ = M.map star := rfl theorem transpose_conjTranspose [Star α] (M : Matrix m n α) : Mᵀᴴ = M.map star := rfl theorem conjTranspose_injective [InvolutiveStar α] : Function.Injective (conjTranspose : Matrix m n α → Matrix n m α) := (map_injective star_injective).comp transpose_injective @[simp] theorem conjTranspose_inj [InvolutiveStar α] {A B : Matrix m n α} : Aᴴ = Bᴴ ↔ A = B := conjTranspose_injective.eq_iff @[simp] theorem conjTranspose_eq_diagonal [DecidableEq n] [AddMonoid α] [StarAddMonoid α] {M : Matrix n n α} {v : n → α} : Mᴴ = diagonal v ↔ M = diagonal (star v) := (Function.Involutive.eq_iff conjTranspose_conjTranspose).trans <| by rw [diagonal_conjTranspose] @[simp] theorem conjTranspose_zero [AddMonoid α] [StarAddMonoid α] : (0 : Matrix m n α)ᴴ = 0 := Matrix.ext <| by simp @[simp] theorem conjTranspose_eq_zero [AddMonoid α] [StarAddMonoid α] {M : Matrix m n α} : Mᴴ = 0 ↔ M = 0 := by rw [← conjTranspose_inj (A := M), conjTranspose_zero] @[simp] theorem conjTranspose_one [DecidableEq n] [NonAssocSemiring α] [StarRing α] : (1 : Matrix n n α)ᴴ = 1 := by simp [conjTranspose] @[simp] theorem conjTranspose_eq_one [DecidableEq n] [NonAssocSemiring α] [StarRing α] {M : Matrix n n α} : Mᴴ = 1 ↔ M = 1 := (Function.Involutive.eq_iff conjTranspose_conjTranspose).trans <| by rw [conjTranspose_one] @[simp] theorem conjTranspose_natCast [DecidableEq n] [NonAssocSemiring α] [StarRing α] (d : ℕ) : (d : Matrix n n α)ᴴ = d := by simp [conjTranspose, Matrix.map_natCast, diagonal_natCast] @[simp] theorem conjTranspose_eq_natCast [DecidableEq n] [NonAssocSemiring α] [StarRing α] {M : Matrix n n α} {d : ℕ} : Mᴴ = d ↔ M = d := (Function.Involutive.eq_iff conjTranspose_conjTranspose).trans 
<| by rw [conjTranspose_natCast] @[simp] theorem conjTranspose_ofNat [DecidableEq n] [NonAssocSemiring α] [StarRing α] (d : ℕ) [d.AtLeastTwo] : (ofNat(d) : Matrix n n α)ᴴ = OfNat.ofNat d := conjTranspose_natCast _ @[simp] theorem conjTranspose_eq_ofNat [DecidableEq n] [Semiring α] [StarRing α] {M : Matrix n n α} {d : ℕ} [d.AtLeastTwo] : Mᴴ = ofNat(d) ↔ M = OfNat.ofNat d := conjTranspose_eq_natCast @[simp] theorem conjTranspose_intCast [DecidableEq n] [Ring α] [StarRing α] (d : ℤ) : (d : Matrix n n α)ᴴ = d := by simp [conjTranspose, Matrix.map_intCast, diagonal_intCast] @[simp] theorem conjTranspose_eq_intCast [DecidableEq n] [Ring α] [StarRing α] {M : Matrix n n α} {d : ℤ} : Mᴴ = d ↔ M = d := (Function.Involutive.eq_iff conjTranspose_conjTranspose).trans <| by rw [conjTranspose_intCast] @[simp] theorem conjTranspose_add [AddMonoid α] [StarAddMonoid α] (M N : Matrix m n α) : (M + N)ᴴ = Mᴴ + Nᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_sub [AddGroup α] [StarAddMonoid α] (M N : Matrix m n α) : (M - N)ᴴ = Mᴴ - Nᴴ := Matrix.ext <| by simp /-- Note that `StarModule` is quite a strong requirement; as such we also provide the following variants which this lemma would not apply to: * `Matrix.conjTranspose_smul_non_comm` * `Matrix.conjTranspose_nsmul` * `Matrix.conjTranspose_zsmul` * `Matrix.conjTranspose_natCast_smul` * `Matrix.conjTranspose_intCast_smul` * `Matrix.conjTranspose_inv_natCast_smul` * `Matrix.conjTranspose_inv_intCast_smul` * `Matrix.conjTranspose_ratCast_smul` -/ @[simp] theorem conjTranspose_smul [Star R] [Star α] [SMul R α] [StarModule R α] (c : R) (M : Matrix m n α) : (c • M)ᴴ = star c • Mᴴ := Matrix.ext fun _ _ => star_smul _ _ @[simp] theorem conjTranspose_smul_non_comm [Star R] [Star α] [SMul R α] [SMul Rᵐᵒᵖ α] (c : R) (M : Matrix m n α) (h : ∀ (r : R) (a : α), star (r • a) = MulOpposite.op (star r) • star a) : (c • M)ᴴ = MulOpposite.op (star c) • Mᴴ := Matrix.ext <| by simp [h] theorem conjTranspose_smul_self [Mul α] [StarMul α] (c : α) (M 
: Matrix m n α) : (c • M)ᴴ = MulOpposite.op (star c) • Mᴴ := conjTranspose_smul_non_comm c M star_mul @[simp] theorem conjTranspose_nsmul [AddMonoid α] [StarAddMonoid α] (c : ℕ) (M : Matrix m n α) : (c • M)ᴴ = c • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_zsmul [AddGroup α] [StarAddMonoid α] (c : ℤ) (M : Matrix m n α) : (c • M)ᴴ = c • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_natCast_smul [Semiring R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] (c : ℕ) (M : Matrix m n α) : ((c : R) • M)ᴴ = (c : R) • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_ofNat_smul [Semiring R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] (c : ℕ) [c.AtLeastTwo] (M : Matrix m n α) : ((ofNat(c) : R) • M)ᴴ = (OfNat.ofNat c : R) • Mᴴ := conjTranspose_natCast_smul c M @[simp] theorem conjTranspose_intCast_smul [Ring R] [AddCommGroup α] [StarAddMonoid α] [Module R α] (c : ℤ) (M : Matrix m n α) : ((c : R) • M)ᴴ = (c : R) • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_inv_natCast_smul [DivisionSemiring R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] (c : ℕ) (M : Matrix m n α) : ((c : R)⁻¹ • M)ᴴ = (c : R)⁻¹ • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_inv_ofNat_smul [DivisionSemiring R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] (c : ℕ) [c.AtLeastTwo] (M : Matrix m n α) : ((ofNat(c) : R)⁻¹ • M)ᴴ = (OfNat.ofNat c : R)⁻¹ • Mᴴ := conjTranspose_inv_natCast_smul c M @[simp] theorem conjTranspose_inv_intCast_smul [DivisionRing R] [AddCommGroup α] [StarAddMonoid α] [Module R α] (c : ℤ) (M : Matrix m n α) : ((c : R)⁻¹ • M)ᴴ = (c : R)⁻¹ • Mᴴ := Matrix.ext <| by simp @[simp] theorem conjTranspose_ratCast_smul [DivisionRing R] [AddCommGroup α] [StarAddMonoid α] [Module R α] (c : ℚ) (M : Matrix m n α) : ((c : R) • M)ᴴ = (c : R) • Mᴴ := Matrix.ext <| by simp theorem conjTranspose_rat_smul [AddCommGroup α] [StarAddMonoid α] [Module ℚ α] (c : ℚ) (M : Matrix m n α) : (c • M)ᴴ = c • Mᴴ := Matrix.ext <| by simp @[simp] theorem 
conjTranspose_mul [Fintype n] [NonUnitalNonAssocSemiring α] [StarRing α] (M : Matrix m n α) (N : Matrix n l α) : (M * N)ᴴ = Nᴴ * Mᴴ := Matrix.ext <| by simp [mul_apply] @[simp] theorem conjTranspose_neg [AddGroup α] [StarAddMonoid α] (M : Matrix m n α) : (-M)ᴴ = -Mᴴ := Matrix.ext <| by simp theorem conjTranspose_map [Star α] [Star β] {A : Matrix m n α} (f : α → β) (hf : Function.Semiconj f star star) : Aᴴ.map f = (A.map f)ᴴ := Matrix.ext fun _ _ => hf _ /-- When `star x = x` on the coefficients (such as the real numbers) `conjTranspose` and `transpose` are the same operation. -/ @[simp] theorem conjTranspose_eq_transpose_of_trivial [Star α] [TrivialStar α] (A : Matrix m n α) : Aᴴ = Aᵀ := Matrix.ext fun _ _ => star_trivial _ variable (m n α) /-- `Matrix.conjTranspose` as an `AddEquiv` -/ @[simps apply] def conjTransposeAddEquiv [AddMonoid α] [StarAddMonoid α] : Matrix m n α ≃+ Matrix n m α where toFun := conjTranspose invFun := conjTranspose left_inv := conjTranspose_conjTranspose right_inv := conjTranspose_conjTranspose map_add' := conjTranspose_add @[simp] theorem conjTransposeAddEquiv_symm [AddMonoid α] [StarAddMonoid α] : (conjTransposeAddEquiv m n α).symm = conjTransposeAddEquiv n m α := rfl variable {m n α} theorem conjTranspose_list_sum [AddMonoid α] [StarAddMonoid α] (l : List (Matrix m n α)) : l.sumᴴ = (l.map conjTranspose).sum := map_list_sum (conjTransposeAddEquiv m n α) l theorem conjTranspose_multiset_sum [AddCommMonoid α] [StarAddMonoid α] (s : Multiset (Matrix m n α)) : s.sumᴴ = (s.map conjTranspose).sum := (conjTransposeAddEquiv m n α).toAddMonoidHom.map_multiset_sum s theorem conjTranspose_sum [AddCommMonoid α] [StarAddMonoid α] {ι : Type*} (s : Finset ι) (M : ι → Matrix m n α) : (∑ i ∈ s, M i)ᴴ = ∑ i ∈ s, (M i)ᴴ := map_sum (conjTransposeAddEquiv m n α) _ s variable (m n R α) /-- `Matrix.conjTranspose` as a `LinearMap` -/ @[simps apply] def conjTransposeLinearEquiv [CommSemiring R] [StarRing R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] 
[StarModule R α] : Matrix m n α ≃ₗ⋆[R] Matrix n m α := { conjTransposeAddEquiv m n α with map_smul' := conjTranspose_smul } @[simp] theorem conjTransposeLinearEquiv_symm [CommSemiring R] [StarRing R] [AddCommMonoid α] [StarAddMonoid α] [Module R α] [StarModule R α] : (conjTransposeLinearEquiv m n R α).symm = conjTransposeLinearEquiv n m R α := rfl variable {m n R α} variable (m α) /-- `Matrix.conjTranspose` as a `RingEquiv` to the opposite ring -/ @[simps] def conjTransposeRingEquiv [Semiring α] [StarRing α] [Fintype m] : Matrix m m α ≃+* (Matrix m m α)ᵐᵒᵖ := { (conjTransposeAddEquiv m m α).trans MulOpposite.opAddEquiv with toFun := fun M => MulOpposite.op Mᴴ invFun := fun M => M.unopᴴ map_mul' := fun M N => (congr_arg MulOpposite.op (conjTranspose_mul M N)).trans (MulOpposite.op_mul _ _) } variable {m α} @[simp] theorem conjTranspose_pow [Semiring α] [StarRing α] [Fintype m] [DecidableEq m] (M : Matrix m m α) (k : ℕ) : (M ^ k)ᴴ = Mᴴ ^ k := MulOpposite.op_injective <| map_pow (conjTransposeRingEquiv m α) M k theorem conjTranspose_list_prod [Semiring α] [StarRing α] [Fintype m] [DecidableEq m] (l : List (Matrix m m α)) : l.prodᴴ = (l.map conjTranspose).reverse.prod := (conjTransposeRingEquiv m α).unop_map_list_prod l end ConjTranspose section Star /-- When `α` has a star operation, square matrices `Matrix n n α` have a star operation equal to `Matrix.conjTranspose`. -/ instance [Star α] : Star (Matrix n n α) where star := conjTranspose theorem star_eq_conjTranspose [Star α] (M : Matrix m m α) : star M = Mᴴ := rfl @[simp] theorem star_apply [Star α] (M : Matrix n n α) (i j) : (star M) i j = star (M j i) := rfl instance [InvolutiveStar α] : InvolutiveStar (Matrix n n α) where star_involutive := conjTranspose_conjTranspose /-- When `α` is a `*`-additive monoid, `Matrix.star` is also a `*`-additive monoid. 
-/ instance [AddMonoid α] [StarAddMonoid α] : StarAddMonoid (Matrix n n α) where star_add := conjTranspose_add instance [Star α] [Star β] [SMul α β] [StarModule α β] : StarModule α (Matrix n n β) where star_smul := conjTranspose_smul /-- When `α` is a `*`-(semi)ring, `Matrix.star` is also a `*`-(semi)ring. -/ instance [Fintype n] [NonUnitalSemiring α] [StarRing α] : StarRing (Matrix n n α) where star_add := conjTranspose_add star_mul := conjTranspose_mul /-- A version of `star_mul` for `*` instead of `*`. -/ theorem star_mul [Fintype n] [NonUnitalNonAssocSemiring α] [StarRing α] (M N : Matrix n n α) : star (M * N) = star N * star M := conjTranspose_mul _ _ end Star @[simp] theorem conjTranspose_submatrix [Star α] (A : Matrix m n α) (r : l → m) (c : o → n) : (A.submatrix r c)ᴴ = Aᴴ.submatrix c r := ext fun _ _ => rfl theorem conjTranspose_reindex [Star α] (eₘ : m ≃ l) (eₙ : n ≃ o) (M : Matrix m n α) : (reindex eₘ eₙ M)ᴴ = reindex eₙ eₘ Mᴴ := rfl end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/NonsingularInverse.lean
import Mathlib.Data.Matrix.Invertible import Mathlib.LinearAlgebra.FiniteDimensional.Basic import Mathlib.LinearAlgebra.Matrix.Adjugate import Mathlib.LinearAlgebra.Matrix.Kronecker import Mathlib.LinearAlgebra.Matrix.SemiringInverse import Mathlib.LinearAlgebra.Matrix.ToLin import Mathlib.LinearAlgebra.Matrix.Trace /-! # Nonsingular inverses In this file, we define an inverse for square matrices of invertible determinant. For matrices that are not square or not of full rank, there is a more general notion of pseudoinverses which we do not consider here. The definition of inverse used in this file is the adjugate divided by the determinant. We show that dividing the adjugate by `det A` (if possible), giving a matrix `A⁻¹` (`nonsing_inv`), will result in a multiplicative inverse to `A`. Note that there are at least three different inverses in mathlib: * `A⁻¹` (`Inv.inv`): alone, this satisfies no properties, although it is usually used in conjunction with `Group` or `GroupWithZero`. On matrices, this is defined to be zero when no inverse exists. * `⅟A` (`invOf`): this is only available in the presence of `[Invertible A]`, which guarantees an inverse exists. * `Ring.inverse A`: this is defined on any `MonoidWithZero`, and just like `⁻¹` on matrices, is defined to be zero when no inverse exists. We start by working with `Invertible`, and show the main results: * `Matrix.invertibleOfDetInvertible` * `Matrix.detInvertibleOfInvertible` * `Matrix.isUnit_iff_isUnit_det` * `Matrix.mul_eq_one_comm` After this we define `Matrix.inv` and show it matches `⅟A` and `Ring.inverse A`. The rest of the results in the file are then about `A⁻¹` ## References * https://en.wikipedia.org/wiki/Cramer's_rule#Finding_inverse_matrix ## Tags matrix inverse, cramer, cramer's rule, adjugate -/ namespace Matrix universe u u' v variable {l : Type*} {m : Type u} {n : Type u'} {α : Type v} open Matrix Equiv Equiv.Perm Finset /-! 
### Matrices are `Invertible` iff their determinants are -/ section Invertible variable [Fintype n] [DecidableEq n] [CommRing α] variable (A : Matrix n n α) (B : Matrix n n α) /-- If `A.det` has a constructive inverse, produce one for `A`. -/ def invertibleOfDetInvertible [Invertible A.det] : Invertible A where invOf := ⅟A.det • A.adjugate mul_invOf_self := by rw [mul_smul_comm, mul_adjugate, smul_smul, invOf_mul_self, one_smul] invOf_mul_self := by rw [smul_mul_assoc, adjugate_mul, smul_smul, invOf_mul_self, one_smul] theorem invOf_eq [Invertible A.det] [Invertible A] : ⅟A = ⅟A.det • A.adjugate := by letI := invertibleOfDetInvertible A convert (rfl : ⅟A = _) /-- `A.det` is invertible if `A` has a left inverse. -/ def detInvertibleOfLeftInverse (h : B * A = 1) : Invertible A.det where invOf := B.det mul_invOf_self := by rw [mul_comm, ← det_mul, h, det_one] invOf_mul_self := by rw [← det_mul, h, det_one] /-- `A.det` is invertible if `A` has a right inverse. -/ def detInvertibleOfRightInverse (h : A * B = 1) : Invertible A.det where invOf := B.det mul_invOf_self := by rw [← det_mul, h, det_one] invOf_mul_self := by rw [mul_comm, ← det_mul, h, det_one] /-- If `A` has a constructive inverse, produce one for `A.det`. -/ def detInvertibleOfInvertible [Invertible A] : Invertible A.det := detInvertibleOfLeftInverse A (⅟A) (invOf_mul_self _) theorem det_invOf [Invertible A] [Invertible A.det] : (⅟A).det = ⅟A.det := by letI := detInvertibleOfInvertible A convert (rfl : _ = ⅟A.det) /-- Together `Matrix.detInvertibleOfInvertible` and `Matrix.invertibleOfDetInvertible` form an equivalence, although both sides of the equiv are subsingleton anyway. 
-/ @[simps] def invertibleEquivDetInvertible : Invertible A ≃ Invertible A.det where toFun := @detInvertibleOfInvertible _ _ _ _ _ A invFun := @invertibleOfDetInvertible _ _ _ _ _ A left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ /-- Given a proof that `A.det` has a constructive inverse, lift `A` to `(Matrix n n α)ˣ` -/ def unitOfDetInvertible [Invertible A.det] : (Matrix n n α)ˣ := @unitOfInvertible _ _ A (invertibleOfDetInvertible A) /-- When lowered to a prop, `Matrix.invertibleEquivDetInvertible` forms an `iff`. -/ theorem isUnit_iff_isUnit_det : IsUnit A ↔ IsUnit A.det := by simp only [← nonempty_invertible_iff_isUnit, (invertibleEquivDetInvertible A).nonempty_congr] @[simp] theorem isUnits_det_units (A : (Matrix n n α)ˣ) : IsUnit (A : Matrix n n α).det := isUnit_iff_isUnit_det _ |>.mp A.isUnit /-! #### Variants of the statements above with `IsUnit` -/ theorem isUnit_det_of_invertible [Invertible A] : IsUnit A.det := @isUnit_of_invertible _ _ _ (detInvertibleOfInvertible A) variable {A B} theorem isUnit_det_of_left_inverse (h : B * A = 1) : IsUnit A.det := @isUnit_of_invertible _ _ _ (detInvertibleOfLeftInverse _ _ h) theorem isUnit_det_of_right_inverse (h : A * B = 1) : IsUnit A.det := @isUnit_of_invertible _ _ _ (detInvertibleOfRightInverse _ _ h) theorem det_ne_zero_of_left_inverse [Nontrivial α] (h : B * A = 1) : A.det ≠ 0 := (isUnit_det_of_left_inverse h).ne_zero theorem det_ne_zero_of_right_inverse [Nontrivial α] (h : A * B = 1) : A.det ≠ 0 := (isUnit_det_of_right_inverse h).ne_zero end Invertible section Inv variable [Fintype n] [DecidableEq n] [CommRing α] variable (A : Matrix n n α) (B : Matrix n n α) theorem isUnit_det_transpose (h : IsUnit A.det) : IsUnit Aᵀ.det := by rw [det_transpose] exact h /-! ### A noncomputable `Inv` instance -/ /-- The inverse of a square matrix, when it is invertible (and zero otherwise). 
-/ noncomputable instance inv : Inv (Matrix n n α) := ⟨fun A => Ring.inverse A.det • A.adjugate⟩ theorem inv_def (A : Matrix n n α) : A⁻¹ = Ring.inverse A.det • A.adjugate := rfl theorem nonsing_inv_apply_not_isUnit (h : ¬IsUnit A.det) : A⁻¹ = 0 := by rw [inv_def, Ring.inverse_non_unit _ h, zero_smul] theorem nonsing_inv_apply (h : IsUnit A.det) : A⁻¹ = (↑h.unit⁻¹ : α) • A.adjugate := by rw [inv_def, ← Ring.inverse_unit h.unit, IsUnit.unit_spec] /-- The nonsingular inverse is the same as `invOf` when `A` is invertible. -/ @[simp] theorem invOf_eq_nonsing_inv [Invertible A] : ⅟A = A⁻¹ := by letI := detInvertibleOfInvertible A rw [inv_def, Ring.inverse_invertible, invOf_eq] /-- Coercing the result of `Units.instInv` is the same as coercing first and applying the nonsingular inverse. -/ @[simp, norm_cast] theorem coe_units_inv (A : (Matrix n n α)ˣ) : ↑A⁻¹ = (A⁻¹ : Matrix n n α) := by letI := A.invertible rw [← invOf_eq_nonsing_inv, invOf_units] /-- The nonsingular inverse is the same as the general `Ring.inverse`. -/ theorem nonsing_inv_eq_ringInverse : A⁻¹ = Ring.inverse A := by by_cases h_det : IsUnit A.det · cases (A.isUnit_iff_isUnit_det.mpr h_det).nonempty_invertible rw [← invOf_eq_nonsing_inv, Ring.inverse_invertible] · have h := mt A.isUnit_iff_isUnit_det.mp h_det rw [Ring.inverse_non_unit _ h, nonsing_inv_apply_not_isUnit A h_det] @[deprecated (since := "2025-04-22")] alias nonsing_inv_eq_ring_inverse := nonsing_inv_eq_ringInverse theorem transpose_nonsing_inv : A⁻¹ᵀ = Aᵀ⁻¹ := by rw [inv_def, inv_def, transpose_smul, det_transpose, adjugate_transpose] theorem conjTranspose_nonsing_inv [StarRing α] : A⁻¹ᴴ = Aᴴ⁻¹ := by rw [inv_def, inv_def, conjTranspose_smul, det_conjTranspose, adjugate_conjTranspose, Ring.inverse_star] /-- The `nonsing_inv` of `A` is a right inverse. 
-/ @[simp] theorem mul_nonsing_inv (h : IsUnit A.det) : A * A⁻¹ = 1 := by cases (A.isUnit_iff_isUnit_det.mpr h).nonempty_invertible rw [← invOf_eq_nonsing_inv, mul_invOf_self] /-- The nonsingular inverse of `A` is a left inverse. -/ @[simp] theorem nonsing_inv_mul (h : IsUnit A.det) : A⁻¹ * A = 1 := by cases (A.isUnit_iff_isUnit_det.mpr h).nonempty_invertible rw [← invOf_eq_nonsing_inv, invOf_mul_self] instance [Invertible A] : Invertible A⁻¹ := by rw [← invOf_eq_nonsing_inv] infer_instance @[simp] theorem inv_inv_of_invertible [Invertible A] : A⁻¹⁻¹ = A := by simp only [← invOf_eq_nonsing_inv, invOf_invOf] @[simp] theorem mul_nonsing_inv_cancel_right (B : Matrix m n α) (h : IsUnit A.det) : B * A * A⁻¹ = B := by simp [Matrix.mul_assoc, mul_nonsing_inv A h] @[simp] theorem mul_nonsing_inv_cancel_left (B : Matrix n m α) (h : IsUnit A.det) : A * (A⁻¹ * B) = B := by simp [← Matrix.mul_assoc, mul_nonsing_inv A h] @[simp] theorem nonsing_inv_mul_cancel_right (B : Matrix m n α) (h : IsUnit A.det) : B * A⁻¹ * A = B := by simp [Matrix.mul_assoc, nonsing_inv_mul A h] @[simp] theorem nonsing_inv_mul_cancel_left (B : Matrix n m α) (h : IsUnit A.det) : A⁻¹ * (A * B) = B := by simp [← Matrix.mul_assoc, nonsing_inv_mul A h] @[simp] theorem mul_inv_of_invertible [Invertible A] : A * A⁻¹ = 1 := mul_nonsing_inv A (isUnit_det_of_invertible A) @[simp] theorem inv_mul_of_invertible [Invertible A] : A⁻¹ * A = 1 := nonsing_inv_mul A (isUnit_det_of_invertible A) @[simp] theorem mul_inv_cancel_right_of_invertible (B : Matrix m n α) [Invertible A] : B * A * A⁻¹ = B := mul_nonsing_inv_cancel_right A B (isUnit_det_of_invertible A) @[simp] theorem mul_inv_cancel_left_of_invertible (B : Matrix n m α) [Invertible A] : A * (A⁻¹ * B) = B := mul_nonsing_inv_cancel_left A B (isUnit_det_of_invertible A) @[simp] theorem inv_mul_cancel_right_of_invertible (B : Matrix m n α) [Invertible A] : B * A⁻¹ * A = B := nonsing_inv_mul_cancel_right A B (isUnit_det_of_invertible A) @[simp] theorem 
inv_mul_cancel_left_of_invertible (B : Matrix n m α) [Invertible A] : A⁻¹ * (A * B) = B := nonsing_inv_mul_cancel_left A B (isUnit_det_of_invertible A) theorem inv_mul_eq_iff_eq_mul_of_invertible (A : Matrix n n α) [Invertible A] (B C : Matrix n m α) : A⁻¹ * B = C ↔ B = A * C := ⟨fun h => by rw [← h, mul_inv_cancel_left_of_invertible], fun h => by rw [h, inv_mul_cancel_left_of_invertible]⟩ theorem mul_inv_eq_iff_eq_mul_of_invertible (A : Matrix n n α) [Invertible A] (B C : Matrix m n α) : B * A⁻¹ = C ↔ B = C * A := ⟨fun h => by rw [← h, inv_mul_cancel_right_of_invertible], fun h => by rw [h, mul_inv_cancel_right_of_invertible]⟩ lemma inv_mulVec_eq_vec {A : Matrix n n α} [Invertible A] {u v : n → α} (hM : u = A.mulVec v) : A⁻¹.mulVec u = v := by rw [hM, Matrix.mulVec_mulVec, Matrix.inv_mul_of_invertible, Matrix.one_mulVec] lemma mul_right_injective_of_invertible [Invertible A] : Function.Injective (fun (x : Matrix n m α) => A * x) := fun _ _ h => by simpa only [inv_mul_cancel_left_of_invertible] using congr_arg (A⁻¹ * ·) h lemma mul_left_injective_of_invertible [Invertible A] : Function.Injective (fun (x : Matrix m n α) => x * A) := fun a x hax => by simpa only [mul_inv_cancel_right_of_invertible] using congr_arg (· * A⁻¹) hax lemma mul_right_inj_of_invertible [Invertible A] {x y : Matrix n m α} : A * x = A * y ↔ x = y := (mul_right_injective_of_invertible A).eq_iff lemma mul_left_inj_of_invertible [Invertible A] {x y : Matrix m n α} : x * A = y * A ↔ x = y := (mul_left_injective_of_invertible A).eq_iff end Inv section InjectiveMul variable [Fintype n] [Fintype m] [DecidableEq m] [CommRing α] lemma mul_left_injective_of_inv (A : Matrix m n α) (B : Matrix n m α) (h : A * B = 1) : Function.Injective (fun x : Matrix l m α => x * A) := fun _ _ g => by simpa only [Matrix.mul_assoc, Matrix.mul_one, h] using congr_arg (· * B) g lemma mul_right_injective_of_inv (A : Matrix m n α) (B : Matrix n m α) (h : A * B = 1) : Function.Injective (fun x : Matrix m l α => B * x) := fun 
_ _ g => by simpa only [← Matrix.mul_assoc, Matrix.one_mul, h] using congr_arg (A * ·) g end InjectiveMul section vecMul section Semiring variable {R : Type*} [Semiring R] theorem vecMul_surjective_iff_exists_left_inverse [DecidableEq n] [Fintype m] [Finite n] {A : Matrix m n R} : Function.Surjective A.vecMul ↔ ∃ B : Matrix n m R, B * A = 1 := by cases nonempty_fintype n refine ⟨fun h ↦ ?_, fun ⟨B, hBA⟩ y ↦ ⟨y ᵥ* B, by simp [hBA]⟩⟩ choose rows hrows using (h <| Pi.single · 1) refine ⟨Matrix.of rows, Matrix.ext fun i j => ?_⟩ rw [mul_apply_eq_vecMul, one_eq_pi_single, ← hrows] rfl theorem mulVec_surjective_iff_exists_right_inverse [DecidableEq m] [Finite m] [Fintype n] {A : Matrix m n R} : Function.Surjective A.mulVec ↔ ∃ B : Matrix n m R, A * B = 1 := by cases nonempty_fintype m refine ⟨fun h ↦ ?_, fun ⟨B, hBA⟩ y ↦ ⟨B *ᵥ y, by simp [hBA]⟩⟩ choose cols hcols using (h <| Pi.single · 1) refine ⟨(Matrix.of cols)ᵀ, Matrix.ext fun i j ↦ ?_⟩ rw [one_eq_pi_single, Pi.single_comm, ← hcols j] rfl end Semiring variable [DecidableEq m] {R K : Type*} [CommRing R] [Field K] [Fintype m] theorem vecMul_surjective_iff_isUnit {A : Matrix m m R} : Function.Surjective A.vecMul ↔ IsUnit A := by rw [vecMul_surjective_iff_exists_left_inverse, exists_left_inverse_iff_isUnit] theorem mulVec_surjective_iff_isUnit {A : Matrix m m R} : Function.Surjective A.mulVec ↔ IsUnit A := by rw [mulVec_surjective_iff_exists_right_inverse, exists_right_inverse_iff_isUnit] theorem vecMul_injective_iff_isUnit {A : Matrix m m K} : Function.Injective A.vecMul ↔ IsUnit A := by refine ⟨fun h ↦ ?_, fun h ↦ ?_⟩ · rw [← vecMul_surjective_iff_isUnit] exact LinearMap.surjective_of_injective (f := A.vecMulLinear) h change Function.Injective A.vecMulLinear rw [← LinearMap.ker_eq_bot, LinearMap.ker_eq_bot'] intro c hc replace h := h.invertible simpa using congr_arg A⁻¹.vecMulLinear hc theorem mulVec_injective_iff_isUnit {A : Matrix m m K} : Function.Injective A.mulVec ↔ IsUnit A := by rw [← isUnit_transpose, ← 
vecMul_injective_iff_isUnit] simp_rw [vecMul_transpose] theorem linearIndependent_rows_iff_isUnit {A : Matrix m m K} : LinearIndependent K A.row ↔ IsUnit A := by rw [← col_transpose, ← mulVec_injective_iff, ← coe_mulVecLin, mulVecLin_transpose, ← vecMul_injective_iff_isUnit, coe_vecMulLinear] theorem linearIndependent_cols_iff_isUnit {A : Matrix m m K} : LinearIndependent K A.col ↔ IsUnit A := by rw [← row_transpose, linearIndependent_rows_iff_isUnit, isUnit_transpose] theorem vecMul_surjective_of_invertible (A : Matrix m m R) [Invertible A] : Function.Surjective A.vecMul := vecMul_surjective_iff_isUnit.2 <| isUnit_of_invertible A theorem mulVec_surjective_of_invertible (A : Matrix m m R) [Invertible A] : Function.Surjective A.mulVec := mulVec_surjective_iff_isUnit.2 <| isUnit_of_invertible A theorem vecMul_injective_of_invertible (A : Matrix m m K) [Invertible A] : Function.Injective A.vecMul := vecMul_injective_iff_isUnit.2 <| isUnit_of_invertible A theorem mulVec_injective_of_invertible (A : Matrix m m K) [Invertible A] : Function.Injective A.mulVec := mulVec_injective_iff_isUnit.2 <| isUnit_of_invertible A theorem linearIndependent_rows_of_invertible (A : Matrix m m K) [Invertible A] : LinearIndependent K A.row := linearIndependent_rows_iff_isUnit.2 <| isUnit_of_invertible A theorem linearIndependent_cols_of_invertible (A : Matrix m m K) [Invertible A] : LinearIndependent K A.col := linearIndependent_cols_iff_isUnit.2 <| isUnit_of_invertible A end vecMul variable [Fintype n] [DecidableEq n] [CommRing α] variable (A : Matrix n n α) (B : Matrix n n α) theorem nonsing_inv_cancel_or_zero : A⁻¹ * A = 1 ∧ A * A⁻¹ = 1 ∨ A⁻¹ = 0 := by by_cases h : IsUnit A.det · exact Or.inl ⟨nonsing_inv_mul _ h, mul_nonsing_inv _ h⟩ · exact Or.inr (nonsing_inv_apply_not_isUnit _ h) theorem det_nonsing_inv_mul_det (h : IsUnit A.det) : A⁻¹.det * A.det = 1 := by rw [← det_mul, A.nonsing_inv_mul h, det_one] @[simp] theorem det_nonsing_inv : A⁻¹.det = Ring.inverse A.det := by by_cases h : 
IsUnit A.det · cases h.nonempty_invertible letI := invertibleOfDetInvertible A rw [Ring.inverse_invertible, ← invOf_eq_nonsing_inv, det_invOf] cases isEmpty_or_nonempty n · rw [det_isEmpty, det_isEmpty, Ring.inverse_one] · rw [Ring.inverse_non_unit _ h, nonsing_inv_apply_not_isUnit _ h, det_zero ‹_›] theorem isUnit_nonsing_inv_det (h : IsUnit A.det) : IsUnit A⁻¹.det := .of_mul_eq_one _ (A.det_nonsing_inv_mul_det h) @[simp] theorem nonsing_inv_nonsing_inv (h : IsUnit A.det) : A⁻¹⁻¹ = A := calc A⁻¹⁻¹ = 1 * A⁻¹⁻¹ := by rw [Matrix.one_mul] _ = A * A⁻¹ * A⁻¹⁻¹ := by rw [A.mul_nonsing_inv h] _ = A := by rw [Matrix.mul_assoc, A⁻¹.mul_nonsing_inv (A.isUnit_nonsing_inv_det h), Matrix.mul_one] theorem isUnit_nonsing_inv_det_iff {A : Matrix n n α} : IsUnit A⁻¹.det ↔ IsUnit A.det := by rw [Matrix.det_nonsing_inv, isUnit_ringInverse] @[simp] theorem isUnit_nonsing_inv_iff {A : Matrix n n α} : IsUnit A⁻¹ ↔ IsUnit A := by simp_rw [isUnit_iff_isUnit_det, isUnit_nonsing_inv_det_iff] -- `IsUnit.invertible` lifts the proposition `IsUnit A` to a constructive inverse of `A`. /-- A version of `Matrix.invertibleOfDetInvertible` with the inverse defeq to `A⁻¹` that is therefore noncomputable. -/ noncomputable def invertibleOfIsUnitDet (h : IsUnit A.det) : Invertible A := ⟨A⁻¹, nonsing_inv_mul A h, mul_nonsing_inv A h⟩ /-- A version of `Matrix.unitOfDetInvertible` with the inverse defeq to `A⁻¹` that is therefore noncomputable. -/ noncomputable def nonsingInvUnit (h : IsUnit A.det) : (Matrix n n α)ˣ := @unitOfInvertible _ _ _ (invertibleOfIsUnitDet A h) theorem unitOfDetInvertible_eq_nonsingInvUnit [Invertible A.det] : unitOfDetInvertible A = nonsingInvUnit A (isUnit_of_invertible _) := by ext rfl variable {A} {B} /-- If matrix A is left invertible, then its inverse equals its left inverse. 
-/ theorem inv_eq_left_inv (h : B * A = 1) : A⁻¹ = B := letI := invertibleOfLeftInverse _ _ h invOf_eq_nonsing_inv A ▸ invOf_eq_left_inv h /-- If matrix A is right invertible, then its inverse equals its right inverse. -/ theorem inv_eq_right_inv (h : A * B = 1) : A⁻¹ = B := inv_eq_left_inv (mul_eq_one_comm.2 h) section InvEqInv variable {C : Matrix n n α} /-- The left inverse of matrix A is unique when existing. -/ theorem left_inv_eq_left_inv (h : B * A = 1) (g : C * A = 1) : B = C := by rw [← inv_eq_left_inv h, ← inv_eq_left_inv g] /-- The right inverse of matrix A is unique when existing. -/ theorem right_inv_eq_right_inv (h : A * B = 1) (g : A * C = 1) : B = C := by rw [← inv_eq_right_inv h, ← inv_eq_right_inv g] /-- The right inverse of matrix A equals the left inverse of A when they exist. -/ theorem right_inv_eq_left_inv (h : A * B = 1) (g : C * A = 1) : B = C := by rw [← inv_eq_right_inv h, ← inv_eq_left_inv g] theorem inv_inj (h : A⁻¹ = B⁻¹) (h' : IsUnit A.det) : A = B := by refine left_inv_eq_left_inv (mul_nonsing_inv _ h') ?_ rw [h] refine mul_nonsing_inv _ ?_ rwa [← isUnit_nonsing_inv_det_iff, ← h, isUnit_nonsing_inv_det_iff] end InvEqInv variable (A) @[simp] theorem inv_zero : (0 : Matrix n n α)⁻¹ = 0 := by rcases subsingleton_or_nontrivial α with ht | ht · simp [eq_iff_true_of_subsingleton] rcases (Fintype.card n).zero_le.eq_or_lt with hc | hc · rw [eq_comm, Fintype.card_eq_zero_iff] at hc subsingleton · have hn : Nonempty n := Fintype.card_pos_iff.mp hc refine nonsing_inv_apply_not_isUnit _ ?_ simp noncomputable instance : InvOneClass (Matrix n n α) := { Matrix.one, Matrix.inv with inv_one := inv_eq_left_inv (by simp) } theorem inv_smul (k : α) [Invertible k] (h : IsUnit A.det) : (k • A)⁻¹ = ⅟k • A⁻¹ := inv_eq_left_inv (by simp [h, smul_smul]) theorem inv_smul' (k : αˣ) (h : IsUnit A.det) : (k • A)⁻¹ = k⁻¹ • A⁻¹ := inv_eq_left_inv (by simp [h, smul_smul]) theorem inv_adjugate (A : Matrix n n α) (h : IsUnit A.det) : (adjugate A)⁻¹ = h.unit⁻¹ • A := 
by refine inv_eq_left_inv ?_ rw [smul_mul, mul_adjugate, Units.smul_def, smul_smul, h.val_inv_mul, one_smul] section Diagonal /-- `diagonal v` is invertible if `v` is -/ def diagonalInvertible {α} [NonAssocSemiring α] (v : n → α) [Invertible v] : Invertible (diagonal v) := Invertible.map (diagonalRingHom n α) v theorem invOf_diagonal_eq {α} [Semiring α] (v : n → α) [Invertible v] [Invertible (diagonal v)] : ⅟(diagonal v) = diagonal (⅟v) := by rw [@Invertible.congr _ _ _ _ _ (diagonalInvertible v) rfl] rfl /-- `v` is invertible if `diagonal v` is -/ def invertibleOfDiagonalInvertible (v : n → α) [Invertible (diagonal v)] : Invertible v where invOf := diag (⅟(diagonal v)) invOf_mul_self := funext fun i => by letI : Invertible (diagonal v).det := detInvertibleOfInvertible _ rw [invOf_eq, diag_smul, adjugate_diagonal, diag_diagonal] dsimp rw [mul_assoc, prod_erase_mul _ _ (Finset.mem_univ _), ← det_diagonal] exact mul_invOf_self _ mul_invOf_self := funext fun i => by letI : Invertible (diagonal v).det := detInvertibleOfInvertible _ rw [invOf_eq, diag_smul, adjugate_diagonal, diag_diagonal] dsimp rw [mul_left_comm, mul_prod_erase _ _ (Finset.mem_univ _), ← det_diagonal] exact mul_invOf_self _ /-- Together `Matrix.diagonalInvertible` and `Matrix.invertibleOfDiagonalInvertible` form an equivalence, although both sides of the equiv are subsingleton anyway. -/ @[simps] def diagonalInvertibleEquivInvertible (v : n → α) : Invertible (diagonal v) ≃ Invertible v where toFun := @invertibleOfDiagonalInvertible _ _ _ _ _ _ invFun := @diagonalInvertible _ _ _ _ _ _ left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ /-- When lowered to a prop, `Matrix.diagonalInvertibleEquivInvertible` forms an `iff`. 
-/ @[simp] theorem isUnit_diagonal {v : n → α} : IsUnit (diagonal v) ↔ IsUnit v := by simp only [← nonempty_invertible_iff_isUnit, (diagonalInvertibleEquivInvertible v).nonempty_congr] theorem inv_diagonal (v : n → α) : (diagonal v)⁻¹ = diagonal (Ring.inverse v) := by rw [nonsing_inv_eq_ringInverse] by_cases h : IsUnit v · have := isUnit_diagonal.mpr h cases this.nonempty_invertible cases h.nonempty_invertible rw [Ring.inverse_invertible, Ring.inverse_invertible, invOf_diagonal_eq] · have := isUnit_diagonal.not.mpr h rw [Ring.inverse_non_unit _ h, Pi.zero_def, diagonal_zero, Ring.inverse_non_unit _ this] end Diagonal /-- The inverse of a 1×1 or 0×0 matrix is always diagonal. While we could write this as `of fun _ _ => Ring.inverse (A default default)` on the RHS, this is less useful because: * It wouldn't work for 0×0 matrices. * More things are true about diagonal matrices than constant matrices, and so more lemmas exist. `Matrix.diagonal_unique` can be used to reach this form, while `Ring.inverse_eq_inv` can be used to replace `Ring.inverse` with `⁻¹`. -/ @[simp] theorem inv_subsingleton [Subsingleton m] [Fintype m] [DecidableEq m] (A : Matrix m m α) : A⁻¹ = diagonal fun i => Ring.inverse (A i i) := by rw [inv_def, adjugate_subsingleton, smul_one_eq_diagonal] congr! with i exact det_eq_elem_of_subsingleton _ _ section Woodbury variable [Fintype m] [DecidableEq m] variable (A : Matrix n n α) (U : Matrix n m α) (C : Matrix m m α) (V : Matrix m n α) /-- The **Woodbury Identity** (`⁻¹` version). See ``add_mul_mul_inv_eq_sub'` for the binomial inverse theorem. 
-/ theorem add_mul_mul_inv_eq_sub (hA : IsUnit A) (hC : IsUnit C) (hAC : IsUnit (C⁻¹ + V * A⁻¹ * U)) : (A + U * C * V)⁻¹ = A⁻¹ - A⁻¹ * U * (C⁻¹ + V * A⁻¹ * U)⁻¹ * V * A⁻¹ := by obtain ⟨_⟩ := hA.nonempty_invertible obtain ⟨_⟩ := hC.nonempty_invertible obtain ⟨iAC⟩ := hAC.nonempty_invertible simp only [← invOf_eq_nonsing_inv] at iAC letI := invertibleAddMulMul A U C V simp only [← invOf_eq_nonsing_inv] apply invOf_add_mul_mul /-- The **binomial inverse theorem** (variant of the Woodbury identity). -/ theorem add_mul_mul_inv_eq_sub' (hA : IsUnit A) (h : IsUnit (C + C * V * A⁻¹ * U * C)) : (A + U * C * V)⁻¹ = A⁻¹ - A⁻¹ * U * C * (C + C * V * A⁻¹ * U * C)⁻¹ * C * V * A⁻¹ := by obtain ⟨_⟩ := hA.nonempty_invertible obtain ⟨ih⟩ := h.nonempty_invertible simp only [← invOf_eq_nonsing_inv] at ih letI := invertibleAddMulMul' A U C V simp only [← invOf_eq_nonsing_inv] apply invOf_add_mul_mul' end Woodbury @[simp] theorem inv_inv_inv (A : Matrix n n α) : A⁻¹⁻¹⁻¹ = A⁻¹ := by by_cases h : IsUnit A.det · rw [nonsing_inv_nonsing_inv _ h] · simp [nonsing_inv_apply_not_isUnit _ h] /-- The `Matrix` version of `inv_add_inv'` -/ theorem inv_add_inv {A B : Matrix n n α} (h : IsUnit A ↔ IsUnit B) : A⁻¹ + B⁻¹ = A⁻¹ * (A + B) * B⁻¹ := by simpa only [nonsing_inv_eq_ringInverse] using Ring.inverse_add_inverse h /-- The `Matrix` version of `inv_sub_inv'` -/ theorem inv_sub_inv {A B : Matrix n n α} (h : IsUnit A ↔ IsUnit B) : A⁻¹ - B⁻¹ = A⁻¹ * (B - A) * B⁻¹ := by simpa only [nonsing_inv_eq_ringInverse] using Ring.inverse_sub_inverse h theorem mul_inv_rev (A B : Matrix n n α) : (A * B)⁻¹ = B⁻¹ * A⁻¹ := by simp only [inv_def] rw [Matrix.smul_mul, Matrix.mul_smul, smul_smul, det_mul, adjugate_mul_distrib, Ring.mul_inverse_rev] /-- A version of `List.prod_inv_reverse` for `Matrix.inv`. 
-/ theorem list_prod_inv_reverse : ∀ l : List (Matrix n n α), l.prod⁻¹ = (l.reverse.map Inv.inv).prod | [] => by rw [List.reverse_nil, List.map_nil, List.prod_nil, inv_one] | A::Xs => by rw [List.reverse_cons', List.map_concat, List.prod_concat, List.prod_cons, mul_inv_rev, list_prod_inv_reverse Xs] /-- One form of **Cramer's rule**. See `Matrix.mulVec_cramer` for a stronger form. -/ @[simp] theorem det_smul_inv_mulVec_eq_cramer (A : Matrix n n α) (b : n → α) (h : IsUnit A.det) : A.det • A⁻¹ *ᵥ b = cramer A b := by rw [cramer_eq_adjugate_mulVec, A.nonsing_inv_apply h, ← smul_mulVec, smul_smul, h.mul_val_inv, one_smul] /-- One form of **Cramer's rule**. See `Matrix.mulVec_cramer` for a stronger form. -/ @[simp] theorem det_smul_inv_vecMul_eq_cramer_transpose (A : Matrix n n α) (b : n → α) (h : IsUnit A.det) : A.det • b ᵥ* A⁻¹ = cramer Aᵀ b := by rw [← A⁻¹.transpose_transpose, vecMul_transpose, transpose_nonsing_inv, ← det_transpose, Aᵀ.det_smul_inv_mulVec_eq_cramer _ (isUnit_det_transpose A h)] /-! ### Inverses of permutated matrices Note that the simp-normal form of `Matrix.reindex` is `Matrix.submatrix`, so we prove most of these results about only the latter. 
-/ section Submatrix variable [Fintype m] variable [DecidableEq m] /-- `A.submatrix e₁ e₂` is invertible if `A` is -/ def submatrixEquivInvertible (A : Matrix m m α) (e₁ e₂ : n ≃ m) [Invertible A] : Invertible (A.submatrix e₁ e₂) := invertibleOfRightInverse _ ((⅟A).submatrix e₂ e₁) <| by rw [Matrix.submatrix_mul_equiv, mul_invOf_self, submatrix_one_equiv] /-- `A` is invertible if `A.submatrix e₁ e₂` is -/ def invertibleOfSubmatrixEquivInvertible (A : Matrix m m α) (e₁ e₂ : n ≃ m) [Invertible (A.submatrix e₁ e₂)] : Invertible A := invertibleOfRightInverse _ ((⅟(A.submatrix e₁ e₂)).submatrix e₂.symm e₁.symm) <| by have : A = (A.submatrix e₁ e₂).submatrix e₁.symm e₂.symm := by simp conv in _ * _ => congr rw [this] rw [Matrix.submatrix_mul_equiv, mul_invOf_self, submatrix_one_equiv] theorem invOf_submatrix_equiv_eq (A : Matrix m m α) (e₁ e₂ : n ≃ m) [Invertible A] [Invertible (A.submatrix e₁ e₂)] : ⅟(A.submatrix e₁ e₂) = (⅟A).submatrix e₂ e₁ := by rw [@Invertible.congr _ _ _ _ _ (submatrixEquivInvertible A e₁ e₂) rfl] rfl /-- Together `Matrix.submatrixEquivInvertible` and `Matrix.invertibleOfSubmatrixEquivInvertible` form an equivalence, although both sides of the equiv are subsingleton anyway. -/ @[simps] def submatrixEquivInvertibleEquivInvertible (A : Matrix m m α) (e₁ e₂ : n ≃ m) : Invertible (A.submatrix e₁ e₂) ≃ Invertible A where toFun _ := invertibleOfSubmatrixEquivInvertible A e₁ e₂ invFun _ := submatrixEquivInvertible A e₁ e₂ left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ /-- When lowered to a prop, `Matrix.invertibleOfSubmatrixEquivInvertible` forms an `iff`. 
-/ @[simp] theorem isUnit_submatrix_equiv {A : Matrix m m α} (e₁ e₂ : n ≃ m) : IsUnit (A.submatrix e₁ e₂) ↔ IsUnit A := by simp only [← nonempty_invertible_iff_isUnit, (submatrixEquivInvertibleEquivInvertible A _ _).nonempty_congr] @[simp] theorem inv_submatrix_equiv (A : Matrix m m α) (e₁ e₂ : n ≃ m) : (A.submatrix e₁ e₂)⁻¹ = A⁻¹.submatrix e₂ e₁ := by by_cases h : IsUnit A · cases h.nonempty_invertible letI := submatrixEquivInvertible A e₁ e₂ rw [← invOf_eq_nonsing_inv, ← invOf_eq_nonsing_inv, invOf_submatrix_equiv_eq A] · have := (isUnit_submatrix_equiv e₁ e₂).not.mpr h simp_rw [nonsing_inv_eq_ringInverse, Ring.inverse_non_unit _ h, Ring.inverse_non_unit _ this, submatrix_zero, Pi.zero_apply] theorem inv_reindex (e₁ e₂ : n ≃ m) (A : Matrix n n α) : (reindex e₁ e₂ A)⁻¹ = reindex e₂ e₁ A⁻¹ := inv_submatrix_equiv A e₁.symm e₂.symm end Submatrix open scoped Kronecker in theorem inv_kronecker [Fintype m] [DecidableEq m] (A : Matrix m m α) (B : Matrix n n α) : (A ⊗ₖ B)⁻¹ = A⁻¹ ⊗ₖ B⁻¹ := by -- handle the special cases where either matrix is not invertible by_cases hA : IsUnit A.det swap · cases isEmpty_or_nonempty n · subsingleton have hAB : ¬IsUnit (A ⊗ₖ B).det := by refine mt (fun hAB => ?_) hA rw [det_kronecker] at hAB exact (isUnit_pow_iff Fintype.card_ne_zero).mp (isUnit_of_mul_isUnit_left hAB) rw [nonsing_inv_apply_not_isUnit _ hA, zero_kronecker, nonsing_inv_apply_not_isUnit _ hAB] by_cases hB : IsUnit B.det; swap · cases isEmpty_or_nonempty m · subsingleton have hAB : ¬IsUnit (A ⊗ₖ B).det := by refine mt (fun hAB => ?_) hB rw [det_kronecker] at hAB exact (isUnit_pow_iff Fintype.card_ne_zero).mp (isUnit_of_mul_isUnit_right hAB) rw [nonsing_inv_apply_not_isUnit _ hB, kronecker_zero, nonsing_inv_apply_not_isUnit _ hAB] -- otherwise follows trivially from `mul_kronecker_mul` · apply inv_eq_right_inv rw [← mul_kronecker_mul, ← one_kronecker_one, mul_nonsing_inv _ hA, mul_nonsing_inv _ hB] /-! 
### More results about determinants -/ section Det variable [Fintype m] [DecidableEq m] /-- A variant of `Matrix.det_units_conj`. -/ theorem det_conj {M : Matrix m m α} (h : IsUnit M) (N : Matrix m m α) : det (M * N * M⁻¹) = det N := by rw [← h.unit_spec, ← coe_units_inv, det_units_conj] /-- A variant of `Matrix.det_units_conj'`. -/ theorem det_conj' {M : Matrix m m α} (h : IsUnit M) (N : Matrix m m α) : det (M⁻¹ * N * M) = det N := by rw [← h.unit_spec, ← coe_units_inv, det_units_conj'] end Det /-! ### More results about traces -/ section trace variable [Fintype m] [DecidableEq m] /-- A variant of `Matrix.trace_units_conj`. -/ theorem trace_conj {M : Matrix m m α} (h : IsUnit M) (N : Matrix m m α) : trace (M * N * M⁻¹) = trace N := by rw [← h.unit_spec, ← coe_units_inv, trace_units_conj] /-- A variant of `Matrix.trace_units_conj'`. -/ theorem trace_conj' {M : Matrix m m α} (h : IsUnit M) (N : Matrix m m α) : trace (M⁻¹ * N * M) = trace N := by rw [← h.unit_spec, ← coe_units_inv, trace_units_conj'] end trace end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/IsDiag.lean
import Mathlib.LinearAlgebra.Matrix.Kronecker import Mathlib.LinearAlgebra.Matrix.Orthogonal import Mathlib.LinearAlgebra.Matrix.Symmetric /-! # Diagonal matrices This file contains the definition and basic results about diagonal matrices. ## Main results - `Matrix.IsDiag`: a proposition that states a given square matrix `A` is diagonal. ## Tags diag, diagonal, matrix -/ namespace Matrix variable {α β R n m : Type*} open Function open Matrix Kronecker /-- `A.IsDiag` means square matrix `A` is a diagonal matrix. -/ def IsDiag [Zero α] (A : Matrix n n α) : Prop := Pairwise fun i j => A i j = 0 @[simp] theorem isDiag_diagonal [Zero α] [DecidableEq n] (d : n → α) : (diagonal d).IsDiag := fun _ _ => Matrix.diagonal_apply_ne _ /-- Diagonal matrices are generated by the `Matrix.diagonal` of their `Matrix.diag`. -/ theorem IsDiag.diagonal_diag [Zero α] [DecidableEq n] {A : Matrix n n α} (h : A.IsDiag) : diagonal (diag A) = A := ext fun i j => by obtain rfl | hij := Decidable.eq_or_ne i j · rw [diagonal_apply_eq, diag] · rw [diagonal_apply_ne _ hij, h hij] /-- `Matrix.IsDiag.diagonal_diag` as an iff. -/ theorem isDiag_iff_diagonal_diag [Zero α] [DecidableEq n] (A : Matrix n n α) : A.IsDiag ↔ diagonal (diag A) = A := ⟨IsDiag.diagonal_diag, fun hd => hd ▸ isDiag_diagonal (diag A)⟩ /-- Every matrix indexed by a subsingleton is diagonal. -/ theorem isDiag_of_subsingleton [Zero α] [Subsingleton n] (A : Matrix n n α) : A.IsDiag := fun i j h => (h <| Subsingleton.elim i j).elim /-- Every zero matrix is diagonal. -/ @[simp] theorem isDiag_zero [Zero α] : (0 : Matrix n n α).IsDiag := fun _ _ _ => rfl /-- Every identity matrix is diagonal. 
-/ @[simp] theorem isDiag_one [DecidableEq n] [Zero α] [One α] : (1 : Matrix n n α).IsDiag := fun _ _ => one_apply_ne theorem IsDiag.map [Zero α] [Zero β] {A : Matrix n n α} (ha : A.IsDiag) {f : α → β} (hf : f 0 = 0) : (A.map f).IsDiag := by intro i j h simp [ha h, hf] theorem IsDiag.neg [SubtractionMonoid α] {A : Matrix n n α} (ha : A.IsDiag) : (-A).IsDiag := by intro i j h simp [ha h] @[simp] theorem isDiag_neg_iff [SubtractionMonoid α] {A : Matrix n n α} : (-A).IsDiag ↔ A.IsDiag := ⟨fun ha _ _ h => neg_eq_zero.1 (ha h), IsDiag.neg⟩ theorem IsDiag.add [AddZeroClass α] {A B : Matrix n n α} (ha : A.IsDiag) (hb : B.IsDiag) : (A + B).IsDiag := by intro i j h simp [ha h, hb h] theorem IsDiag.sub [SubtractionMonoid α] {A B : Matrix n n α} (ha : A.IsDiag) (hb : B.IsDiag) : (A - B).IsDiag := by intro i j h simp [ha h, hb h] theorem IsDiag.smul [Zero α] [SMulZeroClass R α] (k : R) {A : Matrix n n α} (ha : A.IsDiag) : (k • A).IsDiag := by intro i j h simp [ha h] @[simp] theorem isDiag_smul_one (n) [MulZeroOneClass α] [DecidableEq n] (k : α) : (k • (1 : Matrix n n α)).IsDiag := isDiag_one.smul k theorem IsDiag.transpose [Zero α] {A : Matrix n n α} (ha : A.IsDiag) : Aᵀ.IsDiag := fun _ _ h => ha h.symm @[simp] theorem isDiag_transpose_iff [Zero α] {A : Matrix n n α} : Aᵀ.IsDiag ↔ A.IsDiag := ⟨IsDiag.transpose, IsDiag.transpose⟩ theorem IsDiag.conjTranspose [NonUnitalNonAssocSemiring α] [StarRing α] {A : Matrix n n α} (ha : A.IsDiag) : Aᴴ.IsDiag := ha.transpose.map (star_zero _) @[simp] theorem isDiag_conjTranspose_iff [NonUnitalNonAssocSemiring α] [StarRing α] {A : Matrix n n α} : Aᴴ.IsDiag ↔ A.IsDiag := ⟨fun ha => by convert ha.conjTranspose simp, IsDiag.conjTranspose⟩ theorem IsDiag.submatrix [Zero α] {A : Matrix n n α} (ha : A.IsDiag) {f : m → n} (hf : Injective f) : (A.submatrix f f).IsDiag := fun _ _ h => ha (hf.ne h) /-- `(A ⊗ B).IsDiag` if both `A` and `B` are diagonal. 
-/ theorem IsDiag.kronecker [MulZeroClass α] {A : Matrix m m α} {B : Matrix n n α} (hA : A.IsDiag) (hB : B.IsDiag) : (A ⊗ₖ B).IsDiag := by rintro ⟨a, b⟩ ⟨c, d⟩ h simp only [Prod.mk_inj, Ne, not_and_or] at h rcases h with hac | hbd · simp [hA hac] · simp [hB hbd] theorem IsDiag.isSymm [Zero α] {A : Matrix n n α} (h : A.IsDiag) : A.IsSymm := by ext i j by_cases g : i = j; · rw [g, transpose_apply] simp [h g, h (Ne.symm g)] /-- The block matrix `A.fromBlocks 0 0 D` is diagonal if `A` and `D` are diagonal. -/ theorem IsDiag.fromBlocks [Zero α] {A : Matrix m m α} {D : Matrix n n α} (ha : A.IsDiag) (hd : D.IsDiag) : (A.fromBlocks 0 0 D).IsDiag := by rintro (i | i) (j | j) hij · exact ha (ne_of_apply_ne _ hij) · rfl · rfl · exact hd (ne_of_apply_ne _ hij) /-- This is the `iff` version of `Matrix.IsDiag.fromBlocks`. -/ theorem isDiag_fromBlocks_iff [Zero α] {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} : (A.fromBlocks B C D).IsDiag ↔ A.IsDiag ∧ B = 0 ∧ C = 0 ∧ D.IsDiag := by constructor · intro h refine ⟨fun i j hij => ?_, ext fun i j => ?_, ext fun i j => ?_, fun i j hij => ?_⟩ · exact h (Sum.inl_injective.ne hij) · exact h Sum.inl_ne_inr · exact h Sum.inr_ne_inl · exact h (Sum.inr_injective.ne hij) · rintro ⟨ha, hb, hc, hd⟩ convert IsDiag.fromBlocks ha hd /-- A symmetric block matrix `A.fromBlocks B C D` is diagonal if `A` and `D` are diagonal and `B` is `0`. 
-/ theorem IsDiag.fromBlocks_of_isSymm [Zero α] {A : Matrix m m α} {C : Matrix n m α} {D : Matrix n n α} (h : (A.fromBlocks 0 C D).IsSymm) (ha : A.IsDiag) (hd : D.IsDiag) : (A.fromBlocks 0 C D).IsDiag := by rw [← (isSymm_fromBlocks_iff.1 h).2.1] exact ha.fromBlocks hd theorem mul_transpose_self_isDiag_iff_hasOrthogonalRows [Fintype n] [Mul α] [AddCommMonoid α] {A : Matrix m n α} : (A * Aᵀ).IsDiag ↔ A.HasOrthogonalRows := Iff.rfl theorem transpose_mul_self_isDiag_iff_hasOrthogonalCols [Fintype m] [Mul α] [AddCommMonoid α] {A : Matrix m n α} : (Aᵀ * A).IsDiag ↔ A.HasOrthogonalCols := Iff.rfl end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Circulant.lean
import Mathlib.Algebra.Group.Fin.Basic import Mathlib.LinearAlgebra.Matrix.Symmetric import Mathlib.Tactic.Abel /-! # Circulant matrices This file contains the definition and basic results about circulant matrices. Given a vector `v : n → α` indexed by a type that is endowed with subtraction, `Matrix.circulant v` is the matrix whose `(i, j)`th entry is `v (i - j)`. ## Main results - `Matrix.circulant`: the circulant matrix generated by a given vector `v : n → α`. - `Matrix.circulant_mul`: the product of two circulant matrices `circulant v` and `circulant w` is the circulant matrix generated by `circulant v *ᵥ w`. - `Matrix.circulant_mul_comm`: multiplication of circulant matrices commutes when the elements do. ## Implementation notes `Matrix.Fin.foo` is the `Fin n` version of `Matrix.foo`. Namely, the index type of the circulant matrices in discussion is `Fin n`. ## Tags circulant, matrix -/ variable {α β n R : Type*} namespace Matrix open Function open Matrix /-- Given the condition `[Sub n]` and a vector `v : n → α`, we define `circulant v` to be the circulant matrix generated by `v` of type `Matrix n n α`. The `(i,j)`th entry is defined to be `v (i - j)`. 
-/ def circulant [Sub n] (v : n → α) : Matrix n n α := of fun i j => v (i - j) -- TODO: set as an equation lemma for `circulant`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem circulant_apply [Sub n] (v : n → α) (i j) : circulant v i j = v (i - j) := rfl theorem circulant_col_zero_eq [SubtractionMonoid n] (v : n → α) (i : n) : circulant v i 0 = v i := congr_arg v (sub_zero _) theorem circulant_injective [SubtractionMonoid n] : Injective (circulant : (n → α) → Matrix n n α) := by intro v w h ext k rw [← circulant_col_zero_eq v, ← circulant_col_zero_eq w, h] theorem Fin.circulant_injective : ∀ n, Injective fun v : Fin n → α => circulant v | 0 => by simp [Injective] | _ + 1 => Matrix.circulant_injective @[simp] theorem circulant_inj [SubtractionMonoid n] {v w : n → α} : circulant v = circulant w ↔ v = w := circulant_injective.eq_iff @[simp] theorem Fin.circulant_inj {n} {v w : Fin n → α} : circulant v = circulant w ↔ v = w := (Fin.circulant_injective n).eq_iff theorem transpose_circulant [SubtractionMonoid n] (v : n → α) : (circulant v)ᵀ = circulant fun i => v (-i) := by ext; simp theorem conjTranspose_circulant [Star α] [SubtractionMonoid n] (v : n → α) : (circulant v)ᴴ = circulant (star fun i => v (-i)) := by ext; simp theorem Fin.transpose_circulant : ∀ {n} (v : Fin n → α), (circulant v)ᵀ = circulant fun i => v (-i) | 0 => by simp [eq_iff_true_of_subsingleton] | _ + 1 => Matrix.transpose_circulant theorem Fin.conjTranspose_circulant [Star α] : ∀ {n} (v : Fin n → α), (circulant v)ᴴ = circulant (star fun i => v (-i)) | 0 => by simp [eq_iff_true_of_subsingleton] | _ + 1 => Matrix.conjTranspose_circulant theorem map_circulant [Sub n] (v : n → α) (f : α → β) : (circulant v).map f = circulant fun i => f (v i) := ext fun _ _ => rfl theorem circulant_neg [Neg α] [Sub n] (v : n → α) : circulant (-v) = -circulant v := ext fun _ _ => rfl @[simp] theorem circulant_zero (α n) [Zero α] [Sub n] : circulant 0 = (0 : Matrix n n α) := ext fun _ _ => 
rfl theorem circulant_add [Add α] [Sub n] (v w : n → α) : circulant (v + w) = circulant v + circulant w := ext fun _ _ => rfl theorem circulant_sub [Sub α] [Sub n] (v w : n → α) : circulant (v - w) = circulant v - circulant w := ext fun _ _ => rfl /-- The product of two circulant matrices `circulant v` and `circulant w` is the circulant matrix generated by `circulant v *ᵥ w`. -/ theorem circulant_mul [NonUnitalNonAssocSemiring α] [Fintype n] [AddGroup n] (v w : n → α) : circulant v * circulant w = circulant (circulant v *ᵥ w) := by ext i j simp only [mul_apply, mulVec, circulant_apply, dotProduct] refine Fintype.sum_equiv (Equiv.subRight j) _ _ ?_ intro x simp only [Equiv.subRight_apply, sub_sub_sub_cancel_right] theorem Fin.circulant_mul [NonUnitalNonAssocSemiring α] : ∀ {n} (v w : Fin n → α), circulant v * circulant w = circulant (circulant v *ᵥ w) | 0 => by simp [eq_iff_true_of_subsingleton] | _ + 1 => Matrix.circulant_mul /-- Multiplication of circulant matrices commutes when the elements do. -/ theorem circulant_mul_comm [CommMagma α] [AddCommMonoid α] [Fintype n] [AddCommGroup n] (v w : n → α) : circulant v * circulant w = circulant w * circulant v := by ext i j simp only [mul_apply, circulant_apply] refine Fintype.sum_equiv ((Equiv.subLeft i).trans (Equiv.addRight j)) _ _ ?_ intro x simp only [Equiv.trans_apply, Equiv.subLeft_apply, Equiv.coe_addRight, add_sub_cancel_right, mul_comm] congr 2 abel theorem Fin.circulant_mul_comm [CommMagma α] [AddCommMonoid α] : ∀ {n} (v w : Fin n → α), circulant v * circulant w = circulant w * circulant v | 0 => by simp | _ + 1 => Matrix.circulant_mul_comm /-- `k • circulant v` is another circulant matrix `circulant (k • v)`. 
-/ theorem circulant_smul [Sub n] [SMul R α] (k : R) (v : n → α) : circulant (k • v) = k • circulant v := rfl @[simp] theorem circulant_single_one (α n) [Zero α] [One α] [DecidableEq n] [AddGroup n] : circulant (Pi.single 0 1 : n → α) = (1 : Matrix n n α) := by ext i j simp [one_apply, Pi.single_apply, sub_eq_zero] @[simp] theorem circulant_single (n) [Semiring α] [DecidableEq n] [AddGroup n] [Fintype n] (a : α) : circulant (Pi.single 0 a : n → α) = scalar n a := by ext i j simp [Pi.single_apply, diagonal_apply, sub_eq_zero] /-- Note we use `↑i = 0` instead of `i = 0` as `Fin 0` has no `0`. This means that we cannot state this with `Pi.single` as we did with `Matrix.circulant_single`. -/ theorem Fin.circulant_ite (α) [Zero α] [One α] : ∀ n, circulant (fun i => ite (i.1 = 0) 1 0 : Fin n → α) = 1 | 0 => by simp [eq_iff_true_of_subsingleton] | n + 1 => by rw [← circulant_single_one] congr with j simp [Pi.single_apply] /-- A circulant of `v` is symmetric iff `v` equals its reverse. -/ theorem circulant_isSymm_iff [SubtractionMonoid n] {v : n → α} : (circulant v).IsSymm ↔ ∀ i, v (-i) = v i := by rw [IsSymm, transpose_circulant, circulant_inj, funext_iff] theorem Fin.circulant_isSymm_iff : ∀ {n} {v : Fin n → α}, (circulant v).IsSymm ↔ ∀ i, v (-i) = v i | 0 => by simp [IsSymm.ext_iff, IsEmpty.forall_iff] | _ + 1 => Matrix.circulant_isSymm_iff /-- If `circulant v` is symmetric, `∀ i j : I, v (- i) = v i`. -/ theorem circulant_isSymm_apply [SubtractionMonoid n] {v : n → α} (h : (circulant v).IsSymm) (i : n) : v (-i) = v i := circulant_isSymm_iff.1 h i theorem Fin.circulant_isSymm_apply {n} {v : Fin n → α} (h : (circulant v).IsSymm) (i : Fin n) : v (-i) = v i := Fin.circulant_isSymm_iff.1 h i end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Swap.lean
import Mathlib.LinearAlgebra.Matrix.GeneralLinearGroup.Defs import Mathlib.LinearAlgebra.Matrix.Permutation import Mathlib.Data.Matrix.PEquiv /-! # Swap matrices A swap matrix indexed by `i` and `j` is the matrix that, when multiplying another matrix on the left (resp. on the right), swaps the `i`-th row with the `j`-th row (resp. the `i`-th column with the `j`-th column). Swap matrices are a special case of *elementary matrices*. For transvections see `Mathlib/LinearAlgebra/Matrix/Transvection.lean`. ## Implementation detail This is a thin wrapper around `(Equiv.swap i j).permMatrix`. -/ namespace Matrix section Def variable {R n : Type*} [Zero R] [One R] [DecidableEq n] variable (R) in /-- The swap matrix `swap R i j` is the identity matrix with the `i`-th and `j`-th rows modified such that multiplying by it on the left (resp. right) corresponds to swapping the `i`-th and `j`-th row (resp. column). -/ def swap (i j : n) : Matrix n n R := (Equiv.swap i j).permMatrix R lemma swap_comm (i j : n) : swap R i j = swap R j i := by simp only [swap, Equiv.swap_comm] @[simp] lemma transpose_swap (i j : n) : (swap R i j).transpose = swap R i j := by simp [swap] @[simp] lemma conjTranspose_swap {R : Type*} [NonAssocSemiring R] [StarRing R] (i j : n) : (swap R i j).conjTranspose = swap R i j := by simp [swap] end Def section variable {R n m : Type*} [Semiring R] [DecidableEq n] @[simp] lemma map_swap {S : Type*} [NonAssocSemiring S] (f : R →+* S) (i j : n) : (swap R i j).map f = swap S i j := by simp [swap] variable [Fintype n] lemma swap_mulVec (i j : n) (a : n → R) : swap R i j *ᵥ a = a ∘ Equiv.swap i j := by simp [swap, PEquiv.toMatrix_toPEquiv_mulVec] lemma vecMul_swap (i j : n) (a : n → R) : a ᵥ* swap R i j = a ∘ Equiv.swap i j := by simp [swap, PEquiv.vecMul_toMatrix_toPEquiv] @[simp] lemma swap_mulVec_apply (i j : n) (a : n → R) : (swap R i j *ᵥ a) i = a j := by simp [swap, PEquiv.toMatrix_toPEquiv_mulVec] @[simp] lemma vecMul_swap_apply (i j : n) (a : n → R) : (a ᵥ* 
swap R i j) i = a j := by simp [swap, PEquiv.vecMul_toMatrix_toPEquiv] /-- Multiplying with `swap R i j` on the left swaps the `i`-th row with the `j`-th row. -/ @[simp] lemma swap_mul_apply_left (i j : n) (a : m) (g : Matrix n m R) : (swap R i j * g) i a = g j a := by simp [swap, PEquiv.toMatrix_toPEquiv_mul] /-- Multiplying with `swap R i j` on the left swaps the `j`-th row with the `i`-th row. -/ @[simp] lemma swap_mul_apply_right (i j : n) (a : m) (g : Matrix n m R) : (swap R i j * g) j a = g i a := by rw [swap_comm, swap_mul_apply_left] lemma swap_mul_of_ne {i j a : n} {b : m} (hai : a ≠ i) (haj : a ≠ j) (g : Matrix n m R) : (swap R i j * g) a b = g a b := by simp [swap, PEquiv.toMatrix_toPEquiv_mul, Equiv.swap_apply_of_ne_of_ne hai haj] /-- Multiplying with `swap R i j` on the right swaps the `i`-th column with the `j`-th column. -/ @[simp] lemma mul_swap_apply_left (i j : n) (a : m) (g : Matrix m n R) : (g * swap R i j) a i = g a j := by simp [swap, PEquiv.mul_toMatrix_toPEquiv] /-- Multiplying with `swap R i j` on the right swaps the `j`-th column with the `i`-th column. -/ @[simp] lemma mul_swap_apply_right (i j : n) (a : m) (g : Matrix m n R) : (g * swap R i j) a j = g a i := by rw [swap_comm, mul_swap_apply_left] lemma mul_swap_of_ne {i j b : n} {a : m} (hbi : b ≠ i) (hbj : b ≠ j) (g : Matrix m n R) : (g * swap R i j) a b = g a b := by simp [swap, PEquiv.mul_toMatrix_toPEquiv, Equiv.swap_apply_of_ne_of_ne hbi hbj] /-- Swap matrices are self inverse. -/ lemma swap_mul_self (i j : n) : swap R i j * swap R i j = 1 := by simp only [swap] rw [← Equiv.swap_inv, Equiv.Perm.inv_def] simp [← PEquiv.toMatrix_trans, ← Equiv.toPEquiv_trans] end namespace GeneralLinearGroup variable (R : Type*) {n : Type*} [CommRing R] [DecidableEq n] [Fintype n] /-- `Matrix.swap` as an element of `GL n R`. 
-/ @[simps] def swap (i j : n) : GL n R where val := Matrix.swap R i j inv := Matrix.swap R i j val_inv := swap_mul_self i j inv_val := swap_mul_self i j variable {R} {S : Type*} [CommRing S] (f : R →+* S) @[simp] lemma map_swap (i j : n) : (swap R i j).map f = swap S i j := by ext : 1 simp [swap] end GeneralLinearGroup end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/RowCol.lean
import Mathlib.LinearAlgebra.Matrix.ConjTranspose /-! # Row and column matrices This file provides results about row and column matrices. ## Main definitions * `Matrix.replicateRow ι r : Matrix ι n α`: the matrix where every row is the vector `r : n → α` * `Matrix.replicateCol ι c : Matrix m ι α`: the matrix where every column is the vector `c : m → α` * `Matrix.updateRow M i r`: update the `i`th row of `M` to `r` * `Matrix.updateCol M j c`: update the `j`th column of `M` to `c` -/ variable {l m n o : Type*} universe u v w variable {R : Type*} {α : Type v} {β : Type w} namespace Matrix /-- `Matrix.replicateCol ι u` is the matrix with all columns equal to the vector `u`. To get a column matrix with exactly one column, `Matrix.replicateCol (Fin 1) u` is the canonical choice. -/ def replicateCol (ι : Type*) (w : m → α) : Matrix m ι α := of fun x _ => w x -- TODO: set as an equation lemma for `replicateCol`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem replicateCol_apply {ι : Type*} (w : m → α) (i) (j : ι) : replicateCol ι w i j = w i := rfl /-- `Matrix.replicateRow ι u` is the matrix with all rows equal to the vector `u`. To get a row matrix with exactly one row, `Matrix.replicateRow (Fin 1) u` is the canonical choice. 
-/ def replicateRow (ι : Type*) (v : n → α) : Matrix ι n α := of fun _ y => v y variable {ι : Type*} -- TODO: set as an equation lemma for `replicateRow`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem replicateRow_apply (v : n → α) (i : ι) (j) : replicateRow ι v i j = v j := rfl @[simp] theorem vecMulVec_one [MulOneClass R] (x : n → R) : vecMulVec x 1 = replicateCol m x := by ext; simp [vecMulVec_apply] @[simp] theorem one_vecMulVec [MulOneClass R] (x : n → R) : vecMulVec 1 x = replicateRow m x := by ext; simp [vecMulVec_apply] theorem replicateCol_injective [Nonempty ι] : Function.Injective (replicateCol ι : (m → α) → Matrix m ι α) := by inhabit ι exact fun _x _y h => funext fun i => congr_fun₂ h i default @[simp] theorem replicateCol_inj [Nonempty ι] {v w : m → α} : replicateCol ι v = replicateCol ι w ↔ v = w := replicateCol_injective.eq_iff @[simp] theorem replicateCol_zero [Zero α] : replicateCol ι (0 : m → α) = 0 := rfl @[simp] theorem replicateCol_eq_zero [Zero α] [Nonempty ι] (v : m → α) : replicateCol ι v = 0 ↔ v = 0 := replicateCol_inj @[simp] theorem replicateCol_add [Add α] (v w : m → α) : replicateCol ι (v + w) = replicateCol ι v + replicateCol ι w := by ext rfl @[simp] theorem replicateCol_smul [SMul R α] (x : R) (v : m → α) : replicateCol ι (x • v) = x • replicateCol ι v := by ext rfl theorem replicateRow_injective [Nonempty ι] : Function.Injective (replicateRow ι : (n → α) → Matrix ι n α) := by inhabit ι exact fun _x _y h => funext fun j => congr_fun₂ h default j @[simp] theorem replicateRow_inj [Nonempty ι] {v w : n → α} : replicateRow ι v = replicateRow ι w ↔ v = w := replicateRow_injective.eq_iff @[simp] theorem replicateRow_zero [Zero α] : replicateRow ι (0 : n → α) = 0 := rfl @[simp] theorem replicateRow_eq_zero [Zero α] [Nonempty ι] (v : n → α) : replicateRow ι v = 0 ↔ v = 0 := replicateRow_inj @[simp] theorem replicateRow_add [Add α] (v w : m → α) : replicateRow ι (v + w) = replicateRow ι v + replicateRow ι w 
:= by ext rfl @[simp] theorem replicateRow_smul [SMul R α] (x : R) (v : m → α) : replicateRow ι (x • v) = x • replicateRow ι v := by ext rfl @[simp] theorem transpose_replicateCol (v : m → α) : (replicateCol ι v)ᵀ = replicateRow ι v := by ext rfl @[simp] theorem transpose_replicateRow (v : m → α) : (replicateRow ι v)ᵀ = replicateCol ι v := by ext rfl @[simp] theorem conjTranspose_replicateCol [Star α] (v : m → α) : (replicateCol ι v)ᴴ = replicateRow ι (star v) := by ext rfl @[simp] theorem conjTranspose_replicateRow [Star α] (v : m → α) : (replicateRow ι v)ᴴ = replicateCol ι (star v) := by ext rfl theorem replicateRow_vecMul [Fintype m] [NonUnitalNonAssocSemiring α] (M : Matrix m n α) (v : m → α) : replicateRow ι (v ᵥ* M) = replicateRow ι v * M := by ext rfl theorem replicateCol_vecMul [Fintype m] [NonUnitalNonAssocSemiring α] (M : Matrix m n α) (v : m → α) : replicateCol ι (v ᵥ* M) = (replicateRow ι v * M)ᵀ := by ext rfl theorem replicateCol_mulVec [Fintype n] [NonUnitalNonAssocSemiring α] (M : Matrix m n α) (v : n → α) : replicateCol ι (M *ᵥ v) = M * replicateCol ι v := by ext rfl theorem replicateRow_mulVec [Fintype n] [NonUnitalNonAssocSemiring α] (M : Matrix m n α) (v : n → α) : replicateRow ι (M *ᵥ v) = (M * replicateCol ι v)ᵀ := by ext rfl theorem replicateRow_mulVec_eq_const [Fintype m] [NonUnitalNonAssocSemiring α] (v w : m → α) : replicateRow ι v *ᵥ w = Function.const _ (v ⬝ᵥ w) := rfl theorem mulVec_replicateCol_eq_const [Fintype m] [NonUnitalNonAssocSemiring α] (v w : m → α) : v ᵥ* replicateCol ι w = Function.const _ (v ⬝ᵥ w) := rfl theorem replicateRow_mul_replicateCol [Fintype m] [Mul α] [AddCommMonoid α] (v w : m → α) : replicateRow ι v * replicateCol ι w = of fun _ _ => v ⬝ᵥ w := rfl @[simp] theorem replicateRow_mul_replicateCol_apply [Fintype m] [Mul α] [AddCommMonoid α] (v w : m → α) (i j) : (replicateRow ι v * replicateCol ι w) i j = v ⬝ᵥ w := rfl @[simp] theorem diag_replicateCol_mul_replicateRow [Mul α] [AddCommMonoid α] [Unique ι] (a b : n → 
α) : diag (replicateCol ι a * replicateRow ι b) = a * b := by ext simp [Matrix.mul_apply, replicateCol, replicateRow] variable (ι) theorem vecMulVec_eq [Mul α] [AddCommMonoid α] [Unique ι] (w : m → α) (v : n → α) : vecMulVec w v = replicateCol ι w * replicateRow ι v := by ext simp [vecMulVec, mul_apply] /-! ### Updating rows and columns -/ /-- Update, i.e. replace the `i`th row of matrix `A` with the values in `b`. -/ def updateRow [DecidableEq m] (M : Matrix m n α) (i : m) (b : n → α) : Matrix m n α := of <| Function.update M i b /-- Update, i.e. replace the `j`th column of matrix `A` with the values in `b`. -/ def updateCol [DecidableEq n] (M : Matrix m n α) (j : n) (b : m → α) : Matrix m n α := of fun i => Function.update (M i) j (b i) variable {M : Matrix m n α} {i : m} {j : n} {b : n → α} {c : m → α} @[simp] theorem updateRow_self [DecidableEq m] : updateRow M i b i = b := Function.update_self (β := fun _ => (n → α)) i b M @[simp] theorem updateCol_self [DecidableEq n] : updateCol M j c i j = c i := Function.update_self (β := fun _ => α) j (c i) (M i) @[simp] theorem updateRow_ne [DecidableEq m] {i' : m} (i_ne : i' ≠ i) : updateRow M i b i' = M i' := Function.update_of_ne (β := fun _ => (n → α)) i_ne b M @[simp] theorem updateCol_ne [DecidableEq n] {j' : n} (j_ne : j' ≠ j) : updateCol M j c i j' = M i j' := Function.update_of_ne (β := fun _ => α) j_ne (c i) (M i) theorem updateRow_apply [DecidableEq m] {i' : m} : updateRow M i b i' j = if i' = i then b j else M i' j := by by_cases h : i' = i · rw [h, updateRow_self, if_pos rfl] · rw [updateRow_ne h, if_neg h] theorem updateCol_apply [DecidableEq n] {j' : n} : updateCol M j c i j' = if j' = j then c i else M i j' := by by_cases h : j' = j · rw [h, updateCol_self, if_pos rfl] · rw [updateCol_ne h, if_neg h] @[simp] theorem updateCol_subsingleton [Subsingleton n] (A : Matrix m n R) (i : n) (b : m → R) : A.updateCol i b = (replicateCol (Fin 1) b).submatrix id (Function.const n 0) := by ext x y simp 
[Subsingleton.elim i y] @[simp] theorem updateRow_subsingleton [Subsingleton m] (A : Matrix m n R) (i : m) (b : n → R) : A.updateRow i b = (replicateRow (Fin 1) b).submatrix (Function.const m 0) id := by ext x y simp [Subsingleton.elim i x] theorem map_updateRow [DecidableEq m] (f : α → β) : map (updateRow M i b) f = updateRow (M.map f) i (f ∘ b) := by ext rw [updateRow_apply, map_apply, map_apply, updateRow_apply] exact apply_ite f _ _ _ theorem map_updateCol [DecidableEq n] (f : α → β) : map (updateCol M j c) f = updateCol (M.map f) j (f ∘ c) := by ext rw [updateCol_apply, map_apply, map_apply, updateCol_apply] exact apply_ite f _ _ _ theorem updateRow_transpose [DecidableEq n] : updateRow Mᵀ j c = (updateCol M j c)ᵀ := by ext rw [transpose_apply, updateRow_apply, updateCol_apply] rfl theorem updateCol_transpose [DecidableEq m] : updateCol Mᵀ i b = (updateRow M i b)ᵀ := by ext rw [transpose_apply, updateRow_apply, updateCol_apply] rfl theorem updateRow_conjTranspose [DecidableEq n] [Star α] : updateRow Mᴴ j (star c) = (updateCol M j c)ᴴ := by rw [conjTranspose, conjTranspose, transpose_map, transpose_map, updateRow_transpose, map_updateCol] rfl theorem updateCol_conjTranspose [DecidableEq m] [Star α] : updateCol Mᴴ i (star b) = (updateRow M i b)ᴴ := by rw [conjTranspose, conjTranspose, transpose_map, transpose_map, updateCol_transpose, map_updateRow] rfl @[simp] theorem updateRow_eq_self [DecidableEq m] (A : Matrix m n α) (i : m) : A.updateRow i (A i) = A := Function.update_eq_self i A @[simp] theorem updateCol_eq_self [DecidableEq n] (A : Matrix m n α) (i : n) : (A.updateCol i fun j => A j i) = A := funext fun j => Function.update_eq_self i (A j) @[simp] theorem updateRow_zero_zero [DecidableEq m] [Zero α] (i : m) : (0 : Matrix m n α).updateRow i 0 = 0 := updateRow_eq_self _ i @[simp] theorem updateCol_zero_zero [DecidableEq n] [Zero α] (i : n) : (0 : Matrix m n α).updateCol i 0 = 0 := updateCol_eq_self _ i theorem diagonal_updateCol_single [DecidableEq n] [Zero 
α] (v : n → α) (i : n) (x : α) : (diagonal v).updateCol i (Pi.single i x) = diagonal (Function.update v i x) := by ext j k obtain rfl | hjk := eq_or_ne j k · rw [diagonal_apply_eq] obtain rfl | hji := eq_or_ne j i · rw [updateCol_self, Pi.single_eq_same, Function.update_self] · rw [updateCol_ne hji, diagonal_apply_eq, Function.update_of_ne hji] · rw [diagonal_apply_ne _ hjk] obtain rfl | hki := eq_or_ne k i · rw [updateCol_self, Pi.single_eq_of_ne hjk] · rw [updateCol_ne hki, diagonal_apply_ne _ hjk] theorem diagonal_updateRow_single [DecidableEq n] [Zero α] (v : n → α) (i : n) (x : α) : (diagonal v).updateRow i (Pi.single i x) = diagonal (Function.update v i x) := by rw [← diagonal_transpose, updateRow_transpose, diagonal_updateCol_single, diagonal_transpose] @[simp] theorem updateRow_idem [DecidableEq m] (A : Matrix m n α) (i : m) (x y : n → α) : (A.updateRow i x).updateRow i y = A.updateRow i y := Function.update_idem _ _ _ theorem updateRow_comm [DecidableEq m] (A : Matrix m n α) {i i' : m} (h : i ≠ i') (x y : n → α) : (A.updateRow i x).updateRow i' y = (A.updateRow i' y).updateRow i x := Function.update_comm h _ _ _ @[simp] theorem updateCol_idem [DecidableEq n] (A : Matrix m n α) (j : n) (x y : m → α) : (A.updateCol j x).updateCol j y = A.updateCol j y := by simpa only [updateRow_transpose] using congr_arg transpose <| updateRow_idem Aᵀ j x y theorem updateCol_comm [DecidableEq n] (A : Matrix m n α) {j j' : n} (h : j ≠ j') (x y : m → α) : (A.updateCol j x).updateCol j' y = (A.updateCol j' y).updateCol j x := by simpa only [updateRow_transpose] using congr_arg transpose <| updateRow_comm Aᵀ h x y /-! Updating rows and columns commutes in the obvious way with reindexing the matrix. 
-/ theorem updateRow_submatrix_equiv [DecidableEq l] [DecidableEq m] (A : Matrix m n α) (i : l) (r : o → α) (e : l ≃ m) (f : o ≃ n) : updateRow (A.submatrix e f) i r = (A.updateRow (e i) fun j => r (f.symm j)).submatrix e f := by ext i' j simp only [submatrix_apply, updateRow_apply, Equiv.apply_eq_iff_eq, Equiv.symm_apply_apply] theorem submatrix_updateRow_equiv [DecidableEq l] [DecidableEq m] (A : Matrix m n α) (i : m) (r : n → α) (e : l ≃ m) (f : o ≃ n) : (A.updateRow i r).submatrix e f = updateRow (A.submatrix e f) (e.symm i) fun i => r (f i) := Eq.trans (by simp_rw [Equiv.apply_symm_apply]) (updateRow_submatrix_equiv A _ _ e f).symm theorem updateCol_submatrix_equiv [DecidableEq o] [DecidableEq n] (A : Matrix m n α) (j : o) (c : l → α) (e : l ≃ m) (f : o ≃ n) : updateCol (A.submatrix e f) j c = (A.updateCol (f j) fun i => c (e.symm i)).submatrix e f := by simpa only [← transpose_submatrix, updateRow_transpose] using congr_arg transpose (updateRow_submatrix_equiv Aᵀ j c f e) theorem submatrix_updateCol_equiv [DecidableEq o] [DecidableEq n] (A : Matrix m n α) (j : n) (c : m → α) (e : l ≃ m) (f : o ≃ n) : (A.updateCol j c).submatrix e f = updateCol (A.submatrix e f) (f.symm j) fun i => c (e i) := Eq.trans (by simp_rw [Equiv.apply_symm_apply]) (updateCol_submatrix_equiv A _ _ e f).symm /-! `reindex` versions of the above `submatrix` lemmas for convenience. 
-/ theorem updateRow_reindex [DecidableEq l] [DecidableEq m] (A : Matrix m n α) (i : l) (r : o → α) (e : m ≃ l) (f : n ≃ o) : updateRow (reindex e f A) i r = reindex e f (A.updateRow (e.symm i) fun j => r (f j)) := updateRow_submatrix_equiv _ _ _ _ _ theorem reindex_updateRow [DecidableEq l] [DecidableEq m] (A : Matrix m n α) (i : m) (r : n → α) (e : m ≃ l) (f : n ≃ o) : reindex e f (A.updateRow i r) = updateRow (reindex e f A) (e i) fun i => r (f.symm i) := submatrix_updateRow_equiv _ _ _ _ _ theorem updateCol_reindex [DecidableEq o] [DecidableEq n] (A : Matrix m n α) (j : o) (c : l → α) (e : m ≃ l) (f : n ≃ o) : updateCol (reindex e f A) j c = reindex e f (A.updateCol (f.symm j) fun i => c (e i)) := updateCol_submatrix_equiv _ _ _ _ _ theorem reindex_updateCol [DecidableEq o] [DecidableEq n] (A : Matrix m n α) (j : n) (c : m → α) (e : m ≃ l) (f : n ≃ o) : reindex e f (A.updateCol j c) = updateCol (reindex e f A) (f j) fun i => c (e.symm i) := submatrix_updateCol_equiv _ _ _ _ _ theorem single_eq_updateRow_zero [DecidableEq m] [DecidableEq n] [Zero α] (i : m) (j : n) (r : α) : single i j r = updateRow 0 i (Pi.single j r) := single_eq_of_single_single _ _ _ theorem single_eq_updateCol_zero [DecidableEq m] [DecidableEq n] [Zero α] (i : m) (j : n) (r : α) : single i j r = updateCol 0 j (Pi.single i r) := by simpa [← updateCol_transpose] using congr($(single_eq_updateRow_zero j i r)ᵀ) section mul theorem updateRow_mulVec [DecidableEq l] [Fintype m] [NonUnitalNonAssocSemiring α] (A : Matrix l m α) (i : l) (c : m → α) (v : m → α) : A.updateRow i c *ᵥ v = Function.update (A *ᵥ v) i (c ⬝ᵥ v) := by ext i' obtain rfl | hi := eq_or_ne i' i · simp [mulVec] · simp [mulVec, hi] theorem vecMul_updateCol [DecidableEq n] [Fintype m] [NonUnitalNonAssocSemiring α] (v : m → α) (B : Matrix m n α) (j : n) (r : m → α) : v ᵥ* B.updateCol j r = Function.update (v ᵥ* B) j (v ⬝ᵥ r) := by ext j' obtain rfl | hj := eq_or_ne j' j · simp [vecMul] · simp [vecMul, hj] theorem update_vecMulVec 
[DecidableEq m] [Mul α] (u : m → α) (v : n → α) (i : m) (a : α) : vecMulVec (Function.update u i a) v = (vecMulVec u v).updateRow i (a • v) := by ext i' j obtain rfl | hi := eq_or_ne i' i · simp [vecMulVec_apply] · simp [vecMulVec_apply, hi] theorem vecMulVec_update [DecidableEq n] [Mul α] (u : m → α) (v : n → α) (j : n) (a : α) : vecMulVec u (Function.update v j a) = (vecMulVec u v).updateCol j (MulOpposite.op a • u) := by ext i j' obtain rfl | hi := eq_or_ne j' j · simp [vecMulVec_apply] · simp [vecMulVec_apply, hi] theorem updateRow_mul [DecidableEq l] [Fintype m] [NonUnitalNonAssocSemiring α] (A : Matrix l m α) (i : l) (r : m → α) (B : Matrix m n α) : A.updateRow i r * B = (A * B).updateRow i (r ᵥ* B) := by ext i' j' obtain rfl | hi := eq_or_ne i' i · simp [mul_apply, vecMul, dotProduct] · simp [mul_apply, hi] theorem mul_updateCol [DecidableEq n] [Fintype m] [NonUnitalNonAssocSemiring α] (A : Matrix l m α) (B : Matrix m n α) (j : n) (c : m → α) : A * B.updateCol j c = (A * B).updateCol j (A *ᵥ c) := by ext i' j' obtain rfl | hj := eq_or_ne j' j · simp [mul_apply, mulVec, dotProduct] · simp [mul_apply, hj] open RightActions in theorem mul_single_eq_updateCol_zero [DecidableEq m] [DecidableEq n] [Fintype m] [NonUnitalNonAssocSemiring α] (A : Matrix l m α) (i : m) (j : n) (r : α) : A * single i j r = updateCol 0 j (A.col i <• r) := by rw [single_eq_updateCol_zero, mul_updateCol, Matrix.mul_zero, mulVec_single] theorem single_mul_eq_updateRow_zero [DecidableEq l] [DecidableEq m] [Fintype m] [NonUnitalNonAssocSemiring α] (i : l) (j : m) (r : α) (B : Matrix m n α) : single i j r * B = updateRow 0 i (r • B.row j) := by rw [single_eq_updateRow_zero, updateRow_mul, Matrix.zero_mul, single_vecMul] @[simp] theorem updateRow_zero_mul_updateCol_zero [DecidableEq l] [DecidableEq n] [Fintype m] [NonUnitalNonAssocSemiring α] (i : l) (r : m → α) (j : n) (c : m → α) : (0 : Matrix l m α).updateRow i r * (0 : Matrix m n α).updateCol j c = single i j (r ⬝ᵥ c) := by rw 
[updateRow_mul, vecMul_updateCol, mul_updateCol, single_eq_of_single_single, Matrix.zero_mul, vecMul_zero, zero_mulVec, updateCol_zero_zero, updateRow, ← Pi.single, ← Pi.single] end mul end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/LDL.lean
import Mathlib.Analysis.InnerProductSpace.GramSchmidtOrtho import Mathlib.LinearAlgebra.Matrix.PosDef /-! # LDL decomposition This file proves the LDL-decomposition of matrices: Any positive definite matrix `S` can be decomposed as `S = LDLᴴ` where `L` is a lower-triangular matrix and `D` is a diagonal matrix. ## Main definitions * `LDL.lower` is the lower triangular matrix `L`. * `LDL.lowerInv` is the inverse of the lower triangular matrix `L`. * `LDL.diag` is the diagonal matrix `D`. ## Main result * `LDL.lower_conj_diag` states that any positive definite matrix can be decomposed as `LDLᴴ`. ## TODO * Prove that `LDL.lower` is lower triangular from `LDL.lowerInv_triangular`. -/ open Module variable {𝕜 : Type*} [RCLike 𝕜] variable {n : Type*} [LinearOrder n] [WellFoundedLT n] [LocallyFiniteOrderBot n] section set_options set_option quotPrecheck false local notation "⟪" x ", " y "⟫ₑ" => inner 𝕜 (WithLp.toLp 2 x) (WithLp.toLp 2 y) open Matrix InnerProductSpace open scoped ComplexOrder variable {S : Matrix n n 𝕜} [Fintype n] (hS : S.PosDef) /-- The inverse of the lower triangular matrix `L` of the LDL-decomposition. It is obtained by applying Gram-Schmidt-Orthogonalization w.r.t. the inner product induced by `Sᵀ` on the standard basis vectors `Pi.basisFun`. 
-/ noncomputable def LDL.lowerInv : Matrix n n 𝕜 := @gramSchmidt 𝕜 (n → 𝕜) _ (_ :) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ (Pi.basisFun 𝕜 n) theorem LDL.lowerInv_eq_gramSchmidtBasis : LDL.lowerInv hS = ((Pi.basisFun 𝕜 n).toMatrix (@gramSchmidtBasis 𝕜 (n → 𝕜) _ (_ :) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ (Pi.basisFun 𝕜 n)))ᵀ := by letI := NormedAddCommGroup.ofMatrix hS.transpose letI := InnerProductSpace.ofMatrix hS.transpose ext i j rw [LDL.lowerInv, Basis.coePiBasisFun.toMatrix_eq_transpose, coe_gramSchmidtBasis] rfl noncomputable instance LDL.invertibleLowerInv : Invertible (LDL.lowerInv hS) := by rw [LDL.lowerInv_eq_gramSchmidtBasis] haveI := Basis.invertibleToMatrix (Pi.basisFun 𝕜 n) (@gramSchmidtBasis 𝕜 (n → 𝕜) _ (_ :) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ (Pi.basisFun 𝕜 n)) infer_instance theorem LDL.lowerInv_orthogonal {i j : n} (h₀ : i ≠ j) : ⟪LDL.lowerInv hS i, Sᵀ *ᵥ LDL.lowerInv hS j⟫ₑ = 0 := @gramSchmidt_orthogonal 𝕜 _ _ (_ :) (InnerProductSpace.ofMatrix hS.transpose) _ _ _ _ _ _ _ h₀ /-- The entries of the diagonal matrix `D` of the LDL decomposition. -/ noncomputable def LDL.diagEntries : n → 𝕜 := fun i => ⟪star (LDL.lowerInv hS i), S *ᵥ star (LDL.lowerInv hS i)⟫ₑ /-- The diagonal matrix `D` of the LDL decomposition. -/ noncomputable def LDL.diag : Matrix n n 𝕜 := Matrix.diagonal (LDL.diagEntries hS) theorem LDL.lowerInv_triangular {i j : n} (hij : i < j) : LDL.lowerInv hS i j = 0 := by rw [← @gramSchmidt_triangular 𝕜 (n → 𝕜) _ (_ :) (InnerProductSpace.ofMatrix hS.transpose) n _ _ _ i j hij (Pi.basisFun 𝕜 n), Pi.basisFun_repr, LDL.lowerInv] /-- Inverse statement of **LDL decomposition**: we can conjugate a positive definite matrix by some lower triangular matrix and get a diagonal matrix. 
-/ theorem LDL.diag_eq_lowerInv_conj : LDL.diag hS = LDL.lowerInv hS * S * (LDL.lowerInv hS)ᴴ := by ext i j by_cases hij : i = j · simp only [diag, diagEntries, EuclideanSpace.inner_toLp_toLp, star_star, hij, diagonal_apply_eq, Matrix.mul_assoc, dotProduct_comm] rfl · simp only [LDL.diag, hij, diagonal_apply_ne, Ne, not_false_iff, mul_mul_apply] rw [conjTranspose, transpose_map, transpose_transpose, dotProduct_mulVec, (LDL.lowerInv_orthogonal hS fun h : j = i => hij h.symm).symm, ← inner_conj_symm, mulVec_transpose, EuclideanSpace.inner_toLp_toLp, ← RCLike.star_def, ← star_dotProduct_star, star_star] rfl /-- The lower triangular matrix `L` of the LDL decomposition. -/ noncomputable def LDL.lower := (LDL.lowerInv hS)⁻¹ /-- **LDL decomposition**: any positive definite matrix `S` can be decomposed as `S = LDLᴴ` where `L` is a lower-triangular matrix and `D` is a diagonal matrix. -/ theorem LDL.lower_conj_diag : LDL.lower hS * LDL.diag hS * (LDL.lower hS)ᴴ = S := by rw [LDL.lower, conjTranspose_nonsing_inv, Matrix.mul_assoc, Matrix.inv_mul_eq_iff_eq_mul_of_invertible (LDL.lowerInv hS), Matrix.mul_inv_eq_iff_eq_mul_of_invertible] exact LDL.diag_eq_lowerInv_conj hS end set_options
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Diagonal.lean
import Mathlib.LinearAlgebra.Dimension.LinearMap import Mathlib.LinearAlgebra.Matrix.ToLin /-! # Diagonal matrices This file contains some results on the linear map corresponding to a diagonal matrix (`range`, `ker` and `rank`). ## Tags matrix, diagonal, linear_map -/ noncomputable section open LinearMap Matrix Set Submodule Matrix universe u v w namespace Matrix section CommSemiring variable {n : Type*} [Fintype n] [DecidableEq n] {R : Type v} [CommSemiring R] theorem proj_diagonal (i : n) (w : n → R) : (proj i).comp (toLin' (diagonal w)) = w i • proj i := LinearMap.ext fun _ => mulVec_diagonal _ _ _ theorem diagonal_comp_single (w : n → R) (i : n) : (diagonal w).toLin'.comp (LinearMap.single R (fun _ : n => R) i) = w i • LinearMap.single R (fun _ : n => R) i := LinearMap.ext fun x => (diagonal_mulVec_single w _ _).trans (Pi.single_smul' i (w i) x) theorem diagonal_toLin' (w : n → R) : toLin' (diagonal w) = LinearMap.pi fun i => w i • LinearMap.proj i := LinearMap.ext fun _ => funext fun _ => mulVec_diagonal _ _ _ end CommSemiring section Semifield variable {m : Type*} [Fintype m] {K : Type u} [Semifield K] -- maybe try to relax the universe constraint theorem ker_diagonal_toLin' [DecidableEq m] (w : m → K) : ker (toLin' (diagonal w)) = ⨆ i ∈ { i | w i = 0 }, LinearMap.range (LinearMap.single K (fun _ => K) i) := by rw [← comap_bot, ← iInf_ker_proj, comap_iInf] have := fun i : m => ker_comp (toLin' (diagonal w)) (proj i) simp only [← this, proj_diagonal, ker_smul'] have : univ ⊆ { i : m | w i = 0 } ∪ { i : m | w i = 0 }ᶜ := by rw [Set.union_compl_self] exact (iSup_range_single_eq_iInf_ker_proj K (fun _ : m => K) disjoint_compl_right this (Set.toFinite _)).symm theorem range_diagonal [DecidableEq m] (w : m → K) : LinearMap.range (toLin' (diagonal w)) = ⨆ i ∈ { i | w i ≠ 0 }, LinearMap.range (LinearMap.single K (fun _ => K) i) := by dsimp only [mem_setOf_eq] rw [← Submodule.map_top, ← iSup_range_single, Submodule.map_iSup] congr; funext i rw [← LinearMap.range_comp, 
diagonal_comp_single, ← range_smul'] end Semifield end Matrix namespace LinearMap section Field variable {m : Type*} [Fintype m] {K : Type u} [Field K] theorem rank_diagonal [DecidableEq m] [DecidableEq K] (w : m → K) : LinearMap.rank (toLin' (diagonal w)) = Fintype.card { i // w i ≠ 0 } := by have hu : univ ⊆ { i : m | w i = 0 }ᶜ ∪ { i : m | w i = 0 } := by rw [Set.compl_union_self] have hd : Disjoint { i : m | w i ≠ 0 } { i : m | w i = 0 } := disjoint_compl_left have B₁ := iSup_range_single_eq_iInf_ker_proj K (fun _ : m => K) hd hu (Set.toFinite _) have B₂ := iInfKerProjEquiv K (fun _ ↦ K) hd hu rw [LinearMap.rank, range_diagonal, B₁, ← @rank_fun' K] apply LinearEquiv.rank_eq apply B₂ end Field end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/PosDef.lean
import Mathlib.Algebra.Order.Ring.Star import Mathlib.LinearAlgebra.Matrix.DotProduct import Mathlib.LinearAlgebra.Matrix.Hermitian import Mathlib.LinearAlgebra.Matrix.Vec import Mathlib.LinearAlgebra.QuadraticForm.Basic /-! # Positive Definite Matrices This file defines positive (semi)definite matrices and connects the notion to positive definiteness of quadratic forms. In `Mathlib/Analysis/Matrix/Order.lean`, positive semi-definiteness is used to define the partial order on matrices on `ℝ` or `ℂ`. ## Main definitions * `Matrix.PosSemidef` : a matrix `M : Matrix n n R` is positive semidefinite if it is Hermitian and `xᴴMx` is nonnegative for all `x`. * `Matrix.PosDef` : a matrix `M : Matrix n n R` is positive definite if it is Hermitian and `xᴴMx` is greater than zero for all nonzero `x`. * `Matrix.InnerProductSpace.ofMatrix`: the inner product on `n → 𝕜` induced by a positive definite matrix `M`, and is given by `⟪x, y⟫ = xᴴMy`. ## Main results * `Matrix.PosSemidef.fromBlocks₁₁` and `Matrix.PosSemidef.fromBlocks₂₂`: If a matrix `A` is positive definite, then `[A B; Bᴴ D]` is positive semidefinite if and only if `D - Bᴴ A⁻¹ B` is positive semidefinite. * `Matrix.PosDef.isUnit`: A positive definite matrix in a field is invertible. -/ -- TODO: -- assert_not_exists MonoidAlgebra assert_not_exists Matrix.IsHermitian.eigenvalues open WithLp open scoped ComplexOrder namespace Matrix variable {m n R R' 𝕜 : Type*} variable [Fintype m] [Fintype n] variable [Ring R] [PartialOrder R] [StarRing R] variable [CommRing R'] [PartialOrder R'] [StarRing R'] variable [RCLike 𝕜] open Matrix /-! ## Positive semidefinite matrices -/ /-- A matrix `M : Matrix n n R` is positive semidefinite if it is Hermitian and `xᴴ * M * x` is nonnegative for all `x`. 
-/ def PosSemidef (M : Matrix n n R) := M.IsHermitian ∧ ∀ x : n → R, 0 ≤ star x ⬝ᵥ (M *ᵥ x) protected theorem PosSemidef.diagonal [StarOrderedRing R] [DecidableEq n] {d : n → R} (h : 0 ≤ d) : PosSemidef (diagonal d) := ⟨isHermitian_diagonal_of_self_adjoint _ <| funext fun i => IsSelfAdjoint.of_nonneg (h i), fun x => by refine Fintype.sum_nonneg fun i => ?_ simpa only [mulVec_diagonal, ← mul_assoc] using star_left_conjugate_nonneg (h i) _⟩ /-- A diagonal matrix is positive semidefinite iff its diagonal entries are nonnegative. -/ lemma posSemidef_diagonal_iff [StarOrderedRing R] [DecidableEq n] {d : n → R} : PosSemidef (diagonal d) ↔ (∀ i : n, 0 ≤ d i) := ⟨fun ⟨_, hP⟩ i ↦ by simpa using hP (Pi.single i 1), .diagonal⟩ namespace PosSemidef theorem isHermitian {M : Matrix n n R} (hM : M.PosSemidef) : M.IsHermitian := hM.1 theorem re_dotProduct_nonneg {M : Matrix n n 𝕜} (hM : M.PosSemidef) (x : n → 𝕜) : 0 ≤ RCLike.re (star x ⬝ᵥ (M *ᵥ x)) := RCLike.nonneg_iff.mp (hM.2 _) |>.1 lemma conjTranspose_mul_mul_same {A : Matrix n n R} (hA : PosSemidef A) {m : Type*} [Fintype m] (B : Matrix n m R) : PosSemidef (Bᴴ * A * B) := by constructor · exact isHermitian_conjTranspose_mul_mul B hA.1 · intro x simpa only [star_mulVec, dotProduct_mulVec, vecMul_vecMul] using hA.2 (B *ᵥ x) lemma mul_mul_conjTranspose_same {A : Matrix n n R} (hA : PosSemidef A) {m : Type*} [Fintype m] (B : Matrix m n R) : PosSemidef (B * A * Bᴴ) := by simpa only [conjTranspose_conjTranspose] using hA.conjTranspose_mul_mul_same Bᴴ theorem submatrix {M : Matrix n n R} (hM : M.PosSemidef) (e : m → n) : (M.submatrix e e).PosSemidef := by classical rw [(by simp : M = 1 * M * 1), submatrix_mul (he₂ := Function.bijective_id), submatrix_mul (he₂ := Function.bijective_id), submatrix_id_id] simpa only [conjTranspose_submatrix, conjTranspose_one] using conjTranspose_mul_mul_same hM (Matrix.submatrix 1 id e) theorem transpose {M : Matrix n n R'} (hM : M.PosSemidef) : Mᵀ.PosSemidef := by refine ⟨IsHermitian.transpose hM.1, 
fun x => ?_⟩ convert hM.2 (star x) using 1 rw [mulVec_transpose, dotProduct_mulVec, star_star, dotProduct_comm] @[simp] theorem _root_.Matrix.posSemidef_transpose_iff {M : Matrix n n R'} : Mᵀ.PosSemidef ↔ M.PosSemidef := ⟨(by simpa using ·.transpose), .transpose⟩ theorem conjTranspose {M : Matrix n n R} (hM : M.PosSemidef) : Mᴴ.PosSemidef := hM.1.symm ▸ hM @[simp] theorem _root_.Matrix.posSemidef_conjTranspose_iff {M : Matrix n n R} : Mᴴ.PosSemidef ↔ M.PosSemidef := ⟨(by simpa using ·.conjTranspose), .conjTranspose⟩ protected lemma zero : PosSemidef (0 : Matrix n n R) := ⟨isHermitian_zero, by simp⟩ protected lemma one [StarOrderedRing R] [DecidableEq n] : PosSemidef (1 : Matrix n n R) := ⟨isHermitian_one, fun x => by rw [one_mulVec]; exact Fintype.sum_nonneg fun i => star_mul_self_nonneg _⟩ protected theorem natCast [StarOrderedRing R] [DecidableEq n] (d : ℕ) : PosSemidef (d : Matrix n n R) := ⟨isHermitian_natCast _, fun x => by rw [natCast_mulVec, Nat.cast_smul_eq_nsmul, dotProduct_smul] exact nsmul_nonneg (dotProduct_star_self_nonneg _) _⟩ protected theorem ofNat [StarOrderedRing R] [DecidableEq n] (d : ℕ) [d.AtLeastTwo] : PosSemidef (ofNat(d) : Matrix n n R) := .natCast d protected theorem intCast [StarOrderedRing R] [DecidableEq n] (d : ℤ) (hd : 0 ≤ d) : PosSemidef (d : Matrix n n R) := ⟨isHermitian_intCast _, fun x => by rw [intCast_mulVec, Int.cast_smul_eq_zsmul, dotProduct_smul] exact zsmul_nonneg (dotProduct_star_self_nonneg _) hd⟩ @[simp] protected theorem _root_.Matrix.posSemidef_intCast_iff [StarOrderedRing R] [DecidableEq n] [Nonempty n] [Nontrivial R] (d : ℤ) : PosSemidef (d : Matrix n n R) ↔ 0 ≤ d := posSemidef_diagonal_iff.trans <| by simp protected lemma pow [StarOrderedRing R] [DecidableEq n] {M : Matrix n n R} (hM : M.PosSemidef) (k : ℕ) : PosSemidef (M ^ k) := match k with | 0 => .one | 1 => by simpa using hM | (k + 2) => by rw [pow_succ, pow_succ'] simpa only [hM.isHermitian.eq] using (hM.pow k).mul_mul_conjTranspose_same M protected lemma inv 
[DecidableEq n] {M : Matrix n n R'} (hM : M.PosSemidef) : M⁻¹.PosSemidef := by by_cases h : IsUnit M.det · have := (conjTranspose_mul_mul_same hM M⁻¹).conjTranspose rwa [mul_nonsing_inv_cancel_right _ _ h, conjTranspose_conjTranspose] at this · rw [nonsing_inv_apply_not_isUnit _ h] exact .zero protected lemma zpow [StarOrderedRing R'] [DecidableEq n] {M : Matrix n n R'} (hM : M.PosSemidef) (z : ℤ) : (M ^ z).PosSemidef := by obtain ⟨n, rfl | rfl⟩ := z.eq_nat_or_neg · simpa using hM.pow n · simpa using (hM.pow n).inv protected lemma add [AddLeftMono R] {A : Matrix m m R} {B : Matrix m m R} (hA : A.PosSemidef) (hB : B.PosSemidef) : (A + B).PosSemidef := ⟨hA.isHermitian.add hB.isHermitian, fun x => by rw [add_mulVec, dotProduct_add] exact add_nonneg (hA.2 x) (hB.2 x)⟩ protected theorem smul {α : Type*} [CommSemiring α] [PartialOrder α] [StarRing α] [StarOrderedRing α] [Algebra α R] [StarModule α R] [PosSMulMono α R] {x : Matrix n n R} (hx : x.PosSemidef) {a : α} (ha : 0 ≤ a) : (a • x).PosSemidef := by refine ⟨IsSelfAdjoint.smul (IsSelfAdjoint.of_nonneg ha) hx.1, fun y => ?_⟩ simp only [smul_mulVec, dotProduct_smul] exact smul_nonneg ha (hx.2 _) lemma diag_nonneg {A : Matrix n n R} (hA : A.PosSemidef) {i : n} : 0 ≤ A i i := by classical simpa [trace] using hA.2 <| Pi.single i 1 lemma trace_nonneg [AddLeftMono R] {A : Matrix n n R} (hA : A.PosSemidef) : 0 ≤ A.trace := Fintype.sum_nonneg fun _ ↦ hA.diag_nonneg end PosSemidef @[simp] theorem posSemidef_submatrix_equiv {M : Matrix n n R} (e : m ≃ n) : (M.submatrix e e).PosSemidef ↔ M.PosSemidef := ⟨fun h => by simpa using h.submatrix e.symm, fun h => h.submatrix _⟩ /-- The conjugate transpose of a matrix multiplied by the matrix is positive semidefinite -/ theorem posSemidef_conjTranspose_mul_self [StarOrderedRing R] (A : Matrix m n R) : PosSemidef (Aᴴ * A) := by refine ⟨isHermitian_conjTranspose_mul_self _, fun x => ?_⟩ rw [← mulVec_mulVec, dotProduct_mulVec, vecMul_conjTranspose, star_star] exact Finset.sum_nonneg fun i _ 
=> star_mul_self_nonneg _ /-- A matrix multiplied by its conjugate transpose is positive semidefinite -/ theorem posSemidef_self_mul_conjTranspose [StarOrderedRing R] (A : Matrix m n R) : PosSemidef (A * Aᴴ) := by simpa only [conjTranspose_conjTranspose] using posSemidef_conjTranspose_mul_self Aᴴ theorem posSemidef_sum {ι : Type*} [AddLeftMono R] {x : ι → Matrix n n R} (s : Finset ι) (h : ∀ i ∈ s, PosSemidef (x i)) : PosSemidef (∑ i ∈ s, x i) := by refine ⟨isSelfAdjoint_sum s fun _ hi => h _ hi |>.1, fun y => ?_⟩ simp [sum_mulVec, dotProduct_sum, Finset.sum_nonneg fun _ hi => (h _ hi).2 _] section trace -- TODO: move these results to an earlier file variable {R : Type*} [PartialOrder R] [NonUnitalRing R] [StarRing R] [StarOrderedRing R] [NoZeroDivisors R] theorem trace_conjTranspose_mul_self_eq_zero_iff {A : Matrix m n R} : (Aᴴ * A).trace = 0 ↔ A = 0 := by rw [← star_vec_dotProduct_vec, dotProduct_star_self_eq_zero, vec_eq_zero_iff] theorem trace_mul_conjTranspose_self_eq_zero_iff {A : Matrix m n R} : (A * Aᴴ).trace = 0 ↔ A = 0 := by simpa using trace_conjTranspose_mul_self_eq_zero_iff (A := Aᴴ) end trace section conjugate variable [DecidableEq n] {U x : Matrix n n R} /-- For an invertible matrix `U`, `star U * x * U` is positive semi-definite iff `x` is. This works on any ⋆-ring with a partial order. See `IsUnit.star_left_conjugate_nonneg_iff` for a similar statement for star-ordered rings. -/ theorem IsUnit.posSemidef_star_left_conjugate_iff (hU : IsUnit U) : PosSemidef (star U * x * U) ↔ x.PosSemidef := by refine ⟨fun h ↦ ?_, fun h ↦ h.conjTranspose_mul_mul_same _⟩ lift U to (Matrix n n R)ˣ using hU have := h.conjTranspose_mul_mul_same ((U⁻¹ : (Matrix n n R)ˣ) : Matrix n n R) rwa [← star_eq_conjTranspose, ← mul_assoc, ← mul_assoc, ← star_mul, mul_assoc, Units.mul_inv, mul_one, star_one, one_mul] at this /-- For an invertible matrix `U`, `U * x * star U` is positive semi-definite iff `x` is. This works on any ⋆-ring with a partial order. 
See `IsUnit.star_right_conjugate_nonneg_iff` for a similar statement for star-ordered rings. -/ theorem IsUnit.posSemidef_star_right_conjugate_iff (hU : IsUnit U) : PosSemidef (U * x * star U) ↔ x.PosSemidef := by simpa using hU.star.posSemidef_star_left_conjugate_iff end conjugate /-- The matrix `vecMulVec a (star a)` is always positive semi-definite. -/ theorem posSemidef_vecMulVec_self_star [StarOrderedRing R] (a : n → R) : (vecMulVec a (star a)).PosSemidef := by simp [vecMulVec_eq Unit, ← conjTranspose_replicateCol, posSemidef_self_mul_conjTranspose] /-- The matrix `vecMulVec (star a) a` is always postive semi-definite. -/ theorem posSemidef_vecMulVec_star_self [StarOrderedRing R] (a : n → R) : (vecMulVec (star a) a).PosSemidef := by simp [vecMulVec_eq Unit, ← conjTranspose_replicateRow, posSemidef_conjTranspose_mul_self] /-! ## Positive definite matrices -/ /-- A matrix `M : Matrix n n R` is positive definite if it is Hermitian and `xᴴMx` is greater than zero for all nonzero `x`. -/ def PosDef (M : Matrix n n R) := M.IsHermitian ∧ ∀ x : n → R, x ≠ 0 → 0 < star x ⬝ᵥ (M *ᵥ x) namespace PosDef theorem isHermitian {M : Matrix n n R} (hM : M.PosDef) : M.IsHermitian := hM.1 theorem re_dotProduct_pos {M : Matrix n n 𝕜} (hM : M.PosDef) {x : n → 𝕜} (hx : x ≠ 0) : 0 < RCLike.re (star x ⬝ᵥ (M *ᵥ x)) := RCLike.pos_iff.mp (hM.2 _ hx) |>.1 theorem posSemidef {M : Matrix n n R} (hM : M.PosDef) : M.PosSemidef := by refine ⟨hM.1, ?_⟩ intro x by_cases hx : x = 0 · simp only [hx, zero_dotProduct, star_zero] exact le_rfl · exact le_of_lt (hM.2 x hx) theorem transpose {M : Matrix n n R'} (hM : M.PosDef) : Mᵀ.PosDef := by refine ⟨IsHermitian.transpose hM.1, fun x hx => ?_⟩ convert hM.2 (star x) (star_ne_zero.2 hx) using 1 rw [mulVec_transpose, dotProduct_mulVec, star_star, dotProduct_comm] @[simp] theorem transpose_iff {M : Matrix n n R'} : Mᵀ.PosDef ↔ M.PosDef := ⟨(by simpa using ·.transpose), .transpose⟩ protected theorem diagonal [StarOrderedRing R] [DecidableEq n] 
[NoZeroDivisors R] {d : n → R} (h : ∀ i, 0 < d i) : PosDef (diagonal d) := ⟨isHermitian_diagonal_of_self_adjoint _ <| funext fun i => IsSelfAdjoint.of_nonneg (h i).le, fun x hx => by refine Fintype.sum_pos ?_ simp_rw [mulVec_diagonal, ← mul_assoc, Pi.lt_def] obtain ⟨i, hi⟩ := Function.ne_iff.mp hx exact ⟨fun i => star_left_conjugate_nonneg (h i).le _, i, star_left_conjugate_pos (h _) (isRegular_of_ne_zero hi)⟩⟩ @[simp] theorem _root_.Matrix.posDef_diagonal_iff [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] [Nontrivial R] {d : n → R} : PosDef (diagonal d) ↔ ∀ i, 0 < d i := by refine ⟨fun h i => ?_, .diagonal⟩ have := h.2 (Pi.single i 1) simp_rw [mulVec_single_one, ← Pi.single_star, star_one, single_dotProduct, one_mul, col_apply, diagonal_apply_eq, Function.ne_iff] at this exact this ⟨i, by simp⟩ protected theorem one [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] : PosDef (1 : Matrix n n R) := ⟨isHermitian_one, fun x hx => by simpa only [one_mulVec, dotProduct_star_self_pos_iff]⟩ protected theorem natCast [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] (d : ℕ) (hd : d ≠ 0) : PosDef (d : Matrix n n R) := ⟨isHermitian_natCast _, fun x hx => by rw [natCast_mulVec, Nat.cast_smul_eq_nsmul, dotProduct_smul] exact nsmul_pos (dotProduct_star_self_pos_iff.mpr hx) hd⟩ @[simp] theorem _root_.Matrix.posDef_natCast_iff [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] [Nonempty n] [Nontrivial R] {d : ℕ} : PosDef (d : Matrix n n R) ↔ 0 < d := posDef_diagonal_iff.trans <| by simp protected theorem ofNat [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] (d : ℕ) [d.AtLeastTwo] : PosDef (ofNat(d) : Matrix n n R) := .natCast d (NeZero.ne _) protected theorem intCast [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] (d : ℤ) (hd : 0 < d) : PosDef (d : Matrix n n R) := ⟨isHermitian_intCast _, fun x hx => by rw [intCast_mulVec, Int.cast_smul_eq_zsmul, dotProduct_smul] exact zsmul_pos (dotProduct_star_self_pos_iff.mpr hx) hd⟩ @[simp] theorem 
_root_.Matrix.posDef_intCast_iff [StarOrderedRing R] [DecidableEq n] [NoZeroDivisors R] [Nonempty n] [Nontrivial R] {d : ℤ} : PosDef (d : Matrix n n R) ↔ 0 < d := posDef_diagonal_iff.trans <| by simp protected lemma add_posSemidef [AddLeftMono R] {A : Matrix m m R} {B : Matrix m m R} (hA : A.PosDef) (hB : B.PosSemidef) : (A + B).PosDef := ⟨hA.isHermitian.add hB.isHermitian, fun x hx => by rw [add_mulVec, dotProduct_add] exact add_pos_of_pos_of_nonneg (hA.2 x hx) (hB.2 x)⟩ protected lemma posSemidef_add [AddLeftMono R] {A : Matrix m m R} {B : Matrix m m R} (hA : A.PosSemidef) (hB : B.PosDef) : (A + B).PosDef := add_comm A B ▸ hB.add_posSemidef hA protected lemma add [AddLeftMono R] {A : Matrix m m R} {B : Matrix m m R} (hA : A.PosDef) (hB : B.PosDef) : (A + B).PosDef := hA.add_posSemidef hB.posSemidef theorem _root_.Matrix.posDef_sum {ι : Type*} [AddLeftMono R] {A : ι → Matrix m m R} {s : Finset ι} (hs : s.Nonempty) (hA : ∀ i ∈ s, (A i).PosDef) : (∑ i ∈ s, A i).PosDef := by classical induction s using Finset.induction_on with | empty => simp at hs | insert i hi hins H => rw [Finset.sum_insert hins] by_cases h : ¬ hi.Nonempty · simp_all · exact PosDef.add (hA _ <| Finset.mem_insert_self i hi) <| H (not_not.mp h) fun _ _hi => hA _ (Finset.mem_insert_of_mem _hi) protected theorem smul {α : Type*} [CommSemiring α] [PartialOrder α] [StarRing α] [StarOrderedRing α] [Algebra α R] [StarModule α R] [PosSMulStrictMono α R] {x : Matrix n n R} (hx : x.PosDef) {a : α} (ha : 0 < a) : (a • x).PosDef := by refine ⟨IsSelfAdjoint.smul (IsSelfAdjoint.of_nonneg ha.le) hx.1, fun y hy => ?_⟩ simp only [smul_mulVec, dotProduct_smul] exact smul_pos ha (hx.2 _ hy) lemma conjTranspose_mul_mul_same {A : Matrix n n R} {B : Matrix n m R} (hA : A.PosDef) (hB : Function.Injective B.mulVec) : (Bᴴ * A * B).PosDef := by refine ⟨Matrix.isHermitian_conjTranspose_mul_mul _ hA.1, fun x hx => ?_⟩ have : B *ᵥ x ≠ 0 := fun h => hx <| hB.eq_iff' (mulVec_zero _) |>.1 h simpa only [star_mulVec, 
dotProduct_mulVec, vecMul_vecMul] using hA.2 _ this lemma mul_mul_conjTranspose_same {A : Matrix n n R} {B : Matrix m n R} (hA : A.PosDef) (hB : Function.Injective B.vecMul) : (B * A * Bᴴ).PosDef := by replace hB := star_injective.comp <| hB.comp star_injective simp_rw [Function.comp_def, star_vecMul, star_star] at hB simpa using hA.conjTranspose_mul_mul_same (B := Bᴴ) hB theorem conjTranspose_mul_self [StarOrderedRing R] [NoZeroDivisors R] (A : Matrix m n R) (hA : Function.Injective A.mulVec) : PosDef (Aᴴ * A) := by classical simpa using conjTranspose_mul_mul_same .one hA theorem mul_conjTranspose_self [StarOrderedRing R] [NoZeroDivisors R] (A : Matrix m n R) (hA : Function.Injective A.vecMul) : PosDef (A * Aᴴ) := by classical simpa using mul_mul_conjTranspose_same .one hA theorem conjTranspose {M : Matrix n n R} (hM : M.PosDef) : Mᴴ.PosDef := hM.1.symm ▸ hM @[simp] theorem _root_.Matrix.posDef_conjTranspose_iff {M : Matrix n n R} : Mᴴ.PosDef ↔ M.PosDef := ⟨(by simpa using ·.conjTranspose), .conjTranspose⟩ theorem of_toQuadraticForm' [DecidableEq n] {M : Matrix n n ℝ} (hM : M.IsSymm) (hMq : M.toQuadraticMap'.PosDef) : M.PosDef := by refine ⟨hM, fun x hx => ?_⟩ simp only [toQuadraticMap', QuadraticMap.PosDef, LinearMap.BilinMap.toQuadraticMap_apply, toLinearMap₂'_apply'] at hMq apply hMq x hx theorem toQuadraticForm' [DecidableEq n] {M : Matrix n n ℝ} (hM : M.PosDef) : M.toQuadraticMap'.PosDef := by intro x hx simp only [Matrix.toQuadraticMap', LinearMap.BilinMap.toQuadraticMap_apply, toLinearMap₂'_apply'] apply hM.2 x hx lemma diag_pos [Nontrivial R] {A : Matrix n n R} (hA : A.PosDef) {i : n} : 0 < A i i := by classical simpa [trace] using hA.2 <| Pi.single i 1 lemma trace_pos [Nontrivial R] [IsOrderedCancelAddMonoid R] [Nonempty n] {A : Matrix n n R} (hA : A.PosDef) : 0 < A.trace := Finset.sum_pos (fun _ _ ↦ hA.diag_pos) Finset.univ_nonempty section Field variable {K : Type*} [Field K] [PartialOrder K] [StarRing K] theorem isUnit [DecidableEq n] {M : Matrix n n 
K} (hM : M.PosDef) : IsUnit M := by by_contra h obtain ⟨a, ha, ha2⟩ : ∃ a ≠ 0, M *ᵥ a = 0 := by obtain ⟨a, b, ha⟩ := Function.not_injective_iff.mp <| mulVec_injective_iff_isUnit.not.mpr h exact ⟨a - b, by simp [sub_eq_zero, ha, mulVec_sub]⟩ simpa [ha2] using hM.2 _ ha protected theorem inv [DecidableEq n] {M : Matrix n n K} (hM : M.PosDef) : M⁻¹.PosDef := by have := hM.mul_mul_conjTranspose_same (B := M⁻¹) ?_ · let _ := hM.isUnit.invertible simpa using this.conjTranspose · simp only [Matrix.vecMul_injective_iff_isUnit, isUnit_nonsing_inv_iff, hM.isUnit] @[simp] theorem _root_.Matrix.posDef_inv_iff [DecidableEq n] {M : Matrix n n K} : M⁻¹.PosDef ↔ M.PosDef := ⟨fun h => letI := (Matrix.isUnit_nonsing_inv_iff.1 <| h.isUnit).invertible Matrix.inv_inv_of_invertible M ▸ h.inv, (·.inv)⟩ end Field section conjugate variable [DecidableEq n] {x U : Matrix n n R} /-- For an invertible matrix `U`, `star U * x * U` is positive definite iff `x` is. This works on any ⋆-ring with a partial order. See `IsUnit.isStrictlyPositive_star_left_conjugate_iff'` for a similar statement for star-ordered rings. For matrices, positive definiteness is equivalent to strict positivity when the underlying field is `ℝ` or `ℂ` (see `Matrix.isStrictlyPositive_iff_posDef`). -/ theorem _root_.Matrix.IsUnit.posDef_star_left_conjugate_iff (hU : IsUnit U) : PosDef (star U * x * U) ↔ x.PosDef := by refine ⟨fun h ↦ ?_, fun h ↦ h.conjTranspose_mul_mul_same <| mulVec_injective_of_isUnit hU⟩ lift U to (Matrix n n R)ˣ using hU have := h.conjTranspose_mul_mul_same (mulVec_injective_of_isUnit (Units.isUnit U⁻¹)) rwa [← star_eq_conjTranspose, ← mul_assoc, ← mul_assoc, ← star_mul, mul_assoc, Units.mul_inv, mul_one, star_one, one_mul] at this /-- For an invertible matrix `U`, `U * x * star U` is positive definite iff `x` is. This works on any ⋆-ring with a partial order. See `IsUnit.isStrictlyPositive_star_right_conjugate_iff` for a similar statement for star-ordered rings. 
For matrices, positive definiteness is equivalent to strict positivity when the underlying field is `ℝ` or `ℂ` (see `Matrix.isStrictlyPositive_iff_posDef`). -/ theorem _root_.Matrix.IsUnit.posDef_star_right_conjugate_iff (hU : IsUnit U) : PosDef (U * x * star U) ↔ x.PosDef := by simpa using hU.star.posDef_star_left_conjugate_iff end conjugate section SchurComplement variable [StarOrderedRing R'] theorem fromBlocks₁₁ [DecidableEq m] {A : Matrix m m R'} (B : Matrix m n R') (D : Matrix n n R') (hA : A.PosDef) [Invertible A] : (fromBlocks A B Bᴴ D).PosSemidef ↔ (D - Bᴴ * A⁻¹ * B).PosSemidef := by rw [PosSemidef, IsHermitian.fromBlocks₁₁ _ _ hA.1] constructor · refine fun h => ⟨h.1, fun x => ?_⟩ have := h.2 (-((A⁻¹ * B) *ᵥ x) ⊕ᵥ x) rwa [dotProduct_mulVec, schur_complement_eq₁₁ B D _ _ hA.1, neg_add_cancel, dotProduct_zero, zero_add, ← dotProduct_mulVec] at this · refine fun h => ⟨h.1, fun x => ?_⟩ rw [dotProduct_mulVec, ← Sum.elim_comp_inl_inr x, schur_complement_eq₁₁ B D _ _ hA.1] apply le_add_of_nonneg_of_le · rw [← dotProduct_mulVec] apply hA.posSemidef.2 · rw [← dotProduct_mulVec (star (x ∘ Sum.inr))] apply h.2 theorem fromBlocks₂₂ [DecidableEq n] (A : Matrix m m R') (B : Matrix m n R') {D : Matrix n n R'} (hD : D.PosDef) [Invertible D] : (fromBlocks A B Bᴴ D).PosSemidef ↔ (A - B * D⁻¹ * Bᴴ).PosSemidef := by rw [← posSemidef_submatrix_equiv (Equiv.sumComm n m), Equiv.sumComm_apply, fromBlocks_submatrix_sum_swap_sum_swap] convert fromBlocks₁₁ Bᴴ A hD <;> simp end SchurComplement end PosDef end Matrix namespace QuadraticForm open QuadraticMap variable {n : Type*} [Fintype n] theorem posDef_of_toMatrix' [DecidableEq n] {Q : QuadraticForm ℝ (n → ℝ)} (hQ : Q.toMatrix'.PosDef) : Q.PosDef := by rw [← toQuadraticMap_associated ℝ Q, ← (LinearMap.toMatrix₂' ℝ).left_inv ((associatedHom (R := ℝ) ℝ) Q)] exact hQ.toQuadraticForm' theorem posDef_toMatrix' [DecidableEq n] {Q : QuadraticForm ℝ (n → ℝ)} (hQ : Q.PosDef) : Q.toMatrix'.PosDef := by rw [← toQuadraticMap_associated ℝ Q, ← 
(LinearMap.toMatrix₂' ℝ).left_inv ((associatedHom (R := ℝ) ℝ) Q)] at hQ exact .of_toQuadraticForm' (isSymm_toMatrix' Q) hQ end QuadraticForm namespace Matrix variable {𝕜 : Type*} [RCLike 𝕜] {n : Type*} [Fintype n] /-- A positive definite matrix `M` induces a norm `‖x‖ = sqrt (re xᴴMx)`. -/ noncomputable abbrev NormedAddCommGroup.ofMatrix {M : Matrix n n 𝕜} (hM : M.PosDef) : NormedAddCommGroup (n → 𝕜) := @InnerProductSpace.Core.toNormedAddCommGroup _ _ _ _ _ { inner x y := (M *ᵥ y) ⬝ᵥ star x conj_inner_symm x y := by rw [dotProduct_comm, star_dotProduct, starRingEnd_apply, star_star, star_mulVec, dotProduct_comm (M *ᵥ y), dotProduct_mulVec, hM.isHermitian.eq] re_inner_nonneg x := dotProduct_comm _ (star x) ▸ hM.posSemidef.re_dotProduct_nonneg x definite x (hx : _ ⬝ᵥ _ = 0) := by by_contra! h simpa [hx, lt_irrefl, dotProduct_comm] using hM.re_dotProduct_pos h add_left := by simp only [star_add, dotProduct_add, forall_const] smul_left _ _ _ := by rw [← smul_eq_mul, ← dotProduct_smul, starRingEnd_apply, ← star_smul] } /-- A positive definite matrix `M` induces an inner product `⟪x, y⟫ = xᴴMy`. -/ def InnerProductSpace.ofMatrix {M : Matrix n n 𝕜} (hM : M.PosDef) : @InnerProductSpace 𝕜 (n → 𝕜) _ (NormedAddCommGroup.ofMatrix hM).toSeminormedAddCommGroup := InnerProductSpace.ofCore _ end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/BilinearForm.lean
import Mathlib.LinearAlgebra.BilinearForm.Properties import Mathlib.LinearAlgebra.Matrix.SesquilinearForm /-! # Bilinear form This file defines the conversion between bilinear forms and matrices. ## Main definitions * `Matrix.toBilin` given a basis define a bilinear form * `Matrix.toBilin'` define the bilinear form on `n → R` * `BilinForm.toMatrix`: calculate the matrix coefficients of a bilinear form * `BilinForm.toMatrix'`: calculate the matrix coefficients of a bilinear form on `n → R` ## Notation In this file we use the following type variables: - `M₁` is a module over the commutative semiring `R₁`, - `M₂` is a module over the commutative ring `R₂`. ## Tags bilinear form, bilin form, BilinearForm, matrix, basis -/ open LinearMap (BilinForm) open Module variable {R₁ : Type*} {M₁ : Type*} [CommSemiring R₁] [AddCommMonoid M₁] [Module R₁ M₁] variable {R₂ : Type*} {M₂ : Type*} [CommRing R₂] [AddCommGroup M₂] [Module R₂ M₂] section Matrix variable {n o : Type*} open Finset LinearMap Matrix open Matrix /-- The map from `Matrix n n R` to bilinear forms on `n → R`. This is an auxiliary definition for the equivalence `Matrix.toBilin'`. -/ def Matrix.toBilin'Aux [Fintype n] (M : Matrix n n R₁) : BilinForm R₁ (n → R₁) := Matrix.toLinearMap₂'Aux _ _ M theorem Matrix.toBilin'Aux_single [Fintype n] [DecidableEq n] (M : Matrix n n R₁) (i j : n) : M.toBilin'Aux (Pi.single i 1) (Pi.single j 1) = M i j := Matrix.toLinearMap₂'Aux_single _ _ _ _ _ /-- The linear map from bilinear forms to `Matrix n n R` given an `n`-indexed basis. This is an auxiliary definition for the equivalence `Matrix.toBilin'`. 
-/ def BilinForm.toMatrixAux (b : n → M₁) : BilinForm R₁ M₁ →ₗ[R₁] Matrix n n R₁ := LinearMap.toMatrix₂Aux R₁ b b @[simp] theorem LinearMap.BilinForm.toMatrixAux_apply (B : BilinForm R₁ M₁) (b : n → M₁) (i j : n) : BilinForm.toMatrixAux b B i j = B (b i) (b j) := LinearMap.toMatrix₂Aux_apply R₁ B _ _ _ _ variable [Fintype n] [Fintype o] theorem toBilin'Aux_toMatrixAux [DecidableEq n] (B₂ : BilinForm R₁ (n → R₁)) : Matrix.toBilin'Aux (BilinForm.toMatrixAux (fun j => Pi.single j 1) B₂) = B₂ := by rw [BilinForm.toMatrixAux, Matrix.toBilin'Aux, toLinearMap₂'Aux_toMatrix₂Aux] section ToMatrix' /-! ### `ToMatrix'` section This section deals with the conversion between matrices and bilinear forms on `n → R₂`. -/ variable [DecidableEq n] [DecidableEq o] /-- The linear equivalence between bilinear forms on `n → R` and `n × n` matrices -/ def LinearMap.BilinForm.toMatrix' : BilinForm R₁ (n → R₁) ≃ₗ[R₁] Matrix n n R₁ := LinearMap.toMatrix₂' R₁ /-- The linear equivalence between `n × n` matrices and bilinear forms on `n → R` -/ def Matrix.toBilin' : Matrix n n R₁ ≃ₗ[R₁] BilinForm R₁ (n → R₁) := BilinForm.toMatrix'.symm @[simp] theorem Matrix.toBilin'Aux_eq (M : Matrix n n R₁) : Matrix.toBilin'Aux M = Matrix.toBilin' M := rfl theorem Matrix.toBilin'_apply (M : Matrix n n R₁) (x y : n → R₁) : Matrix.toBilin' M x y = ∑ i, ∑ j, x i * M i j * y j := (Matrix.toLinearMap₂'_apply _ _ _).trans (by simp only [smul_eq_mul, mul_comm, mul_left_comm]) theorem Matrix.toBilin'_apply' (M : Matrix n n R₁) (v w : n → R₁) : Matrix.toBilin' M v w = v ⬝ᵥ M *ᵥ w := Matrix.toLinearMap₂'_apply' _ _ _ @[simp] theorem Matrix.toBilin'_single (M : Matrix n n R₁) (i j : n) : Matrix.toBilin' M (Pi.single i 1) (Pi.single j 1) = M i j := by simp [Matrix.toBilin'_apply, Pi.single_apply] @[simp] theorem LinearMap.BilinForm.toMatrix'_symm : (BilinForm.toMatrix'.symm : Matrix n n R₁ ≃ₗ[R₁] _) = Matrix.toBilin' := rfl @[simp] theorem Matrix.toBilin'_symm : (Matrix.toBilin'.symm : _ ≃ₗ[R₁] Matrix n n R₁) = 
BilinForm.toMatrix' := BilinForm.toMatrix'.symm_symm @[simp] theorem Matrix.toBilin'_toMatrix' (B : BilinForm R₁ (n → R₁)) : Matrix.toBilin' (BilinForm.toMatrix' B) = B := Matrix.toBilin'.apply_symm_apply B namespace LinearMap @[simp] theorem BilinForm.toMatrix'_toBilin' (M : Matrix n n R₁) : BilinForm.toMatrix' (Matrix.toBilin' M) = M := (LinearMap.toMatrix₂' R₁).apply_symm_apply M @[simp] theorem BilinForm.toMatrix'_apply (B : BilinForm R₁ (n → R₁)) (i j : n) : BilinForm.toMatrix' B i j = B (Pi.single i 1) (Pi.single j 1) := LinearMap.toMatrix₂'_apply _ _ _ @[simp] theorem BilinForm.toMatrix'_comp (B : BilinForm R₁ (n → R₁)) (l r : (o → R₁) →ₗ[R₁] n → R₁) : (B.comp l r).toMatrix' = l.toMatrix'ᵀ * B.toMatrix' * r.toMatrix' := B.toMatrix₂'_compl₁₂ _ _ theorem BilinForm.toMatrix'_compLeft (B : BilinForm R₁ (n → R₁)) (f : (n → R₁) →ₗ[R₁] n → R₁) : (B.compLeft f).toMatrix' = f.toMatrix'ᵀ * B.toMatrix' := B.toMatrix₂'_comp _ theorem BilinForm.toMatrix'_compRight (B : BilinForm R₁ (n → R₁)) (f : (n → R₁) →ₗ[R₁] n → R₁) : (B.compRight f).toMatrix' = B.toMatrix' * f.toMatrix' := B.toMatrix₂'_compl₂ _ theorem BilinForm.mul_toMatrix'_mul (B : BilinForm R₁ (n → R₁)) (M : Matrix o n R₁) (N : Matrix n o R₁) : M * B.toMatrix' * N = (B.comp (Mᵀ).toLin' N.toLin').toMatrix' := B.mul_toMatrix₂'_mul _ _ theorem BilinForm.mul_toMatrix' (B : BilinForm R₁ (n → R₁)) (M : Matrix n n R₁) : M * B.toMatrix' = (B.compLeft (Mᵀ).toLin').toMatrix' := LinearMap.mul_toMatrix' B _ theorem BilinForm.toMatrix'_mul (B : BilinForm R₁ (n → R₁)) (M : Matrix n n R₁) : BilinForm.toMatrix' B * M = BilinForm.toMatrix' (B.compRight (Matrix.toLin' M)) := B.toMatrix₂'_mul _ end LinearMap theorem Matrix.toBilin'_comp (M : Matrix n n R₁) (P Q : Matrix n o R₁) : M.toBilin'.comp P.toLin' Q.toLin' = (Pᵀ * M * Q).toBilin' := BilinForm.toMatrix'.injective (by simp only [BilinForm.toMatrix'_comp, BilinForm.toMatrix'_toBilin', toMatrix'_toLin']) end ToMatrix' section ToMatrix /-! 
### `ToMatrix` section This section deals with the conversion between matrices and bilinear forms on a module with a fixed basis. -/ variable [DecidableEq n] (b : Basis n R₁ M₁) /-- `BilinForm.toMatrix b` is the equivalence between `R`-bilinear forms on `M` and `n`-by-`n` matrices with entries in `R`, if `b` is an `R`-basis for `M`. -/ noncomputable def BilinForm.toMatrix : BilinForm R₁ M₁ ≃ₗ[R₁] Matrix n n R₁ := LinearMap.toMatrix₂ b b /-- `BilinForm.toMatrix b` is the equivalence between `R`-bilinear forms on `M` and `n`-by-`n` matrices with entries in `R`, if `b` is an `R`-basis for `M`. -/ noncomputable def Matrix.toBilin : Matrix n n R₁ ≃ₗ[R₁] BilinForm R₁ M₁ := (BilinForm.toMatrix b).symm @[simp] theorem BilinForm.toMatrix_apply (B : BilinForm R₁ M₁) (i j : n) : BilinForm.toMatrix b B i j = B (b i) (b j) := LinearMap.toMatrix₂_apply _ _ B _ _ theorem BilinForm.dotProduct_toMatrix_mulVec (B : BilinForm R₁ M₁) (x y : n → R₁) : x ⬝ᵥ (BilinForm.toMatrix b B) *ᵥ y = B (b.equivFun.symm x) (b.equivFun.symm y) := dotProduct_toMatrix₂_mulVec b b B x y lemma BilinForm.apply_eq_dotProduct_toMatrix_mulVec (B : BilinForm R₁ M₁) (x y : M₁) : B x y = (b.repr x) ⬝ᵥ (BilinForm.toMatrix b B) *ᵥ (b.repr y) := apply_eq_dotProduct_toMatrix₂_mulVec b b B x y @[simp] theorem Matrix.toBilin_apply (M : Matrix n n R₁) (x y : M₁) : Matrix.toBilin b M x y = ∑ i, ∑ j, b.repr x i * M i j * b.repr y j := (Matrix.toLinearMap₂_apply _ _ _ _ _).trans (by simp only [smul_eq_mul, mul_comm, mul_left_comm]) -- Not a `simp` lemma since `BilinForm.toMatrix` needs an extra argument theorem BilinearForm.toMatrixAux_eq (B : BilinForm R₁ M₁) : BilinForm.toMatrixAux (R₁ := R₁) b B = BilinForm.toMatrix b B := LinearMap.toMatrix₂Aux_eq _ _ B @[simp] theorem BilinForm.toMatrix_symm : (BilinForm.toMatrix b).symm = Matrix.toBilin b := rfl @[simp] theorem Matrix.toBilin_symm : (Matrix.toBilin b).symm = BilinForm.toMatrix b := (BilinForm.toMatrix b).symm_symm theorem Matrix.toBilin_basisFun : Matrix.toBilin 
(Pi.basisFun R₁ n) = Matrix.toBilin' := by ext M simp only [coe_comp, coe_single, Function.comp_apply, toBilin_apply, Pi.basisFun_repr, toBilin'_apply] theorem BilinForm.toMatrix_basisFun : BilinForm.toMatrix (Pi.basisFun R₁ n) = BilinForm.toMatrix' := by rw [BilinForm.toMatrix, BilinForm.toMatrix', LinearMap.toMatrix₂_basisFun] @[simp] theorem Matrix.toBilin_toMatrix (B : BilinForm R₁ M₁) : Matrix.toBilin b (BilinForm.toMatrix b B) = B := (Matrix.toBilin b).apply_symm_apply B @[simp] theorem BilinForm.toMatrix_toBilin (M : Matrix n n R₁) : BilinForm.toMatrix b (Matrix.toBilin b M) = M := (BilinForm.toMatrix b).apply_symm_apply M variable {M₂' : Type*} [AddCommMonoid M₂'] [Module R₁ M₂'] variable (c : Basis o R₁ M₂') variable [DecidableEq o] -- Cannot be a `simp` lemma because `b` must be inferred. theorem BilinForm.toMatrix_comp (B : BilinForm R₁ M₁) (l r : M₂' →ₗ[R₁] M₁) : BilinForm.toMatrix c (B.comp l r) = (LinearMap.toMatrix c b l)ᵀ * BilinForm.toMatrix b B * LinearMap.toMatrix c b r := LinearMap.toMatrix₂_compl₁₂ _ _ _ _ B _ _ theorem BilinForm.toMatrix_compLeft (B : BilinForm R₁ M₁) (f : M₁ →ₗ[R₁] M₁) : BilinForm.toMatrix b (B.compLeft f) = (LinearMap.toMatrix b b f)ᵀ * BilinForm.toMatrix b B := LinearMap.toMatrix₂_comp _ _ _ B _ theorem BilinForm.toMatrix_compRight (B : BilinForm R₁ M₁) (f : M₁ →ₗ[R₁] M₁) : BilinForm.toMatrix b (B.compRight f) = BilinForm.toMatrix b B * LinearMap.toMatrix b b f := LinearMap.toMatrix₂_compl₂ _ _ _ B _ @[simp] theorem BilinForm.toMatrix_mul_basis_toMatrix (c : Basis o R₁ M₁) (B : BilinForm R₁ M₁) : (b.toMatrix c)ᵀ * BilinForm.toMatrix b B * b.toMatrix c = BilinForm.toMatrix c B := LinearMap.toMatrix₂_mul_basis_toMatrix _ _ _ _ B theorem BilinForm.mul_toMatrix_mul (B : BilinForm R₁ M₁) (M : Matrix o n R₁) (N : Matrix n o R₁) : M * BilinForm.toMatrix b B * N = BilinForm.toMatrix c (B.comp (Matrix.toLin c b Mᵀ) (Matrix.toLin c b N)) := LinearMap.mul_toMatrix₂_mul _ _ _ _ B _ _ theorem BilinForm.mul_toMatrix (B : BilinForm R₁ M₁) 
(M : Matrix n n R₁) : M * BilinForm.toMatrix b B = BilinForm.toMatrix b (B.compLeft (Matrix.toLin b b Mᵀ)) := LinearMap.mul_toMatrix₂ _ _ _ B _ theorem BilinForm.toMatrix_mul (B : BilinForm R₁ M₁) (M : Matrix n n R₁) : BilinForm.toMatrix b B * M = BilinForm.toMatrix b (B.compRight (Matrix.toLin b b M)) := LinearMap.toMatrix₂_mul _ _ _ B _ theorem Matrix.toBilin_comp (M : Matrix n n R₁) (P Q : Matrix n o R₁) : (Matrix.toBilin b M).comp (toLin c b P) (toLin c b Q) = Matrix.toBilin c (Pᵀ * M * Q) := by ext x y rw [Matrix.toBilin, BilinForm.toMatrix, Matrix.toBilin, BilinForm.toMatrix, toMatrix₂_symm, toMatrix₂_symm, ← Matrix.toLinearMap₂_compl₁₂ b b c c] simp end ToMatrix end Matrix section MatrixAdjoints open Matrix variable {n : Type*} [Fintype n] variable (b : Basis n R₂ M₂) variable (J J₃ A A' : Matrix n n R₂) theorem Matrix.isAdjointPair_equiv' [DecidableEq n] (P : Matrix n n R₂) (h : IsUnit P) : (Pᵀ * J * P).IsAdjointPair (Pᵀ * J * P) A A' ↔ J.IsAdjointPair J (P * A * P⁻¹) (P * A' * P⁻¹) := Matrix.isAdjointPair_equiv _ _ _ _ h variable [DecidableEq n] theorem mem_pairSelfAdjointMatricesSubmodule' : A ∈ pairSelfAdjointMatricesSubmodule J J₃ ↔ Matrix.IsAdjointPair J J₃ A A := by simp only [mem_pairSelfAdjointMatricesSubmodule] /-- The submodule of self-adjoint matrices with respect to the bilinear form corresponding to the matrix `J`. -/ def selfAdjointMatricesSubmodule' : Submodule R₂ (Matrix n n R₂) := pairSelfAdjointMatricesSubmodule J J theorem mem_selfAdjointMatricesSubmodule' : A ∈ selfAdjointMatricesSubmodule J ↔ J.IsSelfAdjoint A := by simp only [mem_selfAdjointMatricesSubmodule] /-- The submodule of skew-adjoint matrices with respect to the bilinear form corresponding to the matrix `J`. 
-/ def skewAdjointMatricesSubmodule' : Submodule R₂ (Matrix n n R₂) := pairSelfAdjointMatricesSubmodule (-J) J theorem mem_skewAdjointMatricesSubmodule' : A ∈ skewAdjointMatricesSubmodule J ↔ J.IsSkewAdjoint A := by simp only [mem_skewAdjointMatricesSubmodule] end MatrixAdjoints namespace LinearMap namespace BilinForm section Det open Matrix variable {A : Type*} [CommRing A] [IsDomain A] [Module A M₂] (B₃ : BilinForm A M₂) variable {ι : Type*} [DecidableEq ι] [Fintype ι] theorem _root_.Matrix.nondegenerate_toBilin'_iff_nondegenerate_toBilin {M : Matrix ι ι R₁} (b : Basis ι R₁ M₁) : M.toBilin'.Nondegenerate ↔ (Matrix.toBilin b M).Nondegenerate := (nondegenerate_congr_iff b.equivFun.symm).symm -- Lemmas transferring nondegeneracy between a matrix and its associated bilinear form theorem _root_.Matrix.Nondegenerate.toBilin' {M : Matrix ι ι R₂} (h : M.Nondegenerate) : M.toBilin'.Nondegenerate := fun x hx => h.eq_zero_of_ortho fun y => by simpa only [toBilin'_apply'] using hx y @[simp] theorem _root_.Matrix.nondegenerate_toBilin'_iff {M : Matrix ι ι R₂} : M.toBilin'.Nondegenerate ↔ M.Nondegenerate := by refine ⟨fun h ↦ Matrix.nondegenerate_def.mpr ?_, Matrix.Nondegenerate.toBilin'⟩ exact fun v hv => h v fun w => (M.toBilin'_apply' _ _).trans <| hv w theorem _root_.Matrix.Nondegenerate.toBilin {M : Matrix ι ι R₂} (h : M.Nondegenerate) (b : Basis ι R₂ M₂) : (Matrix.toBilin b M).Nondegenerate := (Matrix.nondegenerate_toBilin'_iff_nondegenerate_toBilin b).mp h.toBilin' @[simp] theorem _root_.Matrix.nondegenerate_toBilin_iff {M : Matrix ι ι R₂} (b : Basis ι R₂ M₂) : (Matrix.toBilin b M).Nondegenerate ↔ M.Nondegenerate := by rw [← Matrix.nondegenerate_toBilin'_iff_nondegenerate_toBilin, Matrix.nondegenerate_toBilin'_iff] /-! 
Lemmas transferring nondegeneracy between a bilinear form and its associated matrix -/ @[simp] theorem nondegenerate_toMatrix'_iff {B : BilinForm R₂ (ι → R₂)} : B.toMatrix'.Nondegenerate (m := ι) ↔ B.Nondegenerate := Matrix.nondegenerate_toBilin'_iff.symm.trans <| (Matrix.toBilin'_toMatrix' B).symm ▸ Iff.rfl theorem Nondegenerate.toMatrix' {B : BilinForm R₂ (ι → R₂)} (h : B.Nondegenerate) : B.toMatrix'.Nondegenerate := nondegenerate_toMatrix'_iff.mpr h @[simp] theorem nondegenerate_toMatrix_iff {B : BilinForm R₂ M₂} (b : Basis ι R₂ M₂) : (BilinForm.toMatrix b B).Nondegenerate ↔ B.Nondegenerate := (Matrix.nondegenerate_toBilin_iff b).symm.trans <| (Matrix.toBilin_toMatrix b B).symm ▸ Iff.rfl theorem Nondegenerate.toMatrix {B : BilinForm R₂ M₂} (h : B.Nondegenerate) (b : Basis ι R₂ M₂) : (BilinForm.toMatrix b B).Nondegenerate := (nondegenerate_toMatrix_iff b).mpr h /-! Some shorthands for combining the above with `Matrix.nondegenerate_of_det_ne_zero` -/ theorem nondegenerate_toBilin'_iff_det_ne_zero {M : Matrix ι ι A} : M.toBilin'.Nondegenerate ↔ M.det ≠ 0 := by rw [Matrix.nondegenerate_toBilin'_iff, Matrix.nondegenerate_iff_det_ne_zero] theorem nondegenerate_toBilin'_of_det_ne_zero' (M : Matrix ι ι A) (h : M.det ≠ 0) : M.toBilin'.Nondegenerate := nondegenerate_toBilin'_iff_det_ne_zero.mpr h theorem nondegenerate_iff_det_ne_zero {B : BilinForm A M₂} (b : Basis ι A M₂) : B.Nondegenerate ↔ (BilinForm.toMatrix b B).det ≠ 0 := by rw [← Matrix.nondegenerate_iff_det_ne_zero, nondegenerate_toMatrix_iff] theorem nondegenerate_of_det_ne_zero (b : Basis ι A M₂) (h : (BilinForm.toMatrix b B₃).det ≠ 0) : B₃.Nondegenerate := (nondegenerate_iff_det_ne_zero b).mpr h end Det end BilinForm end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Nondegenerate.lean
import Mathlib.Data.Matrix.Basic import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.LinearAlgebra.Matrix.Adjugate /-! # Matrices associated with non-degenerate bilinear forms ## Main definitions * `Matrix.Nondegenerate A`: the proposition that when interpreted as a bilinear form, the matrix `A` is nondegenerate. -/ namespace Matrix variable {m R A : Type*} [CommRing R] /-- A matrix `M` is nondegenerate if for all `v ≠ 0`, there is a `w ≠ 0` with `w * M * v ≠ 0`. -/ def Nondegenerate [Finite m] (M : Matrix m m R) := letI : Fintype m := Fintype.ofFinite m ∀ v, (∀ w, v ⬝ᵥ M *ᵥ w = 0) → v = 0 variable [Fintype m] lemma nondegenerate_def {M : Matrix m m R} : M.Nondegenerate ↔ ∀ v, (∀ w, v ⬝ᵥ M *ᵥ w = 0) → v = 0 := by refine forall_congr' fun v ↦ ⟨fun hM hv ↦ hM ?_, fun hM hv ↦ hM ?_⟩ <;> convert hv /-- If `M` is nondegenerate and `w * M * v = 0` for all `w`, then `v = 0`. -/ theorem Nondegenerate.eq_zero_of_ortho {M : Matrix m m R} (hM : Nondegenerate M) {v : m → R} (hv : ∀ w, v ⬝ᵥ M *ᵥ w = 0) : v = 0 := nondegenerate_def.mp hM v hv /-- If `M` is nondegenerate and `v ≠ 0`, then there is some `w` such that `w * M * v ≠ 0`. -/ theorem Nondegenerate.exists_not_ortho_of_ne_zero {M : Matrix m m R} (hM : Nondegenerate M) {v : m → R} (hv : v ≠ 0) : ∃ w, v ⬝ᵥ M *ᵥ w ≠ 0 := not_forall.mp (mt hM.eq_zero_of_ortho hv) variable [CommRing A] [IsDomain A] /-- If `M` has a nonzero determinant, then `M` as a bilinear form on `n → A` is nondegenerate. See also `BilinForm.nondegenerateOfDetNeZero'` and `BilinForm.nondegenerateOfDetNeZero`. 
-/ theorem nondegenerate_of_det_ne_zero [DecidableEq m] {M : Matrix m m A} (hM : M.det ≠ 0) : Nondegenerate M := by refine nondegenerate_def.mpr fun v hv ↦ ?_ ext i specialize hv (M.cramer (Pi.single i 1)) simp_all theorem eq_zero_of_vecMul_eq_zero [DecidableEq m] {M : Matrix m m A} (hM : M.det ≠ 0) {v : m → A} (hv : v ᵥ* M = 0) : v = 0 := (nondegenerate_of_det_ne_zero hM).eq_zero_of_ortho fun w => by rw [dotProduct_mulVec, hv, zero_dotProduct] theorem eq_zero_of_mulVec_eq_zero [DecidableEq m] {M : Matrix m m A} (hM : M.det ≠ 0) {v : m → A} (hv : M *ᵥ v = 0) : v = 0 := eq_zero_of_vecMul_eq_zero (by rwa [det_transpose]) ((vecMul_transpose M v).trans hv) end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Hadamard.lean
import Mathlib.LinearAlgebra.Matrix.Trace import Mathlib.Data.Matrix.Basic /-! # Hadamard product of matrices This file defines the Hadamard product `Matrix.hadamard` and contains basic properties about them. ## Main definition - `Matrix.hadamard`: defines the Hadamard product, which is the pointwise product of two matrices of the same size. ## Notation * `⊙`: the Hadamard product `Matrix.hadamard`; ## References * <https://en.wikipedia.org/wiki/hadamard_product_(matrices)> ## Tags hadamard product, hadamard -/ variable {α m n R : Type*} namespace Matrix /-- `Matrix.hadamard` (denoted as `⊙` within the Matrix namespace) defines the Hadamard product, which is the pointwise product of two matrices of the same size. -/ def hadamard [Mul α] (A : Matrix m n α) (B : Matrix m n α) : Matrix m n α := of fun i j => A i j * B i j -- TODO: set as an equation lemma for `hadamard`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem hadamard_apply [Mul α] (A : Matrix m n α) (B : Matrix m n α) (i j) : hadamard A B i j = A i j * B i j := rfl @[inherit_doc] scoped infixl:100 " ⊙ " => Matrix.hadamard section BasicProperties variable (A : Matrix m n α) (B : Matrix m n α) (C : Matrix m n α) -- commutativity theorem hadamard_comm [CommMagma α] : A ⊙ B = B ⊙ A := ext fun _ _ => mul_comm _ _ -- associativity theorem hadamard_assoc [Semigroup α] : A ⊙ B ⊙ C = A ⊙ (B ⊙ C) := ext fun _ _ => mul_assoc _ _ _ -- distributivity theorem hadamard_add [Distrib α] : A ⊙ (B + C) = A ⊙ B + A ⊙ C := ext fun _ _ => left_distrib _ _ _ theorem add_hadamard [Distrib α] : (B + C) ⊙ A = B ⊙ A + C ⊙ A := ext fun _ _ => right_distrib _ _ _ -- scalar multiplication section Scalar @[simp] theorem smul_hadamard [Mul α] [SMul R α] [IsScalarTower R α α] (k : R) : (k • A) ⊙ B = k • A ⊙ B := ext fun _ _ => smul_mul_assoc _ _ _ @[simp] theorem hadamard_smul [Mul α] [SMul R α] [SMulCommClass R α α] (k : R) : A ⊙ (k • B) = k • A ⊙ B := ext fun _ _ => mul_smul_comm _ _ _ end Scalar section 
Zero variable [MulZeroClass α] @[simp] theorem hadamard_zero : A ⊙ (0 : Matrix m n α) = 0 := ext fun _ _ => mul_zero _ @[simp] theorem zero_hadamard : (0 : Matrix m n α) ⊙ A = 0 := ext fun _ _ => zero_mul _ end Zero section One variable [DecidableEq n] [MulZeroOneClass α] variable (M : Matrix n n α) theorem hadamard_one : M ⊙ (1 : Matrix n n α) = diagonal fun i => M i i := by ext i j by_cases h : i = j <;> simp [h] theorem one_hadamard : (1 : Matrix n n α) ⊙ M = diagonal fun i => M i i := by ext i j by_cases h : i = j <;> simp [h] end One section single variable [DecidableEq m] [DecidableEq n] [MulZeroClass α] theorem single_hadamard_single_eq (i : m) (j : n) (a b : α) : single i j a ⊙ single i j b = single i j (a * b) := ext fun _ _ => (apply_ite₂ _ _ _ _ _ _).trans (congr_arg _ <| zero_mul 0) @[deprecated (since := "2025-05-05")] alias stdBasisMatrix_hadamard_stdBasisMatrix_eq := single_hadamard_single_eq theorem single_hadamard_single_of_ne {ia : m} {ja : n} {ib : m} {jb : n} (h : ¬(ia = ib ∧ ja = jb)) (a b : α) : single ia ja a ⊙ single ib jb b = 0 := by rw [not_and_or] at h cases h <;> (simp only [single]; aesop) end single section Diagonal variable [DecidableEq n] [MulZeroClass α] theorem diagonal_hadamard_diagonal (v : n → α) (w : n → α) : diagonal v ⊙ diagonal w = diagonal (v * w) := ext fun _ _ => (apply_ite₂ _ _ _ _ _ _).trans (congr_arg _ <| zero_mul 0) end Diagonal section trace variable [Fintype m] [Fintype n] variable (R) [Semiring α] theorem sum_hadamard_eq : (∑ i : m, ∑ j : n, (A ⊙ B) i j) = trace (A * Bᵀ) := rfl theorem dotProduct_vecMul_hadamard [DecidableEq m] [DecidableEq n] (v : m → α) (w : n → α) : v ᵥ* (A ⊙ B) ⬝ᵥ w = trace (diagonal v * A * (B * diagonal w)ᵀ) := by rw [← sum_hadamard_eq, Finset.sum_comm] simp [dotProduct, vecMul, Finset.sum_mul, mul_assoc] end trace end BasicProperties end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/SesquilinearForm.lean
import Mathlib.Algebra.GroupWithZero.Action.Opposite import Mathlib.LinearAlgebra.Finsupp.VectorSpace import Mathlib.LinearAlgebra.Matrix.Basis import Mathlib.LinearAlgebra.Matrix.Nondegenerate import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.LinearAlgebra.Matrix.ToLinearEquiv import Mathlib.LinearAlgebra.SesquilinearForm.Basic import Mathlib.LinearAlgebra.Basis.Bilinear /-! # Sesquilinear form This file defines the conversion between sesquilinear maps and matrices. ## Main definitions * `Matrix.toLinearMap₂` given a basis define a bilinear map * `Matrix.toLinearMap₂'` define the bilinear map on `n → R` * `LinearMap.toMatrix₂`: calculate the matrix coefficients of a bilinear map * `LinearMap.toMatrix₂'`: calculate the matrix coefficients of a bilinear map on `n → R` ## TODO At the moment this is quite a literal port from `Matrix.BilinearForm`. Everything should be generalized to fully semi-bilinear forms. ## Tags Sesquilinear form, Sesquilinear map, matrix, basis -/ open Finset LinearMap Matrix Module open scoped RightActions variable {R R₁ S₁ R₂ S₂ M₁ M₂ M₁' M₂' N₂ n m n' m' ι : Type*} section AuxToLinearMap variable [Semiring R₁] [Semiring S₁] [Semiring R₂] [Semiring S₂] [AddCommMonoid N₂] [Module S₁ N₂] [Module S₂ N₂] [SMulCommClass S₂ S₁ N₂] variable [Fintype n] [Fintype m] variable (σ₁ : R₁ →+* S₁) (σ₂ : R₂ →+* S₂) /-- The map from `Matrix n n R` to bilinear maps on `n → R`. This is an auxiliary definition for the equivalence `Matrix.toLinearMap₂'`. 
-/ def Matrix.toLinearMap₂'Aux (f : Matrix n m N₂) : (n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂ := -- porting note: we don't seem to have `∑ i j` as valid notation yet mk₂'ₛₗ σ₁ σ₂ (fun (v : n → R₁) (w : m → R₂) => ∑ i, ∑ j, σ₂ (w j) • σ₁ (v i) • f i j) (fun _ _ _ => by simp only [Pi.add_apply, map_add, smul_add, sum_add_distrib, add_smul]) (fun c v w => by simp only [Pi.smul_apply, smul_sum, smul_eq_mul, σ₁.map_mul, ← smul_comm _ (σ₁ c), MulAction.mul_smul]) (fun _ _ _ => by simp only [Pi.add_apply, map_add, add_smul, sum_add_distrib]) (fun _ v w => by simp only [Pi.smul_apply, smul_eq_mul, map_mul, MulAction.mul_smul, smul_sum]) variable [DecidableEq n] [DecidableEq m] theorem Matrix.toLinearMap₂'Aux_single (f : Matrix n m N₂) (i : n) (j : m) : f.toLinearMap₂'Aux σ₁ σ₂ (Pi.single i 1) (Pi.single j 1) = f i j := by rw [Matrix.toLinearMap₂'Aux, mk₂'ₛₗ_apply] have : (∑ i', ∑ j', (if i = i' then (1 : S₁) else (0 : S₁)) • (if j = j' then (1 : S₂) else (0 : S₂)) • f i' j') = f i j := by simp_rw [← Finset.smul_sum] simp only [ite_smul, one_smul, zero_smul, sum_ite_eq, mem_univ, ↓reduceIte] rw [← this] exact Finset.sum_congr rfl fun _ _ => Finset.sum_congr rfl fun _ _ => by aesop end AuxToLinearMap section AuxToMatrix section CommSemiring variable [CommSemiring R] [Semiring R₁] [Semiring S₁] [Semiring R₂] [Semiring S₂] variable [AddCommMonoid M₁] [Module R₁ M₁] [AddCommMonoid M₂] [Module R₂ M₂] [AddCommMonoid N₂] [Module R N₂] [Module S₁ N₂] [Module S₂ N₂] [SMulCommClass S₁ R N₂] [SMulCommClass S₂ R N₂] [SMulCommClass S₂ S₁ N₂] variable {σ₁ : R₁ →+* S₁} {σ₂ : R₂ →+* S₂} variable (R) /-- The linear map from sesquilinear maps to `Matrix n m N₂` given an `n`-indexed basis for `M₁` and an `m`-indexed basis for `M₂`. This is an auxiliary definition for the equivalence `Matrix.toLinearMapₛₗ₂'`. 
-/ def LinearMap.toMatrix₂Aux (b₁ : n → M₁) (b₂ : m → M₂) : (M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] N₂) →ₗ[R] Matrix n m N₂ where toFun f := of fun i j => f (b₁ i) (b₂ j) map_add' _f _g := rfl map_smul' _f _g := rfl @[simp] theorem LinearMap.toMatrix₂Aux_apply (f : M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] N₂) (b₁ : n → M₁) (b₂ : m → M₂) (i : n) (j : m) : LinearMap.toMatrix₂Aux R b₁ b₂ f i j = f (b₁ i) (b₂ j) := rfl variable [Fintype n] [Fintype m] variable [DecidableEq n] [DecidableEq m] theorem LinearMap.toLinearMap₂'Aux_toMatrix₂Aux (f : (n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂) : Matrix.toLinearMap₂'Aux σ₁ σ₂ (LinearMap.toMatrix₂Aux R (fun i => Pi.single i 1) (fun j => Pi.single j 1) f) = f := by refine ext_basis (Pi.basisFun R₁ n) (Pi.basisFun R₂ m) fun i j => ?_ simp_rw [Pi.basisFun_apply, Matrix.toLinearMap₂'Aux_single, LinearMap.toMatrix₂Aux_apply] theorem Matrix.toMatrix₂Aux_toLinearMap₂'Aux (f : Matrix n m N₂) : LinearMap.toMatrix₂Aux R (fun i => Pi.single i 1) (fun j => Pi.single j 1) (f.toLinearMap₂'Aux σ₁ σ₂) = f := by ext i j simp_rw [LinearMap.toMatrix₂Aux_apply, Matrix.toLinearMap₂'Aux_single] end CommSemiring end AuxToMatrix section ToMatrix' /-! ### Bilinear maps over `n → R` This section deals with the conversion between matrices and sesquilinear maps on `n → R`. 
-/ variable [CommSemiring R] [AddCommMonoid N₂] [Module R N₂] [Semiring R₁] [Semiring R₂] [Semiring S₁] [Semiring S₂] [Module S₁ N₂] [Module S₂ N₂] [SMulCommClass S₁ R N₂] [SMulCommClass S₂ R N₂] [SMulCommClass S₂ S₁ N₂] variable {σ₁ : R₁ →+* S₁} {σ₂ : R₂ →+* S₂} variable [Fintype n] [Fintype m] variable [DecidableEq n] [DecidableEq m] variable (R) /-- The linear equivalence between sesquilinear maps and `n × m` matrices -/ def LinearMap.toMatrixₛₗ₂' : ((n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂) ≃ₗ[R] Matrix n m N₂ := { LinearMap.toMatrix₂Aux R (fun i => Pi.single i 1) (fun j => Pi.single j 1) with toFun := LinearMap.toMatrix₂Aux R _ _ invFun := Matrix.toLinearMap₂'Aux σ₁ σ₂ left_inv := LinearMap.toLinearMap₂'Aux_toMatrix₂Aux R right_inv := Matrix.toMatrix₂Aux_toLinearMap₂'Aux R } /-- The linear equivalence between bilinear maps and `n × m` matrices -/ def LinearMap.toMatrix₂' : ((n → S₁) →ₗ[S₁] (m → S₂) →ₗ[S₂] N₂) ≃ₗ[R] Matrix n m N₂ := LinearMap.toMatrixₛₗ₂' R variable (σ₁ σ₂) /-- The linear equivalence between `n × n` matrices and sesquilinear maps on `n → R` -/ def Matrix.toLinearMapₛₗ₂' : Matrix n m N₂ ≃ₗ[R] (n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂ := (LinearMap.toMatrixₛₗ₂' R).symm /-- The linear equivalence between `n × n` matrices and bilinear maps on `n → R` -/ def Matrix.toLinearMap₂' : Matrix n m N₂ ≃ₗ[R] (n → S₁) →ₗ[S₁] (m → S₂) →ₗ[S₂] N₂ := (LinearMap.toMatrix₂' R).symm variable {R} theorem Matrix.toLinearMapₛₗ₂'_aux_eq (M : Matrix n m N₂) : Matrix.toLinearMap₂'Aux σ₁ σ₂ M = Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ M := rfl theorem Matrix.toLinearMapₛₗ₂'_apply (M : Matrix n m N₂) (x : n → R₁) (y : m → R₂) : -- porting note: we don't seem to have `∑ i j` as valid notation yet Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ M x y = ∑ i, ∑ j, σ₁ (x i) • σ₂ (y j) • M i j := by rw [toLinearMapₛₗ₂', toMatrixₛₗ₂', LinearEquiv.coe_symm_mk, toLinearMap₂'Aux, mk₂'ₛₗ_apply] apply Finset.sum_congr rfl fun _ _ => Finset.sum_congr rfl fun _ _ => by rw [smul_comm] theorem Matrix.toLinearMap₂'_apply (M 
: Matrix n m N₂) (x : n → S₁) (y : m → S₂) : -- porting note: we don't seem to have `∑ i j` as valid notation yet Matrix.toLinearMap₂' R M x y = ∑ i, ∑ j, x i • y j • M i j := Finset.sum_congr rfl fun _ _ => Finset.sum_congr rfl fun _ _ => by rw [RingHom.id_apply, RingHom.id_apply, smul_comm] theorem Matrix.toLinearMap₂'_apply' {T : Type*} [CommSemiring T] (M : Matrix n m T) (v : n → T) (w : m → T) : Matrix.toLinearMap₂' T M v w = v ⬝ᵥ (M *ᵥ w) := by simp_rw [Matrix.toLinearMap₂'_apply, dotProduct, Matrix.mulVec, dotProduct] refine Finset.sum_congr rfl fun _ _ => ?_ rw [Finset.mul_sum] refine Finset.sum_congr rfl fun _ _ => ?_ rw [smul_eq_mul, smul_eq_mul, mul_comm (w _), ← mul_assoc] @[simp] theorem Matrix.toLinearMapₛₗ₂'_single (M : Matrix n m N₂) (i : n) (j : m) : Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ M (Pi.single i 1) (Pi.single j 1) = M i j := Matrix.toLinearMap₂'Aux_single σ₁ σ₂ M i j @[simp] theorem Matrix.toLinearMap₂'_single (M : Matrix n m N₂) (i : n) (j : m) : Matrix.toLinearMap₂' R M (Pi.single i 1) (Pi.single j 1) = M i j := Matrix.toLinearMap₂'Aux_single _ _ M i j @[simp] theorem LinearMap.toMatrixₛₗ₂'_symm : ((LinearMap.toMatrixₛₗ₂' R).symm : Matrix n m N₂ ≃ₗ[R] _) = Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ := rfl @[simp] theorem Matrix.toLinearMapₛₗ₂'_symm : ((Matrix.toLinearMapₛₗ₂' R σ₁ σ₂).symm : _ ≃ₗ[R] Matrix n m N₂) = LinearMap.toMatrixₛₗ₂' R := (LinearMap.toMatrixₛₗ₂' R).symm_symm @[simp] theorem Matrix.toLinearMapₛₗ₂'_toMatrix' (B : (n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂) : Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ (LinearMap.toMatrixₛₗ₂' R B) = B := (Matrix.toLinearMapₛₗ₂' R σ₁ σ₂).apply_symm_apply B @[simp] theorem Matrix.toLinearMap₂'_toMatrix' (B : (n → S₁) →ₗ[S₁] (m → S₂) →ₗ[S₂] N₂) : Matrix.toLinearMap₂' R (LinearMap.toMatrix₂' R B) = B := (Matrix.toLinearMap₂' R).apply_symm_apply B @[simp] theorem LinearMap.toMatrix'_toLinearMapₛₗ₂' (M : Matrix n m N₂) : LinearMap.toMatrixₛₗ₂' R (Matrix.toLinearMapₛₗ₂' R σ₁ σ₂ M) = M := (LinearMap.toMatrixₛₗ₂' R).apply_symm_apply M 
@[simp] theorem LinearMap.toMatrix'_toLinearMap₂' (M : Matrix n m N₂) : LinearMap.toMatrix₂' R (Matrix.toLinearMap₂' R (S₁ := S₁) (S₂ := S₂) M) = M := (LinearMap.toMatrixₛₗ₂' R).apply_symm_apply M @[simp] theorem LinearMap.toMatrixₛₗ₂'_apply (B : (n → R₁) →ₛₗ[σ₁] (m → R₂) →ₛₗ[σ₂] N₂) (i : n) (j : m) : LinearMap.toMatrixₛₗ₂' R B i j = B (Pi.single i 1) (Pi.single j 1) := rfl @[simp] theorem LinearMap.toMatrix₂'_apply (B : (n → S₁) →ₗ[S₁] (m → S₂) →ₗ[S₂] N₂) (i : n) (j : m) : LinearMap.toMatrix₂' R B i j = B (Pi.single i 1) (Pi.single j 1) := rfl end ToMatrix' section CommToMatrix' -- TODO: Introduce matrix multiplication by matrices of scalars variable {R : Type*} [CommSemiring R] variable [Fintype n] [Fintype m] variable [DecidableEq n] [DecidableEq m] variable [Fintype n'] [Fintype m'] variable [DecidableEq n'] [DecidableEq m'] @[simp] theorem LinearMap.toMatrix₂'_compl₁₂ (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (l : (n' → R) →ₗ[R] n → R) (r : (m' → R) →ₗ[R] m → R) : toMatrix₂' R (B.compl₁₂ l r) = (toMatrix' l)ᵀ * toMatrix₂' R B * toMatrix' r := by ext i j simp only [LinearMap.toMatrix₂'_apply, LinearMap.compl₁₂_apply, transpose_apply, Matrix.mul_apply, LinearMap.toMatrix', LinearEquiv.coe_mk, LinearMap.coe_mk, AddHom.coe_mk, sum_mul] rw [sum_comm] conv_lhs => rw [← LinearMap.sum_repr_mul_repr_mul (Pi.basisFun R n) (Pi.basisFun R m) (l _) (r _)] rw [Finsupp.sum_fintype] · apply sum_congr rfl rintro i' - rw [Finsupp.sum_fintype] · apply sum_congr rfl rintro j' - simp only [smul_eq_mul, Pi.basisFun_repr, mul_assoc, mul_comm, mul_left_comm, Pi.basisFun_apply, of_apply] · intros simp only [zero_smul, smul_zero] · intros simp only [zero_smul, Finsupp.sum_zero] theorem LinearMap.toMatrix₂'_comp (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (f : (n' → R) →ₗ[R] n → R) : toMatrix₂' R (B.comp f) = (toMatrix' f)ᵀ * toMatrix₂' R B := by rw [← LinearMap.compl₂_id (B.comp f), ← LinearMap.compl₁₂] simp theorem LinearMap.toMatrix₂'_compl₂ (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (f : (m' → R) →ₗ[R] 
m → R) : toMatrix₂' R (B.compl₂ f) = toMatrix₂' R B * toMatrix' f := by rw [← LinearMap.comp_id B, ← LinearMap.compl₁₂] simp theorem LinearMap.mul_toMatrix₂'_mul (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (M : Matrix n' n R) (N : Matrix m m' R) : M * toMatrix₂' R B * N = toMatrix₂' R (B.compl₁₂ (toLin' Mᵀ) (toLin' N)) := by simp theorem LinearMap.mul_toMatrix' (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (M : Matrix n' n R) : M * toMatrix₂' R B = toMatrix₂' R (B.comp <| toLin' Mᵀ) := by simp only [B.toMatrix₂'_comp, transpose_transpose, toMatrix'_toLin'] theorem LinearMap.toMatrix₂'_mul (B : (n → R) →ₗ[R] (m → R) →ₗ[R] R) (M : Matrix m m' R) : toMatrix₂' R B * M = toMatrix₂' R (B.compl₂ <| toLin' M) := by simp only [B.toMatrix₂'_compl₂, toMatrix'_toLin'] theorem Matrix.toLinearMap₂'_comp (M : Matrix n m R) (P : Matrix n n' R) (Q : Matrix m m' R) : LinearMap.compl₁₂ (Matrix.toLinearMap₂' R M) (toLin' P) (toLin' Q) = toLinearMap₂' R (Pᵀ * M * Q) := (LinearMap.toMatrix₂' R).injective (by simp) end CommToMatrix' section ToMatrix /-! ### Bilinear maps over arbitrary vector spaces This section deals with the conversion between matrices and bilinear maps on a module with a fixed basis. -/ variable [CommSemiring R] variable [AddCommMonoid M₁] [Module R M₁] [AddCommMonoid M₂] [Module R M₂] [AddCommMonoid N₂] [Module R N₂] variable {σ₁ : R →+* R} {σ₂ : R →+* R} [Fintype n] [Fintype m] [DecidableEq m] [DecidableEq n] section variable (b₁ : Basis n R M₁) (b₂ : Basis m R M₂) /-- `LinearMap.toMatrix₂ b₁ b₂` is the equivalence between `R`-sesquilinear maps `M₁ →ₛₗ[σ₁] M₂ →ₗ[σ₂] N₂` and `n`-by-`m` matrices with entries in `N₂`, if `b₁` and `b₂` are `R`-bases for `M₁` and `M₂`, respectively. 
-/ noncomputable def LinearMap.toMatrix₂ : (M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] N₂) ≃ₗ[R] Matrix n m N₂ := (b₁.equivFun.arrowCongr (b₂.equivFun.arrowCongr (LinearEquiv.refl R N₂))).trans (LinearMap.toMatrixₛₗ₂' R) variable (σ₁) in /-- `Matrix.toLinearMapₛₗ₂ b₁ b₂` is the equivalence between `R`-sesquilinear maps `M₁ →ₛₗ[σ₁] M₂ →ₗ[R] N₂` and `n`-by-`m` matrices with entries in `N₂`, if `b₁` and `b₂` are `R`-bases for `M₁` and `M₂`, respectively; this is the reverse direction of `LinearMap.toMatrix₂ b₁ b₂`. -/ noncomputable def Matrix.toLinearMapₛₗ₂ : Matrix n m N₂ ≃ₗ[R] M₁ →ₛₗ[σ₁] M₂ →ₗ[R] N₂ := (LinearMap.toMatrix₂ b₁ b₂).symm /-- `Matrix.toLinearMap₂ b₁ b₂` is the same as `Matrix.toLinearMapₛₗ₂ b₁ b₂` but with `σ₁ := RingHom.id R` to avoid having to specify it. -/ noncomputable def Matrix.toLinearMap₂ : Matrix n m N₂ ≃ₗ[R] M₁ →ₗ[R] M₂ →ₗ[R] N₂ := toLinearMapₛₗ₂ (.id R) b₁ b₂ -- We make this and not `LinearMap.toMatrix₂` a `simp` lemma to avoid timeouts @[simp] theorem LinearMap.toMatrix₂_apply (B : M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] N₂) (i : n) (j : m) : LinearMap.toMatrix₂ b₁ b₂ B i j = B (b₁ i) (b₂ j) := by simp only [toMatrix₂, LinearEquiv.trans_apply, toMatrixₛₗ₂'_apply, LinearEquiv.arrowCongr_apply, Basis.equivFun_symm_apply, Pi.single_apply, ite_smul, one_smul, zero_smul, sum_ite_eq', mem_univ, ↓reduceIte, LinearEquiv.refl_apply] @[simp] theorem Matrix.toLinearMapₛₗ₂_apply (M : Matrix n m N₂) (x : M₁) (y : M₂) : Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂ M x y = ∑ i, ∑ j, σ₁ (b₁.repr x i) • b₂.repr y j • M i j := Finset.sum_congr rfl fun _ _ => Finset.sum_congr rfl fun _ _ => smul_algebra_smul_comm (σ₁ ((Basis.equivFun b₁) x _)) ((RingHom.id R) ((Basis.equivFun b₂) y _)) (M _ _) @[simp] theorem Matrix.toLinearMap₂_apply (M : Matrix n m N₂) (x : M₁) (y : M₂) : Matrix.toLinearMap₂ b₁ b₂ M x y = ∑ i, ∑ j, b₁.repr x i • b₂.repr y j • M i j := Finset.sum_congr rfl fun _ _ => Finset.sum_congr rfl fun _ _ => smul_algebra_smul_comm ((RingHom.id R) ((Basis.equivFun b₁) x _)) ((RingHom.id R) 
((Basis.equivFun b₂) y _)) (M _ _) theorem Matrix.toLinearMapₛₗ₂_apply_basis (M : Matrix n m N₂) (i : n) (j : m) : Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂ M (b₁ i) (b₂ j) = M i j := by simp only [toLinearMapₛₗ₂_apply, Basis.repr_self] rw [Finset.sum_eq_single_of_mem i (by simp) fun k _ hk ↦ by simp [hk], Finset.sum_eq_single_of_mem j (by simp) fun k _ hk ↦ by simp [hk]] simp theorem Matrix.toLinearMap₂_apply_basis (M : Matrix n m N₂) (i : n) (j : m) : Matrix.toLinearMap₂ b₁ b₂ M (b₁ i) (b₂ j) = M i j := toLinearMapₛₗ₂_apply_basis .. theorem dotProduct_toMatrix₂_mulVec (B : M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] R) (x : n → R) (y : m → R) : (σ₁ ∘ x) ⬝ᵥ (toMatrix₂ b₁ b₂ B) *ᵥ (σ₂ ∘ y) = B (b₁.equivFun.symm x) (b₂.equivFun.symm y) := by simp only [dotProduct, Function.comp_apply, Function.comp_def, mulVec_eq_sum, op_smul_eq_smul, Finset.sum_apply, Pi.smul_apply, transpose_apply, toMatrix₂_apply, smul_eq_mul, mul_sum, Basis.equivFun_symm_apply, map_sum, LinearMap.map_smulₛₗ, coeFn_sum, LinearMap.smul_apply] rw [Finset.sum_comm] refine Finset.sum_congr rfl (fun i _ ↦ Finset.sum_congr rfl fun j _ ↦ ?_) ring lemma apply_eq_dotProduct_toMatrix₂_mulVec (B : M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] R) (x : M₁) (y : M₂) : B x y = (σ₁ ∘ b₁.repr x) ⬝ᵥ (toMatrix₂ b₁ b₂ B) *ᵥ (σ₂ ∘ b₂.repr y) := by nth_rw 1 [← b₁.sum_repr x, ← b₂.sum_repr y] suffices ∑ j, ∑ i, σ₂ (b₂.repr y j) * σ₁ (b₁.repr x i) * B (b₁ i) (b₂ j) = ∑ i, ∑ j, σ₁ (b₁.repr x i) * σ₂ (b₂.repr y j) * B (b₁ i) (b₂ j) by simpa [dotProduct, Matrix.mulVec_eq_sum, Finset.mul_sum, -Basis.sum_repr, ← mul_assoc] simp_rw [mul_comm (σ₂ _)] exact Finset.sum_comm -- Not a `simp` lemma since `LinearMap.toMatrix₂` needs an extra argument theorem LinearMap.toMatrix₂Aux_eq (B : M₁ →ₛₗ[σ₁] M₂ →ₛₗ[σ₂] N₂) : LinearMap.toMatrix₂Aux R b₁ b₂ B = LinearMap.toMatrix₂ b₁ b₂ B := Matrix.ext fun i j => by rw [LinearMap.toMatrix₂_apply, LinearMap.toMatrix₂Aux_apply] @[simp] theorem LinearMap.toMatrix₂_symm' : (LinearMap.toMatrix₂ b₁ b₂).symm = Matrix.toLinearMapₛₗ₂ σ₁ (N₂ := N₂) b₁ b₂ := 
rfl theorem LinearMap.toMatrix₂_symm : (LinearMap.toMatrix₂ b₁ b₂).symm = Matrix.toLinearMap₂ (N₂ := N₂) b₁ b₂ := rfl @[simp] theorem Matrix.toLinearMapₛₗ₂_symm : (Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂).symm = LinearMap.toMatrix₂ (N₂ := N₂) b₁ b₂ := (LinearMap.toMatrix₂ b₁ b₂).symm_symm theorem Matrix.toLinearMap₂_symm : (Matrix.toLinearMap₂ b₁ b₂).symm = LinearMap.toMatrix₂ (N₂ := N₂) b₁ b₂ := (LinearMap.toMatrix₂ b₁ b₂).symm_symm theorem Matrix.toLinearMap₂_basisFun : Matrix.toLinearMap₂ (Pi.basisFun R n) (Pi.basisFun R m) = Matrix.toLinearMap₂' R (N₂ := N₂) := by ext M simp only [coe_comp, coe_single, Function.comp_apply, toLinearMap₂_apply, Pi.basisFun_repr, toLinearMap₂'_apply] theorem LinearMap.toMatrix₂_basisFun : LinearMap.toMatrix₂ (Pi.basisFun R n) (Pi.basisFun R m) = LinearMap.toMatrix₂' R (N₂ := N₂) := by ext B rw [LinearMap.toMatrix₂_apply, LinearMap.toMatrix₂'_apply, Pi.basisFun_apply, Pi.basisFun_apply] @[simp] theorem Matrix.toLinearMapₛₗ₂_toMatrix₂ (B : M₁ →ₛₗ[σ₁] M₂ →ₗ[R] N₂) : Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂ (LinearMap.toMatrix₂ b₁ b₂ B) = B := (Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂).apply_symm_apply B theorem Matrix.toLinearMap₂_toMatrix₂ (B : M₁ →ₗ[R] M₂ →ₗ[R] N₂) : Matrix.toLinearMap₂ b₁ b₂ (LinearMap.toMatrix₂ b₁ b₂ B) = B := (Matrix.toLinearMap₂ b₁ b₂).apply_symm_apply B @[simp] theorem LinearMap.toMatrix₂_toLinearMapₛₗ₂ (M : Matrix n m N₂) : LinearMap.toMatrix₂ b₁ b₂ (Matrix.toLinearMapₛₗ₂ σ₁ b₁ b₂ M) = M := (LinearMap.toMatrix₂ b₁ b₂).apply_symm_apply M theorem LinearMap.toMatrix₂_toLinearMap₂ (M : Matrix n m N₂) : LinearMap.toMatrix₂ b₁ b₂ (Matrix.toLinearMap₂ b₁ b₂ M) = M := (LinearMap.toMatrix₂ b₁ b₂).apply_symm_apply M variable (b₁ : Basis n R M₁) (b₂ : Basis m R M₂) variable [AddCommMonoid M₁'] [Module R M₁'] variable [AddCommMonoid M₂'] [Module R M₂'] variable (b₁' : Basis n' R M₁') variable (b₂' : Basis m' R M₂') variable [Fintype n'] [Fintype m'] variable [DecidableEq n'] [DecidableEq m'] -- Cannot be a `simp` lemma because `b₁` and `b₂` must be 
inferred. theorem LinearMap.toMatrix₂_compl₁₂ (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (l : M₁' →ₗ[R] M₁) (r : M₂' →ₗ[R] M₂) : LinearMap.toMatrix₂ b₁' b₂' (B.compl₁₂ l r) = (toMatrix b₁' b₁ l)ᵀ * LinearMap.toMatrix₂ b₁ b₂ B * toMatrix b₂' b₂ r := by ext i j simp only [LinearMap.toMatrix₂_apply, compl₁₂_apply, transpose_apply, Matrix.mul_apply, LinearMap.toMatrix_apply, sum_mul] rw [sum_comm] conv_lhs => rw [← LinearMap.sum_repr_mul_repr_mul b₁ b₂] rw [Finsupp.sum_fintype] · apply sum_congr rfl rintro i' - rw [Finsupp.sum_fintype] · apply sum_congr rfl rintro j' - simp only [smul_eq_mul, mul_assoc, mul_comm, mul_left_comm] · intros simp only [zero_smul, smul_zero] · intros simp only [zero_smul, Finsupp.sum_zero] theorem LinearMap.toMatrix₂_comp (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (f : M₁' →ₗ[R] M₁) : LinearMap.toMatrix₂ b₁' b₂ (B.comp f) = (toMatrix b₁' b₁ f)ᵀ * LinearMap.toMatrix₂ b₁ b₂ B := by rw [← LinearMap.compl₂_id (B.comp f), ← LinearMap.compl₁₂, LinearMap.toMatrix₂_compl₁₂ b₁ b₂] simp theorem LinearMap.toMatrix₂_compl₂ (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (f : M₂' →ₗ[R] M₂) : LinearMap.toMatrix₂ b₁ b₂' (B.compl₂ f) = LinearMap.toMatrix₂ b₁ b₂ B * toMatrix b₂' b₂ f := by rw [← LinearMap.comp_id B, ← LinearMap.compl₁₂, LinearMap.toMatrix₂_compl₁₂ b₁ b₂] simp @[simp] theorem LinearMap.toMatrix₂_mul_basis_toMatrix (c₁ : Basis n' R M₁) (c₂ : Basis m' R M₂) (B : M₁ →ₗ[R] M₂ →ₗ[R] R) : (b₁.toMatrix c₁)ᵀ * LinearMap.toMatrix₂ b₁ b₂ B * b₂.toMatrix c₂ = LinearMap.toMatrix₂ c₁ c₂ B := by simp_rw [← LinearMap.toMatrix_id_eq_basis_toMatrix] rw [← LinearMap.toMatrix₂_compl₁₂, LinearMap.compl₁₂_id_id] theorem LinearMap.mul_toMatrix₂_mul (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (M : Matrix n' n R) (N : Matrix m m' R) : M * LinearMap.toMatrix₂ b₁ b₂ B * N = LinearMap.toMatrix₂ b₁' b₂' (B.compl₁₂ (toLin b₁' b₁ Mᵀ) (toLin b₂' b₂ N)) := by simp_rw [LinearMap.toMatrix₂_compl₁₂ b₁ b₂, toMatrix_toLin, transpose_transpose] theorem LinearMap.mul_toMatrix₂ (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (M : Matrix n' n R) : M * LinearMap.toMatrix₂ b₁ b₂ 
B = LinearMap.toMatrix₂ b₁' b₂ (B.comp (toLin b₁' b₁ Mᵀ)) := by rw [LinearMap.toMatrix₂_comp b₁, toMatrix_toLin, transpose_transpose] theorem LinearMap.toMatrix₂_mul (B : M₁ →ₗ[R] M₂ →ₗ[R] R) (M : Matrix m m' R) : LinearMap.toMatrix₂ b₁ b₂ B * M = LinearMap.toMatrix₂ b₁ b₂' (B.compl₂ (toLin b₂' b₂ M)) := by rw [LinearMap.toMatrix₂_compl₂ b₁ b₂, toMatrix_toLin] theorem Matrix.toLinearMap₂_compl₁₂ (M : Matrix n m R) (P : Matrix n n' R) (Q : Matrix m m' R) : (Matrix.toLinearMap₂ b₁ b₂ M).compl₁₂ (toLin b₁' b₁ P) (toLin b₂' b₂ Q) = Matrix.toLinearMap₂ b₁' b₂' (Pᵀ * M * Q) := (LinearMap.toMatrix₂ b₁' b₂').injective (by simp only [LinearMap.toMatrix₂_compl₁₂ b₁ b₂, LinearMap.toMatrix₂_toLinearMap₂, toMatrix_toLin]) end end ToMatrix /-! ### Adjoint pairs -/ section MatrixAdjoints open Matrix variable [CommRing R] variable [AddCommMonoid M₁] [Module R M₁] [AddCommMonoid M₂] [Module R M₂] variable [Fintype n] [Fintype n'] variable (b₁ : Basis n R M₁) (b₂ : Basis n' R M₂) variable (J J₂ : Matrix n n R) (J' : Matrix n' n' R) variable (A : Matrix n' n R) (A' : Matrix n n' R) variable (A₁ A₂ : Matrix n n R) /-- The condition for the matrices `A`, `A'` to be an adjoint pair with respect to the square matrices `J`, `J₃`. -/ def Matrix.IsAdjointPair := Aᵀ * J' = J * A' /-- The condition for a square matrix `A` to be self-adjoint with respect to the square matrix `J`. -/ protected def Matrix.IsSelfAdjoint := Matrix.IsAdjointPair J J A₁ A₁ /-- The condition for a square matrix `A` to be skew-adjoint with respect to the square matrix `J`. 
-/ protected def Matrix.IsSkewAdjoint := Matrix.IsAdjointPair J J A₁ (-A₁) variable [DecidableEq n] [DecidableEq n'] @[simp] theorem isAdjointPair_toLinearMap₂' : LinearMap.IsAdjointPair (Matrix.toLinearMap₂' R J) (Matrix.toLinearMap₂' R J') (Matrix.toLin' A) (Matrix.toLin' A') ↔ Matrix.IsAdjointPair J J' A A' := by rw [isAdjointPair_iff_comp_eq_compl₂] have h : ∀ B B' : (n → R) →ₗ[R] (n' → R) →ₗ[R] R, B = B' ↔ LinearMap.toMatrix₂' R B = LinearMap.toMatrix₂' R B' := by intro B B' constructor <;> intro h · rw [h] · exact (LinearMap.toMatrix₂' R).injective h simp_rw [h, LinearMap.toMatrix₂'_comp, LinearMap.toMatrix₂'_compl₂, LinearMap.toMatrix'_toLin', LinearMap.toMatrix'_toLinearMap₂'] rfl @[simp] theorem isAdjointPair_toLinearMap₂ : LinearMap.IsAdjointPair (Matrix.toLinearMap₂ b₁ b₁ J) (Matrix.toLinearMap₂ b₂ b₂ J') (Matrix.toLin b₁ b₂ A) (Matrix.toLin b₂ b₁ A') ↔ Matrix.IsAdjointPair J J' A A' := by rw [isAdjointPair_iff_comp_eq_compl₂] have h : ∀ B B' : M₁ →ₗ[R] M₂ →ₗ[R] R, B = B' ↔ LinearMap.toMatrix₂ b₁ b₂ B = LinearMap.toMatrix₂ b₁ b₂ B' := by intro B B' constructor <;> intro h · rw [h] · exact (LinearMap.toMatrix₂ b₁ b₂).injective h simp_rw [h, LinearMap.toMatrix₂_comp b₂ b₂, LinearMap.toMatrix₂_compl₂ b₁ b₁, LinearMap.toMatrix_toLin, LinearMap.toMatrix₂_toLinearMap₂] rfl theorem Matrix.isAdjointPair_equiv (P : Matrix n n R) (h : IsUnit P) : (Pᵀ * J * P).IsAdjointPair (Pᵀ * J * P) A₁ A₂ ↔ J.IsAdjointPair J (P * A₁ * P⁻¹) (P * A₂ * P⁻¹) := by have h' : IsUnit P.det := P.isUnit_iff_isUnit_det.mp h let u := P.nonsingInvUnit h' let v := Pᵀ.nonsingInvUnit (P.isUnit_det_transpose h') let x := A₁ᵀ * Pᵀ * J let y := J * P * A₂ suffices x * u = v * y ↔ v⁻¹ * x = y * u⁻¹ by dsimp only [Matrix.IsAdjointPair] simp only [Matrix.transpose_mul] simp only [← mul_assoc, P.transpose_nonsing_inv] convert this using 2 · rw [mul_assoc, mul_assoc, ← mul_assoc J] rfl · rw [mul_assoc, mul_assoc, ← mul_assoc _ _ J] rfl rw [Units.eq_mul_inv_iff_mul_eq] conv_rhs => rw [mul_assoc] rw 
[v.inv_mul_eq_iff_eq_mul] /-- The submodule of pair-self-adjoint matrices with respect to bilinear forms corresponding to given matrices `J`, `J₂`. -/ def pairSelfAdjointMatricesSubmodule : Submodule R (Matrix n n R) := (isPairSelfAdjointSubmodule (Matrix.toLinearMap₂' R J) (Matrix.toLinearMap₂' R J₂)).map ((LinearMap.toMatrix' : ((n → R) →ₗ[R] n → R) ≃ₗ[R] Matrix n n R) : ((n → R) →ₗ[R] n → R) →ₗ[R] Matrix n n R) @[simp] theorem mem_pairSelfAdjointMatricesSubmodule : A₁ ∈ pairSelfAdjointMatricesSubmodule J J₂ ↔ Matrix.IsAdjointPair J J₂ A₁ A₁ := by simp only [pairSelfAdjointMatricesSubmodule, Submodule.mem_map_equiv, mem_isPairSelfAdjointSubmodule, toMatrix'_symm, ← isAdjointPair_toLinearMap₂', IsPairSelfAdjoint, toLin'_apply'] /-- The submodule of self-adjoint matrices with respect to the bilinear form corresponding to the matrix `J`. -/ def selfAdjointMatricesSubmodule : Submodule R (Matrix n n R) := pairSelfAdjointMatricesSubmodule J J @[simp] theorem mem_selfAdjointMatricesSubmodule : A₁ ∈ selfAdjointMatricesSubmodule J ↔ J.IsSelfAdjoint A₁ := by rw [selfAdjointMatricesSubmodule, mem_pairSelfAdjointMatricesSubmodule, Matrix.IsSelfAdjoint] /-- The submodule of skew-adjoint matrices with respect to the bilinear form corresponding to the matrix `J`. -/ def skewAdjointMatricesSubmodule : Submodule R (Matrix n n R) := pairSelfAdjointMatricesSubmodule (-J) J @[simp] theorem mem_skewAdjointMatricesSubmodule : A₁ ∈ skewAdjointMatricesSubmodule J ↔ J.IsSkewAdjoint A₁ := by rw [skewAdjointMatricesSubmodule, mem_pairSelfAdjointMatricesSubmodule] simp [Matrix.IsSkewAdjoint, Matrix.IsAdjointPair] end MatrixAdjoints namespace LinearMap /-! 
### Nondegenerate bilinear forms -/ section Det open Matrix variable [CommRing R₁] [AddCommMonoid M₁] [Module R₁ M₁] variable [DecidableEq ι] [Fintype ι] theorem _root_.Matrix.separatingLeft_toLinearMap₂'_iff_separatingLeft_toLinearMap₂ {M : Matrix ι ι R₁} (b : Basis ι R₁ M₁) : (Matrix.toLinearMap₂' R₁ M).SeparatingLeft (R := R₁) ↔ (Matrix.toLinearMap₂ b b M).SeparatingLeft := (separatingLeft_congr_iff b.equivFun.symm b.equivFun.symm).symm -- Lemmas transferring nondegeneracy between a matrix and its associated bilinear form theorem _root_.Matrix.Nondegenerate.toLinearMap₂' {M : Matrix ι ι R₁} (h : M.Nondegenerate) : (Matrix.toLinearMap₂' R₁ M).SeparatingLeft (R := R₁) := fun x hx => h.eq_zero_of_ortho fun y => by simpa only [toLinearMap₂'_apply'] using hx y @[simp] theorem _root_.Matrix.separatingLeft_toLinearMap₂'_iff {M : Matrix ι ι R₁} : (Matrix.toLinearMap₂' R₁ M).SeparatingLeft (R := R₁) ↔ M.Nondegenerate := by refine ⟨fun h ↦ Matrix.nondegenerate_def.mpr ?_, Matrix.Nondegenerate.toLinearMap₂'⟩ exact fun v hv => h v fun w => (M.toLinearMap₂'_apply' _ _).trans <| hv w theorem _root_.Matrix.Nondegenerate.toLinearMap₂ {M : Matrix ι ι R₁} (h : M.Nondegenerate) (b : Basis ι R₁ M₁) : (toLinearMap₂ b b M).SeparatingLeft := (Matrix.separatingLeft_toLinearMap₂'_iff_separatingLeft_toLinearMap₂ b).mp h.toLinearMap₂' @[simp] theorem _root_.Matrix.separatingLeft_toLinearMap₂_iff {M : Matrix ι ι R₁} (b : Basis ι R₁ M₁) : (toLinearMap₂ b b M).SeparatingLeft ↔ M.Nondegenerate := by rw [← Matrix.separatingLeft_toLinearMap₂'_iff_separatingLeft_toLinearMap₂, Matrix.separatingLeft_toLinearMap₂'_iff] -- Lemmas transferring nondegeneracy between a bilinear form and its associated matrix @[simp] theorem nondegenerate_toMatrix₂'_iff {B : (ι → R₁) →ₗ[R₁] (ι → R₁) →ₗ[R₁] R₁} : (LinearMap.toMatrix₂' R₁ B).Nondegenerate ↔ B.SeparatingLeft := Matrix.separatingLeft_toLinearMap₂'_iff.symm.trans <| (Matrix.toLinearMap₂'_toMatrix' (R := R₁) B).symm ▸ Iff.rfl theorem SeparatingLeft.toMatrix₂' 
{B : (ι → R₁) →ₗ[R₁] (ι → R₁) →ₗ[R₁] R₁} (h : B.SeparatingLeft) : (LinearMap.toMatrix₂' R₁ B).Nondegenerate := nondegenerate_toMatrix₂'_iff.mpr h @[simp] theorem nondegenerate_toMatrix_iff {B : M₁ →ₗ[R₁] M₁ →ₗ[R₁] R₁} (b : Basis ι R₁ M₁) : (toMatrix₂ b b B).Nondegenerate ↔ B.SeparatingLeft := (Matrix.separatingLeft_toLinearMap₂_iff b).symm.trans <| (Matrix.toLinearMap₂_toMatrix₂ b b B).symm ▸ Iff.rfl theorem SeparatingLeft.toMatrix₂ {B : M₁ →ₗ[R₁] M₁ →ₗ[R₁] R₁} (h : B.SeparatingLeft) (b : Basis ι R₁ M₁) : (toMatrix₂ b b B).Nondegenerate := (nondegenerate_toMatrix_iff b).mpr h -- Some shorthands for combining the above with `Matrix.nondegenerate_of_det_ne_zero` variable [IsDomain R₁] theorem separatingLeft_toLinearMap₂'_iff_det_ne_zero {M : Matrix ι ι R₁} : (Matrix.toLinearMap₂' R₁ M).SeparatingLeft (R := R₁) ↔ M.det ≠ 0 := by rw [Matrix.separatingLeft_toLinearMap₂'_iff, Matrix.nondegenerate_iff_det_ne_zero] theorem separatingLeft_toLinearMap₂'_of_det_ne_zero' (M : Matrix ι ι R₁) (h : M.det ≠ 0) : (Matrix.toLinearMap₂' R₁ M).SeparatingLeft (R := R₁) := separatingLeft_toLinearMap₂'_iff_det_ne_zero.mpr h theorem separatingLeft_iff_det_ne_zero {B : M₁ →ₗ[R₁] M₁ →ₗ[R₁] R₁} (b : Basis ι R₁ M₁) : B.SeparatingLeft ↔ (toMatrix₂ b b B).det ≠ 0 := by rw [← Matrix.nondegenerate_iff_det_ne_zero, nondegenerate_toMatrix_iff] theorem separatingLeft_of_det_ne_zero {B : M₁ →ₗ[R₁] M₁ →ₗ[R₁] R₁} (b : Basis ι R₁ M₁) (h : (toMatrix₂ b b B).det ≠ 0) : B.SeparatingLeft := (separatingLeft_iff_det_ne_zero b).mpr h end Det end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Rank.lean
import Mathlib.LinearAlgebra.Determinant import Mathlib.LinearAlgebra.Dual.Lemmas import Mathlib.LinearAlgebra.FiniteDimensional.Lemmas import Mathlib.LinearAlgebra.Matrix.Diagonal import Mathlib.LinearAlgebra.Matrix.DotProduct import Mathlib.LinearAlgebra.Matrix.Dual /-! # Rank of matrices The rank of a matrix `A` is defined to be the rank of range of the linear map corresponding to `A`. This definition does not depend on the choice of basis, see `Matrix.rank_eq_finrank_range_toLin`. ## Main declarations * `Matrix.rank`: the rank of a matrix * `Matrix.cRank`: the rank of a matrix as a cardinal * `Matrix.eRank`: the rank of a matrix as a term in `ℕ∞`. -/ open Matrix namespace Matrix open Module Cardinal Set Submodule universe ul um um₀ un un₀ uo uR variable {l : Type ul} {m : Type um} {m₀ : Type um₀} {n : Type un} {n₀ : Type un₀} {o : Type uo} variable {R : Type uR} section Infinite variable [Semiring R] /-- The rank of a matrix, defined as the dimension of its column space, as a cardinal. 
-/ noncomputable def cRank (A : Matrix m n R) : Cardinal := Module.rank R <| span R <| range Aᵀ @[simp] theorem cRank_subsingleton [Subsingleton R] (A : Matrix m n R) : A.cRank = 1 := rank_subsingleton _ _ lemma cRank_toNat_eq_finrank (A : Matrix m n R) : A.cRank.toNat = Module.finrank R (span R (range A.col)) := rfl lemma lift_cRank_submatrix_le (A : Matrix m n R) (r : m₀ → m) (c : n₀ → n) : lift.{um} (A.submatrix r c).cRank ≤ lift.{um₀} A.cRank := by have h : ((A.submatrix r id).submatrix id c).cRank ≤ (A.submatrix r id).cRank := Submodule.rank_mono <| span_mono <| by rintro _ ⟨x, rfl⟩; exact ⟨c x, rfl⟩ refine (Cardinal.lift_monotone h).trans ?_ let f : (m → R) →ₗ[R] (m₀ → R) := LinearMap.funLeft R R r have h_eq : Submodule.map f (span R (range Aᵀ)) = span R (range (A.submatrix r id)ᵀ) := by rw [LinearMap.map_span, ← image_univ, image_image, transpose_submatrix] aesop rw [cRank, ← h_eq] have hwin := lift_rank_map_le f (span R (range Aᵀ)) simp_rw [← lift_umax] at hwin ⊢ exact hwin /-- A special case of `lift_cRank_submatrix_le` for when `m₀` and `m` are in the same universe. -/ lemma cRank_submatrix_le {m m₀ : Type um} (A : Matrix m n R) (r : m₀ → m) (c : n₀ → n) : (A.submatrix r c).cRank ≤ A.cRank := by simpa using lift_cRank_submatrix_le A r c lemma cRank_le_card_height [StrongRankCondition R] [Fintype m] (A : Matrix m n R) : A.cRank ≤ Fintype.card m := (Submodule.rank_le (span R (range Aᵀ))).trans <| by rw [rank_fun'] lemma cRank_le_card_width [StrongRankCondition R] [Fintype n] (A : Matrix m n R) : A.cRank ≤ Fintype.card n := (rank_span_le ..).trans <| by simpa using Cardinal.mk_range_le_lift (f := Aᵀ) /-- The rank of a matrix, defined as the dimension of its column space, as a term in `ℕ∞`. 
-/ noncomputable def eRank (A : Matrix m n R) : ℕ∞ := A.cRank.toENat @[simp] theorem eRank_subsingleton [Subsingleton R] (A : Matrix m n R) : A.eRank = 1 := by simp [eRank] lemma eRank_toNat_eq_finrank (A : Matrix m n R) : A.eRank.toNat = Module.finrank R (span R (range A.col)) := toNat_toENat .. lemma eRank_submatrix_le (A : Matrix m n R) (r : m₀ → m) (c : n₀ → n) : (A.submatrix r c).eRank ≤ A.eRank := by simpa using OrderHom.mono (β := ℕ∞) Cardinal.toENat <| lift_cRank_submatrix_le A r c lemma eRank_le_card_width [StrongRankCondition R] (A : Matrix m n R) : A.eRank ≤ ENat.card n := by wlog hfin : Finite n · simp [ENat.card_eq_top.2 (by simpa using hfin)] have _ := Fintype.ofFinite n rw [ENat.card_eq_coe_fintype_card, eRank, toENat_le_nat] exact A.cRank_le_card_width lemma eRank_le_card_height [StrongRankCondition R] (A : Matrix m n R) : A.eRank ≤ ENat.card m := by classical wlog hfin : Finite m · simp [ENat.card_eq_top.2 (by simpa using hfin)] have _ := Fintype.ofFinite m rw [ENat.card_eq_coe_fintype_card, eRank, toENat_le_nat] exact A.cRank_le_card_height end Infinite variable [Fintype n] [Fintype o] section CommRing variable [CommRing R] /-- The rank of a matrix is the rank of its image. 
-/ noncomputable def rank (A : Matrix m n R) : ℕ := finrank R <| LinearMap.range A.mulVecLin @[simp] theorem rank_subsingleton [Subsingleton R] (A : Matrix m n R) : A.rank = 1 := finrank_subsingleton @[simp] theorem cRank_one [Nontrivial R] [DecidableEq m] : (cRank (1 : Matrix m m R)) = lift.{uR} #m := by have h : LinearIndependent R (1 : Matrix m m R)ᵀ := by convert Pi.linearIndependent_single_one m R simp [funext_iff, Matrix.one_eq_pi_single] rw [cRank, rank_span h, ← lift_umax, ← Cardinal.mk_range_eq_of_injective h.injective, lift_id'] @[simp] theorem eRank_one [Nontrivial R] [DecidableEq m] : (eRank (1 : Matrix m m R)) = ENat.card m := by rw [eRank, cRank_one, toENat_lift, ENat.card] @[simp] theorem rank_one [Nontrivial R] [DecidableEq n] : rank (1 : Matrix n n R) = Fintype.card n := by rw [rank, mulVecLin_one, LinearMap.range_id, finrank_top, finrank_pi] @[simp] theorem rank_zero [Nontrivial R] : rank (0 : Matrix m n R) = 0 := by rw [rank, mulVecLin_zero, LinearMap.range_zero, finrank_bot] @[simp] theorem cRank_zero {m n : Type*} [Nontrivial R] : cRank (0 : Matrix m n R) = 0 := by obtain hn | hn := isEmpty_or_nonempty n · rw [cRank, range_eq_empty, span_empty, rank_bot] rw [cRank, transpose_zero, range_zero, span_zero_singleton, rank_bot] @[simp] theorem eRank_zero {m n : Type*} [Nontrivial R] : eRank (0 : Matrix m n R) = 0 := by simp [eRank] theorem rank_le_card_width [Nontrivial R] (A : Matrix m n R) : A.rank ≤ Fintype.card n := by haveI : Module.Finite R (n → R) := Module.Finite.pi haveI : Module.Free R (n → R) := Module.Free.pi _ _ exact A.mulVecLin.finrank_range_le.trans_eq (finrank_pi _) theorem rank_le_width [Nontrivial R] {m n : ℕ} (A : Matrix (Fin m) (Fin n) R) : A.rank ≤ n := A.rank_le_card_width.trans <| (Fintype.card_fin n).le theorem rank_mul_le_left (A : Matrix m n R) (B : Matrix n o R) : (A * B).rank ≤ A.rank := by nontriviality R rw [rank, rank, mulVecLin_mul] exact Cardinal.toNat_le_toNat (LinearMap.rank_comp_le_left _ _) (rank_lt_aleph0 _ _) 
theorem rank_mul_le_right (A : Matrix m n R) (B : Matrix n o R) : (A * B).rank ≤ B.rank := by nontriviality R rw [rank, rank, mulVecLin_mul] exact finrank_le_finrank_of_rank_le_rank (LinearMap.lift_rank_comp_le_right _ _) (rank_lt_aleph0 _ _) theorem rank_mul_le (A : Matrix m n R) (B : Matrix n o R) : (A * B).rank ≤ min A.rank B.rank := le_min (rank_mul_le_left _ _) (rank_mul_le_right _ _) theorem rank_vecMulVec_le (w : m → R) (v : n → R) : (Matrix.vecMulVec w v).rank ≤ 1 := by rw [Matrix.vecMulVec_eq Unit] refine le_trans (rank_mul_le_left _ _) ?_ nontriviality R exact rank_le_card_width _ theorem rank_unit [Nontrivial R] [DecidableEq n] (A : (Matrix n n R)ˣ) : (A : Matrix n n R).rank = Fintype.card n := by apply le_antisymm (rank_le_card_width (A : Matrix n n R)) _ have := rank_mul_le_left (A : Matrix n n R) (↑A⁻¹ : Matrix n n R) rwa [← Units.val_mul, mul_inv_cancel, Units.val_one, rank_one] at this theorem rank_of_isUnit [Nontrivial R] [DecidableEq n] (A : Matrix n n R) (h : IsUnit A) : A.rank = Fintype.card n := by obtain ⟨A, rfl⟩ := h exact rank_unit A /-- Right multiplying by an invertible matrix does not change the rank -/ @[simp] lemma rank_mul_eq_left_of_isUnit_det [DecidableEq n] (A : Matrix n n R) (B : Matrix m n R) (hA : IsUnit A.det) : (B * A).rank = B.rank := by suffices Function.Surjective A.mulVecLin by rw [rank, mulVecLin_mul, LinearMap.range_comp_of_range_eq_top _ (LinearMap.range_eq_top.mpr this), ← rank] intro v exact ⟨(A⁻¹).mulVecLin v, by simp [mul_nonsing_inv _ hA]⟩ /-- Left multiplying by an invertible matrix does not change the rank -/ @[simp] lemma rank_mul_eq_right_of_isUnit_det [Fintype m] [DecidableEq m] (A : Matrix m m R) (B : Matrix m n R) (hA : IsUnit A.det) : (A * B).rank = B.rank := by let b : Basis m R (m → R) := Pi.basisFun R m replace hA : IsUnit (LinearMap.toMatrix b b A.mulVecLin).det := by convert hA; rw [← LinearEquiv.eq_symm_apply]; rfl have hAB : mulVecLin (A * B) = (LinearEquiv.ofIsUnitDet hA).comp (mulVecLin B) := by 
ext; simp rw [rank, rank, hAB, LinearMap.range_comp, LinearEquiv.finrank_map_eq] omit [Fintype n] in /-- Taking a subset of the rows and permuting the columns reduces the rank. -/ theorem rank_submatrix_le [Nontrivial R] [Fintype m] [Fintype m₀] (f : n₀ → n) (e : m₀ ≃ m) (A : Matrix n m R) : rank (A.submatrix f e) ≤ rank A := by rw [rank, rank, mulVecLin_submatrix, LinearMap.range_comp, LinearMap.range_comp, show LinearMap.funLeft R R e.symm = LinearEquiv.funCongrLeft R R e.symm from rfl, LinearEquiv.range, Submodule.map_top] exact Submodule.finrank_map_le _ _ theorem rank_reindex [Fintype n₀] (em : m ≃ m₀) (en : n ≃ n₀) (A : Matrix m n R) : rank (A.reindex em en) = rank A := by rw [rank, rank, mulVecLin_reindex, LinearMap.range_comp, LinearMap.range_comp, LinearEquiv.range, Submodule.map_top, LinearEquiv.finrank_map_eq] @[simp] theorem rank_submatrix [Fintype n₀] (A : Matrix m n R) (em : m₀ ≃ m) (en : n₀ ≃ n) : rank (A.submatrix em en) = rank A := by simpa only [reindex_apply] using rank_reindex em.symm en.symm A @[simp] theorem lift_cRank_submatrix {n : Type un} (A : Matrix m n R) (em : m₀ ≃ m) (en : n₀ ≃ n) : lift.{um} (cRank (A.submatrix em en)) = lift.{um₀} (cRank A) := (A.lift_cRank_submatrix_le em en).antisymm <| by simpa using ((A.reindex em.symm en.symm).lift_cRank_submatrix_le em.symm en.symm) /-- A special case of `lift_cRank_submatrix` for when the row types are in the same universe. -/ @[simp] theorem cRank_submatrix {m₀ : Type um} {n : Type un} (A : Matrix m n R) (em : m₀ ≃ m) (en : n₀ ≃ n) : cRank (A.submatrix em en) = cRank A := by simpa [-lift_cRank_submatrix] using A.lift_cRank_submatrix em en theorem lift_cRank_reindex {n : Type un} (A : Matrix m n R) (em : m ≃ m₀) (en : n ≃ n₀) : lift.{um} (cRank (A.reindex em en)) = lift.{um₀} (cRank A) := lift_cRank_submatrix .. /-- A special case of `lift_cRank_reindex` for when the row types are in the same universe. 
-/ theorem cRank_reindex {m₀ : Type um} {n : Type un} (A : Matrix m n R) (em : m ≃ m₀) (en : n ≃ n₀) : cRank (A.reindex em en) = cRank A := cRank_submatrix .. @[simp] theorem eRank_submatrix {n : Type un} (A : Matrix m n R) (em : m₀ ≃ m) (en : n₀ ≃ n) : eRank (A.submatrix em en) = eRank A := by simpa [-lift_cRank_submatrix] using congr_arg Cardinal.toENat <| A.lift_cRank_submatrix em en theorem eRank_reindex {m₀ : Type um} {n : Type un} (A : Matrix m n R) (em : m ≃ m₀) (en : n ≃ n₀) : eRank (A.reindex em en) = eRank A := eRank_submatrix .. theorem rank_eq_finrank_range_toLin [Finite m] [DecidableEq n] {M₁ M₂ : Type*} [AddCommGroup M₁] [AddCommGroup M₂] [Module R M₁] [Module R M₂] (A : Matrix m n R) (v₁ : Basis m R M₁) (v₂ : Basis n R M₂) : A.rank = finrank R (LinearMap.range (toLin v₂ v₁ A)) := by cases nonempty_fintype m let e₁ := (Pi.basisFun R m).equiv v₁ (Equiv.refl _) let e₂ := (Pi.basisFun R n).equiv v₂ (Equiv.refl _) have range_e₂ : LinearMap.range e₂ = ⊤ := by rw [LinearMap.range_eq_top] exact e₂.surjective refine LinearEquiv.finrank_eq (e₁.ofSubmodules _ _ ?_) rw [← LinearMap.range_comp, ← LinearMap.range_comp_of_range_eq_top (toLin v₂ v₁ A) range_e₂] congr 1 apply LinearMap.pi_ext' rintro i apply LinearMap.ext_ring have aux₁ := toLin_self (Pi.basisFun R n) (Pi.basisFun R m) A i have aux₂ := Basis.equiv_apply (Pi.basisFun R n) i v₂ rw [toLin_eq_toLin', toLin'_apply'] at aux₁ rw [Pi.basisFun_apply] at aux₁ aux₂ simp only [e₁, e₂, LinearMap.comp_apply, LinearEquiv.coe_coe, Equiv.refl_apply, aux₁, aux₂, LinearMap.coe_single, toLin_self, map_sum, LinearEquiv.map_smul, Basis.equiv_apply] theorem rank_le_card_height [Fintype m] [Nontrivial R] (A : Matrix m n R) : A.rank ≤ Fintype.card m := by haveI : Module.Finite R (m → R) := Module.Finite.pi haveI : Module.Free R (m → R) := Module.Free.pi _ _ exact (Submodule.finrank_le _).trans (finrank_pi R).le theorem rank_le_height [Nontrivial R] {m n : ℕ} (A : Matrix (Fin m) (Fin n) R) : A.rank ≤ m := 
A.rank_le_card_height.trans <| (Fintype.card_fin m).le /-- The rank of a matrix is the rank of the space spanned by its columns. -/ theorem rank_eq_finrank_span_cols (A : Matrix m n R) : A.rank = finrank R (Submodule.span R (Set.range A.col)) := by rw [rank, Matrix.range_mulVecLin] @[simp] theorem cRank_toNat_eq_rank (A : Matrix m n R) : A.cRank.toNat = A.rank := by rw [cRank_toNat_eq_finrank, ← rank_eq_finrank_span_cols] @[simp] theorem eRank_toNat_eq_rank (A : Matrix m n R) : A.eRank.toNat = A.rank := by rw [eRank_toNat_eq_finrank, ← rank_eq_finrank_span_cols] end CommRing section Field variable [Field R] /-- The rank of a diagonal matrix is the count of non-zero elements on its main diagonal -/ theorem rank_diagonal [Fintype m] [DecidableEq m] [DecidableEq R] (w : m → R) : (diagonal w).rank = Fintype.card {i // (w i) ≠ 0} := by rw [Matrix.rank, ← Matrix.toLin'_apply', Module.finrank, ← LinearMap.rank, LinearMap.rank_diagonal, Cardinal.toNat_natCast] theorem cRank_diagonal [DecidableEq m] (w : m → R) : (diagonal w).cRank = lift.{uR} #{i // (w i) ≠ 0} := by classical set w' : {i // (w i) ≠ 0} → _ := fun i ↦ (diagonal w) i have h : LinearIndependent R w' := by have hli' := Pi.linearIndependent_single_of_ne_zero (R := R) (v := fun i : m ↦ if w i = 0 then (1 : R) else w i) (by simp [ite_eq_iff']) convert hli'.comp Subtype.val Subtype.val_injective ext ⟨j, hj⟩ k simp [w', diagonal, hj, Pi.single_apply, eq_comm] have hrw : insert 0 (range (diagonal w)ᵀ) = insert 0 (range w') := by suffices ∀ a, diagonal w a = 0 ∨ ∃ b, w b ≠ 0 ∧ diagonal w b = diagonal w a by simpa [subset_antisymm_iff, subset_def, w'] simp_rw [or_iff_not_imp_right, not_exists, not_and, not_imp_not] simp +contextual [funext_iff, diagonal] rw [cRank, ← span_insert_zero, hrw, span_insert_zero, rank_span h, ← lift_umax, ← Cardinal.mk_range_eq_of_injective h.injective, lift_id'] theorem eRank_diagonal [DecidableEq m] (w : m → R) : (diagonal w).eRank = {i | (w i) ≠ 0}.encard := by simp [eRank, 
cRank_diagonal, toENat_cardinalMk_subtype] end Field /-! ### Lemmas about transpose and conjugate transpose This section contains lemmas about the rank of `Matrix.transpose` and `Matrix.conjTranspose`. Unfortunately the proofs are essentially duplicated between the two; `ℚ` is a linearly-ordered ring but can't be a star-ordered ring, while `ℂ` is star-ordered (with `open ComplexOrder`) but not linearly ordered. For now we don't prove the transpose case for `ℂ`. TODO: the lemmas `Matrix.rank_transpose` and `Matrix.rank_conjTranspose` current follow a short proof that is a simple consequence of `Matrix.rank_transpose_mul_self` and `Matrix.rank_conjTranspose_mul_self`. This proof pulls in unnecessary assumptions on `R`, and should be replaced with a proof that uses Gaussian reduction or argues via linear combinations. -/ section StarOrderedField variable [Fintype m] [Field R] [PartialOrder R] [StarRing R] [StarOrderedRing R] theorem ker_mulVecLin_conjTranspose_mul_self (A : Matrix m n R) : LinearMap.ker (Aᴴ * A).mulVecLin = LinearMap.ker (mulVecLin A) := by ext x simp only [LinearMap.mem_ker, mulVecLin_apply, conjTranspose_mul_self_mulVec_eq_zero] theorem rank_conjTranspose_mul_self (A : Matrix m n R) : (Aᴴ * A).rank = A.rank := by dsimp only [rank] refine add_left_injective (finrank R (LinearMap.ker (mulVecLin A))) ?_ dsimp only trans finrank R { x // x ∈ LinearMap.range (mulVecLin (Aᴴ * A)) } + finrank R { x // x ∈ LinearMap.ker (mulVecLin (Aᴴ * A)) } · rw [ker_mulVecLin_conjTranspose_mul_self] · simp only [LinearMap.finrank_range_add_finrank_ker] -- this follows the proof here https://math.stackexchange.com/a/81903/1896 /-- TODO: prove this in greater generality. 
-/ @[simp] theorem rank_conjTranspose (A : Matrix m n R) : Aᴴ.rank = A.rank := le_antisymm (((rank_conjTranspose_mul_self _).symm.trans_le <| rank_mul_le_left _ _).trans_eq <| congr_arg _ <| conjTranspose_conjTranspose _) ((rank_conjTranspose_mul_self _).symm.trans_le <| rank_mul_le_left _ _) @[simp] theorem rank_self_mul_conjTranspose (A : Matrix m n R) : (A * Aᴴ).rank = A.rank := by simpa only [rank_conjTranspose, conjTranspose_conjTranspose] using rank_conjTranspose_mul_self Aᴴ end StarOrderedField section LinearOrderedField variable [Fintype m] [Field R] [LinearOrder R] [IsStrictOrderedRing R] theorem ker_mulVecLin_transpose_mul_self (A : Matrix m n R) : LinearMap.ker (Aᵀ * A).mulVecLin = LinearMap.ker (mulVecLin A) := by ext x simp only [LinearMap.mem_ker, mulVecLin_apply, ← mulVec_mulVec] constructor · intro h replace h := congr_arg (dotProduct x) h rwa [dotProduct_mulVec, dotProduct_zero, vecMul_transpose, dotProduct_self_eq_zero] at h · intro h rw [h, mulVec_zero] theorem rank_transpose_mul_self (A : Matrix m n R) : (Aᵀ * A).rank = A.rank := by dsimp only [rank] refine add_left_injective (finrank R <| LinearMap.ker A.mulVecLin) ?_ dsimp only trans finrank R { x // x ∈ LinearMap.range (mulVecLin (Aᵀ * A)) } + finrank R { x // x ∈ LinearMap.ker (mulVecLin (Aᵀ * A)) } · rw [ker_mulVecLin_transpose_mul_self] · simp only [LinearMap.finrank_range_add_finrank_ker] end LinearOrderedField @[simp] theorem rank_transpose [Field R] [Fintype m] (A : Matrix m n R) : Aᵀ.rank = A.rank := by classical rw [Aᵀ.rank_eq_finrank_range_toLin (Pi.basisFun R n).dualBasis (Pi.basisFun R m).dualBasis, toLin_transpose, ← LinearMap.dualMap_def, LinearMap.finrank_range_dualMap_eq_finrank_range, toLin_eq_toLin', toLin'_apply', rank] @[simp] theorem rank_self_mul_transpose [Field R] [LinearOrder R] [IsStrictOrderedRing R] [Fintype m] (A : Matrix m n R) : (A * Aᵀ).rank = A.rank := by simpa only [rank_transpose, transpose_transpose] using rank_transpose_mul_self Aᵀ /-- The rank of a matrix 
is the rank of the space spanned by its rows. -/ theorem rank_eq_finrank_span_row [Field R] [Finite m] (A : Matrix m n R) : A.rank = finrank R (Submodule.span R (Set.range A.row)) := by cases nonempty_fintype m rw [← rank_transpose, rank_eq_finrank_span_cols, col_transpose] theorem _root_.LinearIndependent.rank_matrix [Field R] [Fintype m] {M : Matrix m n R} (h : LinearIndependent R M.row) : M.rank = Fintype.card m := by rw [M.rank_eq_finrank_span_row, linearIndependent_iff_card_eq_finrank_span.mp h, Set.finrank] lemma rank_add_rank_le_card_of_mul_eq_zero [Field R] [Finite l] [Fintype m] {A : Matrix l m R} {B : Matrix m n R} (hAB : A * B = 0) : A.rank + B.rank ≤ Fintype.card m := by classical let el : Basis l R (l → R) := Pi.basisFun R l let em : Basis m R (m → R) := Pi.basisFun R m let en : Basis n R (n → R) := Pi.basisFun R n rw [Matrix.rank_eq_finrank_range_toLin A el em, Matrix.rank_eq_finrank_range_toLin B em en, ← Module.finrank_fintype_fun_eq_card R, ← LinearMap.finrank_range_add_finrank_ker (Matrix.toLin em el A), add_le_add_iff_left] apply Submodule.finrank_mono rw [LinearMap.range_le_ker_iff, ← Matrix.toLin_mul, hAB, map_zero] end Matrix -- TODO: generalize to `cRank` then deprecate theorem Matrix.rank_vecMulVec.{u} {K m n : Type u} [CommRing K] [Fintype n] [DecidableEq n] (w : m → K) (v : n → K) : (Matrix.vecMulVec w v).toLin'.rank ≤ 1 := by nontriviality K rw [Matrix.vecMulVec_eq (Fin 1), Matrix.toLin'_mul] refine le_trans (LinearMap.rank_comp_le_left _ _) ?_ refine (LinearMap.rank_le_domain _).trans_eq ?_ rw [rank_fun', Fintype.card_ofSubsingleton, Nat.cast_one]
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Permutation.lean
import Mathlib.Analysis.CStarAlgebra.Matrix import Mathlib.Data.Matrix.PEquiv import Mathlib.Data.Set.Card import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.LinearAlgebra.Matrix.Trace /-! # Permutation matrices This file defines the matrix associated with a permutation ## Main definitions - `Equiv.Perm.permMatrix`: the permutation matrix associated with an `Equiv.Perm` ## Main results - `Matrix.det_permutation`: the determinant is the sign of the permutation - `Matrix.trace_permutation`: the trace is the number of fixed points of the permutation -/ open Equiv variable {n R : Type*} [DecidableEq n] (σ : Perm n) variable (R) in /-- the permutation matrix associated with an `Equiv.Perm` -/ abbrev Equiv.Perm.permMatrix [Zero R] [One R] : Matrix n n R := σ.toPEquiv.toMatrix namespace Matrix @[simp] lemma transpose_permMatrix [Zero R] [One R] : (σ.permMatrix R).transpose = (σ⁻¹).permMatrix R := by rw [← PEquiv.toMatrix_symm, ← Equiv.toPEquiv_symm, ← Equiv.Perm.inv_def] @[simp] lemma conjTranspose_permMatrix [NonAssocSemiring R] [StarRing R] : (σ.permMatrix R).conjTranspose = (σ⁻¹).permMatrix R := by simp only [conjTranspose, transpose_permMatrix, map] aesop variable [Fintype n] /-- The determinant of a permutation matrix equals its sign. -/ @[simp] theorem det_permutation [CommRing R] : det (σ.permMatrix R) = Perm.sign σ := by rw [← Matrix.mul_one (σ.permMatrix R), PEquiv.toMatrix_toPEquiv_mul, det_permute, det_one, mul_one] /-- The trace of a permutation matrix equals the number of fixed points. 
-/ theorem trace_permutation [AddCommMonoidWithOne R] : trace (σ.permMatrix R) = (Function.fixedPoints σ).ncard := by delta trace simp [toPEquiv_apply, ← Set.ncard_coe_finset, Function.fixedPoints, Function.IsFixedPt] lemma permMatrix_mulVec {v : n → R} [CommRing R] : σ.permMatrix R *ᵥ v = v ∘ σ := by ext j simp [mulVec_eq_sum, Pi.single, Function.update, Equiv.eq_symm_apply] lemma vecMul_permMatrix {v : n → R} [CommRing R] : v ᵥ* σ.permMatrix R = v ∘ σ.symm := by ext j simp [vecMul_eq_sum, Pi.single, Function.update, ← Equiv.symm_apply_eq] open scoped Matrix.Norms.L2Operator variable {𝕜 : Type*} [RCLike 𝕜] /-- The l2-operator norm of a permutation matrix is bounded above by 1. See `Matrix.permMatrix_l2_opNorm_eq` for the equality statement assuming the matrix is nonempty. -/ theorem permMatrix_l2_opNorm_le : ‖σ.permMatrix 𝕜‖ ≤ 1 := ContinuousLinearMap.opNorm_le_bound _ (by simp) <| by simp [EuclideanSpace.norm_eq, toEuclideanLin_apply, permMatrix_mulVec, σ.sum_comp _ (fun i ↦ ‖_‖ ^ 2)] /-- The l2-operator norm of a nonempty permutation matrix is equal to 1. Note that this is not true for the empty case, since the empty matrix has l2-operator norm 0. See `Matrix.permMatrix_l2_opNorm_le` for the inequality version of the empty case. -/ theorem permMatrix_l2_opNorm_eq [Nonempty n] : ‖σ.permMatrix 𝕜‖ = 1 := le_antisymm (permMatrix_l2_opNorm_le σ) <| by inhabit n simpa [EuclideanSpace.norm_eq, permMatrix_mulVec, ← Equiv.eq_symm_apply, apply_ite] using (σ.permMatrix 𝕜).l2_opNorm_mulVec (WithLp.toLp _ (Pi.single default 1)) end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Integer.lean
import Mathlib.Algebra.Algebra.Defs import Mathlib.Algebra.GCDMonoid.Finset import Mathlib.Algebra.GCDMonoid.Nat import Mathlib.Data.Matrix.Mul import Mathlib.Data.Rat.Cast.CharZero /-! # Lemmas on integer matrices Here we collect some results about matrices over `ℚ` and `ℤ`. ## Main definitions and results * `Matrix.num`, `Matrix.den`: express a rational matrix `A` as the quotient of an integer matrix by a (non-zero) natural. ## TODO Consider generalizing these constructions to matrices over localizations of rings (or semirings). -/ namespace Matrix variable {m n : Type*} [Fintype m] [Fintype n] /-! ## Casts These results are useful shortcuts because the canonical casting maps out of `ℕ`, `ℤ`, and `ℚ` to suitable types are bare functions, not ring homs, so we cannot apply `Matrix.map_mul` directly to them. -/ lemma map_mul_natCast {α : Type*} [NonAssocSemiring α] (A B : Matrix n n ℕ) : map (A * B) ((↑) : ℕ → α) = map A (↑) * map B (↑) := Matrix.map_mul (f := Nat.castRingHom α) lemma map_mul_intCast {α : Type*} [NonAssocRing α] (A B : Matrix n n ℤ) : map (A * B) ((↑) : ℤ → α) = map A (↑) * map B (↑) := Matrix.map_mul (f := Int.castRingHom α) lemma map_mul_ratCast {α : Type*} [DivisionRing α] [CharZero α] (A B : Matrix n n ℚ) : map (A * B) ((↑) : ℚ → α) = map A (↑) * map B (↑) := Matrix.map_mul (f := Rat.castHom α) /-! ## Denominator of a rational matrix -/ /-- The denominator of a matrix of rationals (as a `Nat`, defined as the LCM of the denominators of the entries). -/ protected def den (A : Matrix m n ℚ) : ℕ := Finset.univ.lcm (fun P : m × n ↦ (A P.1 P.2).den) /-- The numerator of a matrix of rationals (a matrix of integers, defined so that `A.num / A.den = A`). 
-/ protected def num (A : Matrix m n ℚ) : Matrix m n ℤ := ((A.den : ℚ) • A).map Rat.num lemma den_ne_zero (A : Matrix m n ℚ) : A.den ≠ 0 := by simp [Matrix.den, Finset.lcm_eq_zero_iff] lemma num_eq_zero_iff (A : Matrix m n ℚ) : A.num = 0 ↔ A = 0 := by simp [Matrix.num, ← ext_iff, A.den_ne_zero] lemma den_dvd_iff {A : Matrix m n ℚ} {r : ℕ} : A.den ∣ r ↔ ∀ i j, (A i j).den ∣ r := by simp [Matrix.den] lemma num_div_den (A : Matrix m n ℚ) (i : m) (j : n) : A.num i j / A.den = A i j := by obtain ⟨k, hk⟩ := den_dvd_iff.mp (dvd_refl A.den) i j rw [Matrix.num, map_apply, smul_apply, smul_eq_mul, mul_comm, div_eq_iff <| Nat.cast_ne_zero.mpr A.den_ne_zero, hk, Nat.cast_mul, ← mul_assoc, Rat.mul_den_eq_num, ← Int.cast_natCast k, ← Int.cast_mul, Rat.num_intCast] lemma inv_denom_smul_num (A : Matrix m n ℚ) : (A.den⁻¹ : ℚ) • A.num.map (↑) = A := by ext simp [← Matrix.num_div_den A, div_eq_inv_mul] @[simp] lemma den_neg (A : Matrix m n ℚ) : (-A).den = A.den := eq_of_forall_dvd <| by simp [den_dvd_iff] @[simp] lemma num_neg (A : Matrix m n ℚ) : (-A).num = -A.num := by ext simp [Matrix.num] @[simp] lemma den_transpose (A : Matrix m n ℚ) : (Aᵀ).den = A.den := eq_of_forall_dvd fun _ ↦ by simpa [den_dvd_iff] using forall_comm @[simp] lemma num_transpose (A : Matrix m n ℚ) : (Aᵀ).num = (A.num)ᵀ := by ext; simp [Matrix.num] /-! ### Compatibility with `map` -/ @[simp] lemma den_map_intCast (A : Matrix m n ℤ) : (A.map (↑)).den = 1 := by simp [← Nat.dvd_one, Matrix.den_dvd_iff] @[simp] lemma num_map_intCast (A : Matrix m n ℤ) : (A.map (↑)).num = A := by simp [Matrix.num, Function.comp_def] @[simp] lemma den_map_natCast (A : Matrix m n ℕ) : (A.map (↑)).den = 1 := by simp [← Nat.dvd_one, Matrix.den_dvd_iff] @[simp] lemma num_map_natCast (A : Matrix m n ℕ) : (A.map (↑)).num = A.map (↑) := by simp [Matrix.num, Function.comp_def] /-! 
### Casts from scalar types -/ @[simp] lemma den_natCast [DecidableEq m] (a : ℕ) : (a : Matrix m m ℚ).den = 1 := by simpa [← diagonal_natCast] using den_map_natCast (a : Matrix m m ℕ) @[simp] lemma num_natCast [DecidableEq m] (a : ℕ) : (a : Matrix m m ℚ).num = a := by simpa [← diagonal_natCast] using num_map_natCast (a : Matrix m m ℕ) @[simp] lemma den_ofNat [DecidableEq m] (a : ℕ) [a.AtLeastTwo] : (ofNat(a) : Matrix m m ℚ).den = 1 := den_natCast a @[simp] lemma num_ofNat [DecidableEq m] (a : ℕ) [a.AtLeastTwo] : (ofNat(a) : Matrix m m ℚ).num = a := num_natCast a @[simp] lemma den_intCast [DecidableEq m] (a : ℤ) : (a : Matrix m m ℚ).den = 1 := by simpa [← diagonal_intCast] using den_map_intCast (a : Matrix m m ℤ) @[simp] lemma num_intCast [DecidableEq m] (a : ℤ) : (a : Matrix m m ℚ).num = a := by simpa [← diagonal_intCast] using num_map_intCast (a : Matrix m m ℤ) @[simp] lemma den_zero : (0 : Matrix m n ℚ).den = 1 := den_map_natCast 0 @[simp] lemma num_zero : (0 : Matrix m n ℚ).num = 0 := num_map_natCast 0 @[simp] lemma den_one [DecidableEq m] : (1 : Matrix m m ℚ).den = 1 := den_natCast 1 @[simp] lemma num_one [DecidableEq m] : (1 : Matrix m m ℚ).num = 1 := num_natCast 1 end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/CharP.lean
import Mathlib.Algebra.CharP.Defs import Mathlib.Data.Matrix.Diagonal /-! # Matrices in prime characteristic In this file we prove that matrices over a ring of characteristic `p` with nonempty index type have the same characteristic. -/ open Matrix variable {n : Type*} {R : Type*} [AddMonoidWithOne R] instance Matrix.charP [DecidableEq n] [Nonempty n] (p : ℕ) [CharP R p] : CharP (Matrix n n R) p where cast_eq_zero_iff k := by simp_rw [← diagonal_natCast, ← diagonal_zero, diagonal_eq_diagonal_iff, CharP.cast_eq_zero_iff R p k, forall_const]
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/ZPow.lean
import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.LinearAlgebra.Matrix.Symmetric /-! # Integer powers of square matrices In this file, we define integer power of matrices, relying on the nonsingular inverse definition for negative powers. ## Implementation details The main definition is a direct recursive call on the integer inductive type, as provided by the `DivInvMonoid.Pow` default implementation. The lemma names are taken from `Algebra.GroupWithZero.Power`. ## Tags matrix inverse, matrix powers -/ open Matrix namespace Matrix variable {n' : Type*} [DecidableEq n'] [Fintype n'] {R : Type*} [CommRing R] local notation "M" => Matrix n' n' R noncomputable instance : DivInvMonoid M := { show Monoid M by infer_instance, show Inv M by infer_instance with } section NatPow @[simp] theorem inv_pow' (A : M) (n : ℕ) : A⁻¹ ^ n = (A ^ n)⁻¹ := by induction n with | zero => simp | succ n ih => rw [pow_succ A, mul_inv_rev, ← ih, ← pow_succ'] theorem pow_sub' (A : M) {m n : ℕ} (ha : IsUnit A.det) (h : n ≤ m) : A ^ (m - n) = A ^ m * (A ^ n)⁻¹ := by rw [← tsub_add_cancel_of_le h, pow_add, Matrix.mul_assoc, mul_nonsing_inv, tsub_add_cancel_of_le h, Matrix.mul_one] simpa using ha.pow n theorem pow_inv_comm' (A : M) (m n : ℕ) : A⁻¹ ^ m * A ^ n = A ^ n * A⁻¹ ^ m := by induction n generalizing m with | zero => simp | succ n IH => rcases m with m | m · simp rcases nonsing_inv_cancel_or_zero A with ⟨h, h'⟩ | h · calc A⁻¹ ^ (m + 1) * A ^ (n + 1) = A⁻¹ ^ m * (A⁻¹ * A) * A ^ n := by simp only [pow_succ A⁻¹, pow_succ' A, Matrix.mul_assoc] _ = A ^ n * A⁻¹ ^ m := by simp only [h, Matrix.mul_one, IH m] _ = A ^ n * (A * A⁻¹) * A⁻¹ ^ m := by simp only [h', Matrix.mul_one] _ = A ^ (n + 1) * A⁻¹ ^ (m + 1) := by simp only [pow_succ A, pow_succ' A⁻¹, Matrix.mul_assoc] · simp [h] end NatPow section ZPow open Int @[simp] theorem one_zpow : ∀ n : ℤ, (1 : M) ^ n = 1 | (n : ℕ) => by rw [zpow_natCast, one_pow] | -[n+1] => by rw [zpow_negSucc, one_pow, inv_one] theorem zero_zpow : ∀ z : 
ℤ, z ≠ 0 → (0 : M) ^ z = 0 | (n : ℕ), h => by rw [zpow_natCast, zero_pow] exact mod_cast h | -[n+1], _ => by simp [zero_pow n.succ_ne_zero] theorem zero_zpow_eq (n : ℤ) : (0 : M) ^ n = if n = 0 then 1 else 0 := by split_ifs with h · rw [h, zpow_zero] · rw [zero_zpow _ h] theorem inv_zpow (A : M) : ∀ n : ℤ, A⁻¹ ^ n = (A ^ n)⁻¹ | (n : ℕ) => by rw [zpow_natCast, zpow_natCast, inv_pow'] | -[n+1] => by rw [zpow_negSucc, zpow_negSucc, inv_pow'] @[simp] theorem zpow_neg_one (A : M) : A ^ (-1 : ℤ) = A⁻¹ := by convert DivInvMonoid.zpow_neg' 0 A simp only [zpow_one, Int.ofNat_zero, Int.natCast_succ, zpow_eq_pow, zero_add] @[simp] theorem zpow_neg_natCast (A : M) (n : ℕ) : A ^ (-n : ℤ) = (A ^ n)⁻¹ := by cases n · simp · exact DivInvMonoid.zpow_neg' _ _ theorem _root_.IsUnit.det_zpow {A : M} (h : IsUnit A.det) (n : ℤ) : IsUnit (A ^ n).det := by rcases n with n | n · simpa using h.pow n · simpa using h.pow n.succ theorem isUnit_det_zpow_iff {A : M} {z : ℤ} : IsUnit (A ^ z).det ↔ IsUnit A.det ∨ z = 0 := by induction z with | zero => simp | succ z => rw [← Int.natCast_succ, zpow_natCast, det_pow, isUnit_pow_succ_iff, ← Int.ofNat_zero, Int.ofNat_inj] simp | pred z => rw [← neg_add', ← Int.natCast_succ, zpow_neg_natCast, isUnit_nonsing_inv_det_iff, det_pow, isUnit_pow_succ_iff, neg_eq_zero, ← Int.ofNat_zero, Int.ofNat_inj] simp theorem zpow_neg {A : M} (h : IsUnit A.det) : ∀ n : ℤ, A ^ (-n) = (A ^ n)⁻¹ | (n : ℕ) => zpow_neg_natCast _ _ | -[n+1] => by rw [zpow_negSucc, neg_negSucc, zpow_natCast, nonsing_inv_nonsing_inv] rw [det_pow] exact h.pow _ theorem inv_zpow' {A : M} (h : IsUnit A.det) (n : ℤ) : A⁻¹ ^ n = A ^ (-n) := by rw [zpow_neg h, inv_zpow] theorem zpow_add_one {A : M} (h : IsUnit A.det) : ∀ n : ℤ, A ^ (n + 1) = A ^ n * A | (n : ℕ) => by simp only [← Nat.cast_succ, pow_succ, zpow_natCast] | -[n+1] => calc A ^ (-(n + 1) + 1 : ℤ) = (A ^ n)⁻¹ := by rw [neg_add, neg_add_cancel_right, zpow_neg h, zpow_natCast] _ = (A * A ^ n)⁻¹ * A := by rw [mul_inv_rev, Matrix.mul_assoc, 
nonsing_inv_mul _ h, Matrix.mul_one] _ = A ^ (-(n + 1 : ℤ)) * A := by rw [zpow_neg h, ← Int.natCast_succ, zpow_natCast, pow_succ'] theorem zpow_sub_one {A : M} (h : IsUnit A.det) (n : ℤ) : A ^ (n - 1) = A ^ n * A⁻¹ := calc A ^ (n - 1) = A ^ (n - 1) * A * A⁻¹ := by rw [mul_assoc, mul_nonsing_inv _ h, mul_one] _ = A ^ n * A⁻¹ := by rw [← zpow_add_one h, sub_add_cancel] theorem zpow_add {A : M} (ha : IsUnit A.det) (m n : ℤ) : A ^ (m + n) = A ^ m * A ^ n := by induction n with | zero => simp | succ n ihn => simp only [← add_assoc, zpow_add_one ha, ihn, mul_assoc] | pred n ihn => rw [zpow_sub_one ha, ← mul_assoc, ← ihn, ← zpow_sub_one ha, add_sub_assoc] theorem zpow_add_of_nonpos {A : M} {m n : ℤ} (hm : m ≤ 0) (hn : n ≤ 0) : A ^ (m + n) = A ^ m * A ^ n := by rcases nonsing_inv_cancel_or_zero A with (⟨h, _⟩ | h) · exact zpow_add (isUnit_det_of_left_inverse h) m n · obtain ⟨k, rfl⟩ := exists_eq_neg_ofNat hm obtain ⟨l, rfl⟩ := exists_eq_neg_ofNat hn simp_rw [← neg_add, ← Int.natCast_add, zpow_neg_natCast, ← inv_pow', h, pow_add] theorem zpow_add_of_nonneg {A : M} {m n : ℤ} (hm : 0 ≤ m) (hn : 0 ≤ n) : A ^ (m + n) = A ^ m * A ^ n := by obtain ⟨k, rfl⟩ := eq_ofNat_of_zero_le hm obtain ⟨l, rfl⟩ := eq_ofNat_of_zero_le hn rw [← Int.natCast_add, zpow_natCast, zpow_natCast, zpow_natCast, pow_add] theorem zpow_one_add {A : M} (h : IsUnit A.det) (i : ℤ) : A ^ (1 + i) = A * A ^ i := by rw [zpow_add h, zpow_one] theorem SemiconjBy.zpow_right {A X Y : M} (hx : IsUnit X.det) (hy : IsUnit Y.det) (h : SemiconjBy A X Y) : ∀ m : ℤ, SemiconjBy A (X ^ m) (Y ^ m) | (n : ℕ) => by simp [h.pow_right n] | -[n+1] => by have hx' : IsUnit (X ^ n.succ).det := by rw [det_pow] exact hx.pow n.succ have hy' : IsUnit (Y ^ n.succ).det := by rw [det_pow] exact hy.pow n.succ rw [zpow_negSucc, zpow_negSucc, nonsing_inv_apply _ hx', nonsing_inv_apply _ hy', SemiconjBy] refine (isRegular_of_isLeftRegular_det hy'.isRegular.left).left ?_ dsimp only rw [← mul_assoc, ← (h.pow_right n.succ).eq, mul_assoc, mul_smul, 
mul_adjugate, ← Matrix.mul_assoc, mul_smul (Y ^ _) (↑hy'.unit⁻¹ : R), mul_adjugate, smul_smul, smul_smul, hx'.val_inv_mul, hy'.val_inv_mul, one_smul, Matrix.mul_one, Matrix.one_mul] theorem Commute.zpow_right {A B : M} (h : Commute A B) (m : ℤ) : Commute A (B ^ m) := by rcases nonsing_inv_cancel_or_zero B with (⟨hB, _⟩ | hB) · refine SemiconjBy.zpow_right ?_ ?_ h _ <;> exact isUnit_det_of_left_inverse hB · cases m · simpa using h.pow_right _ · simp [← inv_pow', hB] theorem Commute.zpow_left {A B : M} (h : Commute A B) (m : ℤ) : Commute (A ^ m) B := (Commute.zpow_right h.symm m).symm theorem Commute.zpow_zpow {A B : M} (h : Commute A B) (m n : ℤ) : Commute (A ^ m) (B ^ n) := Commute.zpow_right (Commute.zpow_left h _) _ theorem Commute.zpow_self (A : M) (n : ℤ) : Commute (A ^ n) A := Commute.zpow_left (Commute.refl A) _ theorem Commute.self_zpow (A : M) (n : ℤ) : Commute A (A ^ n) := Commute.zpow_right (Commute.refl A) _ theorem Commute.zpow_zpow_self (A : M) (m n : ℤ) : Commute (A ^ m) (A ^ n) := Commute.zpow_zpow (Commute.refl A) _ _ theorem zpow_add_one_of_ne_neg_one {A : M} : ∀ n : ℤ, n ≠ -1 → A ^ (n + 1) = A ^ n * A | (n : ℕ), _ => by simp only [pow_succ, ← Nat.cast_succ, zpow_natCast] | -1, h => absurd rfl h | -((n : ℕ) + 2), _ => by rcases nonsing_inv_cancel_or_zero A with (⟨h, _⟩ | h) · apply zpow_add_one (isUnit_det_of_left_inverse h) · change A ^ (-((n + 1 : ℕ) : ℤ)) = A ^ (-((n + 2 : ℕ) : ℤ)) * A simp_rw [zpow_neg_natCast, ← inv_pow', h, zero_pow <| Nat.succ_ne_zero _, zero_mul] theorem zpow_mul (A : M) (h : IsUnit A.det) : ∀ m n : ℤ, A ^ (m * n) = (A ^ m) ^ n | (m : ℕ), (n : ℕ) => by rw [zpow_natCast, zpow_natCast, ← pow_mul, ← zpow_natCast, Int.natCast_mul] | (m : ℕ), -[n+1] => by rw [zpow_natCast, zpow_negSucc, ← pow_mul, ofNat_mul_negSucc, zpow_neg_natCast] | -[m+1], (n : ℕ) => by rw [zpow_natCast, zpow_negSucc, ← inv_pow', ← pow_mul, negSucc_mul_ofNat, zpow_neg_natCast, inv_pow'] | -[m+1], -[n+1] => by rw [zpow_negSucc, zpow_negSucc, 
negSucc_mul_negSucc, ← Int.natCast_mul, zpow_natCast, inv_pow', ← pow_mul, nonsing_inv_nonsing_inv] rw [det_pow] exact h.pow _ theorem zpow_mul' (A : M) (h : IsUnit A.det) (m n : ℤ) : A ^ (m * n) = (A ^ n) ^ m := by rw [mul_comm, zpow_mul _ h] @[simp, norm_cast] theorem coe_units_zpow (u : Mˣ) : ∀ n : ℤ, ((u ^ n : Mˣ) : M) = (u : M) ^ n | (n : ℕ) => by rw [zpow_natCast, zpow_natCast, Units.val_pow_eq_pow_val] | -[k+1] => by rw [zpow_negSucc, zpow_negSucc, ← inv_pow, u⁻¹.val_pow_eq_pow_val, ← inv_pow', coe_units_inv] theorem zpow_ne_zero_of_isUnit_det [Nonempty n'] [Nontrivial R] {A : M} (ha : IsUnit A.det) (z : ℤ) : A ^ z ≠ 0 := by have := ha.det_zpow z contrapose! this rw [this, det_zero ‹_›] exact not_isUnit_zero theorem zpow_sub {A : M} (ha : IsUnit A.det) (z1 z2 : ℤ) : A ^ (z1 - z2) = A ^ z1 / A ^ z2 := by rw [sub_eq_add_neg, zpow_add ha, zpow_neg ha, div_eq_mul_inv] theorem Commute.mul_zpow {A B : M} (h : Commute A B) : ∀ i : ℤ, (A * B) ^ i = A ^ i * B ^ i | (n : ℕ) => by simp [h.mul_pow n] | -[n+1] => by rw [zpow_negSucc, zpow_negSucc, zpow_negSucc, ← mul_inv_rev, h.mul_pow n.succ, (h.pow_pow _ _).eq] theorem zpow_neg_mul_zpow_self (n : ℤ) {A : M} (h : IsUnit A.det) : A ^ (-n) * A ^ n = 1 := by rw [zpow_neg h, nonsing_inv_mul _ (h.det_zpow _)] theorem one_div_pow {A : M} (n : ℕ) : (1 / A) ^ n = 1 / A ^ n := by simp only [one_div, inv_pow'] theorem one_div_zpow {A : M} (n : ℤ) : (1 / A) ^ n = 1 / A ^ n := by simp only [one_div, inv_zpow] @[simp] theorem transpose_zpow (A : M) : ∀ n : ℤ, (A ^ n)ᵀ = Aᵀ ^ n | (n : ℕ) => by rw [zpow_natCast, zpow_natCast, transpose_pow] | -[n+1] => by rw [zpow_negSucc, zpow_negSucc, transpose_nonsing_inv, transpose_pow] @[simp] theorem conjTranspose_zpow [StarRing R] (A : M) : ∀ n : ℤ, (A ^ n)ᴴ = Aᴴ ^ n | (n : ℕ) => by rw [zpow_natCast, zpow_natCast, conjTranspose_pow] | -[n+1] => by rw [zpow_negSucc, zpow_negSucc, conjTranspose_nonsing_inv, conjTranspose_pow] theorem IsSymm.zpow {A : M} (h : A.IsSymm) (k : ℤ) : (A ^ k).IsSymm := 
by rw [IsSymm, transpose_zpow, h] end ZPow end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Reindex.lean
import Mathlib.LinearAlgebra.Matrix.Determinant.Basic /-! # Changing the index type of a matrix This file concerns the map `Matrix.reindex`, mapping a `m` by `n` matrix to an `m'` by `n'` matrix, as long as `m ≃ m'` and `n ≃ n'`. ## Main definitions * `Matrix.reindexLinearEquiv R A`: `Matrix.reindex` is an `R`-linear equivalence between `A`-matrices. * `Matrix.reindexAlgEquiv R`: `Matrix.reindex` is an `R`-algebra equivalence between `R`-matrices. ## Tags matrix, reindex -/ namespace Matrix open Equiv Matrix variable {l m n o : Type*} {l' m' n' o' : Type*} {m'' n'' : Type*} variable (R A : Type*) section AddCommMonoid variable [Semiring R] [AddCommMonoid A] [Module R A] /-- The natural map that reindexes a matrix's rows and columns with equivalent types, `Matrix.reindex`, is a linear equivalence. -/ def reindexLinearEquiv (eₘ : m ≃ m') (eₙ : n ≃ n') : Matrix m n A ≃ₗ[R] Matrix m' n' A := { reindex eₘ eₙ with map_add' := fun _ _ => rfl map_smul' := fun _ _ => rfl } @[simp] theorem reindexLinearEquiv_apply (eₘ : m ≃ m') (eₙ : n ≃ n') (M : Matrix m n A) : reindexLinearEquiv R A eₘ eₙ M = reindex eₘ eₙ M := rfl @[simp] theorem reindexLinearEquiv_symm (eₘ : m ≃ m') (eₙ : n ≃ n') : (reindexLinearEquiv R A eₘ eₙ).symm = reindexLinearEquiv R A eₘ.symm eₙ.symm := rfl @[simp] theorem reindexLinearEquiv_refl_refl : reindexLinearEquiv R A (Equiv.refl m) (Equiv.refl n) = LinearEquiv.refl R _ := LinearEquiv.ext fun _ => rfl theorem reindexLinearEquiv_trans (e₁ : m ≃ m') (e₂ : n ≃ n') (e₁' : m' ≃ m'') (e₂' : n' ≃ n'') : (reindexLinearEquiv R A e₁ e₂).trans (reindexLinearEquiv R A e₁' e₂') = (reindexLinearEquiv R A (e₁.trans e₁') (e₂.trans e₂') : _ ≃ₗ[R] _) := by ext rfl theorem reindexLinearEquiv_comp (e₁ : m ≃ m') (e₂ : n ≃ n') (e₁' : m' ≃ m'') (e₂' : n' ≃ n'') : reindexLinearEquiv R A e₁' e₂' ∘ reindexLinearEquiv R A e₁ e₂ = reindexLinearEquiv R A (e₁.trans e₁') (e₂.trans e₂') := by rw [← reindexLinearEquiv_trans] rfl theorem reindexLinearEquiv_comp_apply (e₁ : m ≃ m') (e₂ : n 
≃ n') (e₁' : m' ≃ m'') (e₂' : n' ≃ n'') (M : Matrix m n A) : (reindexLinearEquiv R A e₁' e₂') (reindexLinearEquiv R A e₁ e₂ M) = reindexLinearEquiv R A (e₁.trans e₁') (e₂.trans e₂') M := submatrix_submatrix _ _ _ _ _ theorem reindexLinearEquiv_one [DecidableEq m] [DecidableEq m'] [One A] (e : m ≃ m') : reindexLinearEquiv R A e e (1 : Matrix m m A) = 1 := submatrix_one_equiv e.symm end AddCommMonoid section Semiring variable [Semiring R] [Semiring A] [Module R A] theorem reindexLinearEquiv_mul [Fintype n] [Fintype n'] (eₘ : m ≃ m') (eₙ : n ≃ n') (eₒ : o ≃ o') (M : Matrix m n A) (N : Matrix n o A) : reindexLinearEquiv R A eₘ eₙ M * reindexLinearEquiv R A eₙ eₒ N = reindexLinearEquiv R A eₘ eₒ (M * N) := submatrix_mul_equiv M N _ _ _ theorem mul_reindexLinearEquiv_one [Fintype n] [DecidableEq o] (e₁ : o ≃ n) (e₂ : o ≃ n') (M : Matrix m n A) : M * (reindexLinearEquiv R A e₁ e₂ 1) = reindexLinearEquiv R A (Equiv.refl m) (e₁.symm.trans e₂) M := haveI := Fintype.ofEquiv _ e₁.symm mul_submatrix_one _ _ _ end Semiring section Algebra variable [CommSemiring R] [Fintype n] [Fintype m] [DecidableEq m] [DecidableEq n] [Semiring A] [Algebra R A] /-- For square matrices with coefficients in an algebra over a commutative semiring, the natural map that reindexes a matrix's rows and columns with equivalent types, `Matrix.reindex`, is an equivalence of algebras. 
-/ def reindexAlgEquiv (e : m ≃ n) : Matrix m m A ≃ₐ[R] Matrix n n A := { reindexLinearEquiv A A e e with toFun := reindex e e map_mul' := fun a b => (reindexLinearEquiv_mul A A e e e a b).symm commutes' := fun r => by simp [algebraMap, Algebra.algebraMap] } @[simp] theorem reindexAlgEquiv_apply (e : m ≃ n) (M : Matrix m m A) : reindexAlgEquiv R A e M = reindex e e M := rfl @[simp] theorem reindexAlgEquiv_symm (e : m ≃ n) : (reindexAlgEquiv R A e).symm = reindexAlgEquiv R A e.symm := rfl @[simp] theorem reindexAlgEquiv_refl : reindexAlgEquiv R A (Equiv.refl m) = AlgEquiv.refl := AlgEquiv.ext fun _ => rfl theorem reindexAlgEquiv_mul (e : m ≃ n) (M : Matrix m m A) (N : Matrix m m A) : reindexAlgEquiv R A e (M * N) = reindexAlgEquiv R A e M * reindexAlgEquiv R A e N := map_mul .. end Algebra /-- Reindexing both indices along the same equivalence preserves the determinant. For the `simp` version of this lemma, see `det_submatrix_equiv_self`. -/ theorem det_reindexLinearEquiv_self [CommRing R] [Fintype m] [DecidableEq m] [Fintype n] [DecidableEq n] (e : m ≃ n) (M : Matrix m m R) : det (reindexLinearEquiv R R e e M) = det M := det_reindex_self e M /-- Reindexing both indices along the same equivalence preserves the determinant. For the `simp` version of this lemma, see `det_submatrix_equiv_self`. -/ theorem det_reindexAlgEquiv (B : Type*) [CommSemiring R] [CommRing B] [Algebra R B] [Fintype m] [DecidableEq m] [Fintype n] [DecidableEq n] (e : m ≃ n) (A : Matrix m m B) : det (reindexAlgEquiv R B e A) = det A := det_reindex_self e A end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/SchurComplement.lean
import Mathlib.Data.Matrix.Invertible import Mathlib.LinearAlgebra.Matrix.NonsingularInverse /-! # 2×2 block matrices and the Schur complement This file proves properties of 2×2 block matrices `[A B; C D]` that relate to the Schur complement `D - C*A⁻¹*B`. Some of the results here generalize to 2×2 matrices in a category, rather than just a ring. A few results in this direction can be found in `Mathlib/CategoryTheory/Preadditive/Biproducts.lean`, especially the declarations `CategoryTheory.Biprod.gaussian` and `CategoryTheory.Biprod.isoElim`. Compare with `Matrix.invertibleOfFromBlocks₁₁Invertible`. ## Main results * `Matrix.det_fromBlocks₁₁`, `Matrix.det_fromBlocks₂₂`: determinant of a block matrix in terms of the Schur complement. * `Matrix.invOf_fromBlocks_zero₂₁_eq`, `Matrix.invOf_fromBlocks_zero₁₂_eq`: the inverse of a block triangular matrix. * `Matrix.isUnit_fromBlocks_zero₂₁`, `Matrix.isUnit_fromBlocks_zero₁₂`: invertibility of a block triangular matrix. * `Matrix.det_one_add_mul_comm`: the **Weinstein–Aronszajn identity**. -/ variable {l m n α : Type*} namespace Matrix open scoped Matrix section CommRing variable [Fintype l] [Fintype m] [Fintype n] variable [DecidableEq l] [DecidableEq m] [DecidableEq n] variable [CommRing α] /-- LDU decomposition of a block matrix with an invertible top-left corner, using the Schur complement. -/ theorem fromBlocks_eq_of_invertible₁₁ (A : Matrix m m α) (B : Matrix m n α) (C : Matrix l m α) (D : Matrix l n α) [Invertible A] : fromBlocks A B C D = fromBlocks 1 0 (C * ⅟A) 1 * fromBlocks A 0 0 (D - C * ⅟A * B) * fromBlocks 1 (⅟A * B) 0 1 := by simp only [fromBlocks_multiply, Matrix.mul_zero, Matrix.zero_mul, add_zero, zero_add, Matrix.one_mul, Matrix.mul_one, invOf_mul_self, Matrix.mul_invOf_cancel_left, Matrix.mul_assoc, add_sub_cancel] /-- LDU decomposition of a block matrix with an invertible bottom-right corner, using the Schur complement. 
-/ theorem fromBlocks_eq_of_invertible₂₂ (A : Matrix l m α) (B : Matrix l n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] : fromBlocks A B C D = fromBlocks 1 (B * ⅟D) 0 1 * fromBlocks (A - B * ⅟D * C) 0 0 D * fromBlocks 1 0 (⅟D * C) 1 := (Matrix.reindex (Equiv.sumComm _ _) (Equiv.sumComm _ _)).injective <| by simpa [reindex_apply, Equiv.sumComm_symm, ← submatrix_mul_equiv _ _ _ (Equiv.sumComm n m), ← submatrix_mul_equiv _ _ _ (Equiv.sumComm n l), Equiv.sumComm_apply, fromBlocks_submatrix_sum_swap_sum_swap] using fromBlocks_eq_of_invertible₁₁ D C B A section Triangular /-! #### Block triangular matrices -/ /-- An upper-block-triangular matrix is invertible if its diagonal is. -/ def fromBlocksZero₂₁Invertible (A : Matrix m m α) (B : Matrix m n α) (D : Matrix n n α) [Invertible A] [Invertible D] : Invertible (fromBlocks A B 0 D) := invertibleOfLeftInverse _ (fromBlocks (⅟A) (-(⅟A * B * ⅟D)) 0 (⅟D)) <| by simp_rw [fromBlocks_multiply, Matrix.mul_zero, Matrix.zero_mul, zero_add, add_zero, Matrix.neg_mul, invOf_mul_self, Matrix.invOf_mul_cancel_right, add_neg_cancel, fromBlocks_one] /-- A lower-block-triangular matrix is invertible if its diagonal is. 
-/ def fromBlocksZero₁₂Invertible (A : Matrix m m α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] [Invertible D] : Invertible (fromBlocks A 0 C D) := invertibleOfLeftInverse _ (fromBlocks (⅟A) 0 (-(⅟D * C * ⅟A)) (⅟D)) <| by -- a symmetry argument is more work than just copying the proof simp_rw [fromBlocks_multiply, Matrix.mul_zero, Matrix.zero_mul, zero_add, add_zero, Matrix.neg_mul, invOf_mul_self, Matrix.invOf_mul_cancel_right, neg_add_cancel, fromBlocks_one] theorem invOf_fromBlocks_zero₂₁_eq (A : Matrix m m α) (B : Matrix m n α) (D : Matrix n n α) [Invertible A] [Invertible D] [Invertible (fromBlocks A B 0 D)] : ⅟(fromBlocks A B 0 D) = fromBlocks (⅟A) (-(⅟A * B * ⅟D)) 0 (⅟D) := by letI := fromBlocksZero₂₁Invertible A B D convert (rfl : ⅟(fromBlocks A B 0 D) = _) theorem invOf_fromBlocks_zero₁₂_eq (A : Matrix m m α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] [Invertible D] [Invertible (fromBlocks A 0 C D)] : ⅟(fromBlocks A 0 C D) = fromBlocks (⅟A) 0 (-(⅟D * C * ⅟A)) (⅟D) := by letI := fromBlocksZero₁₂Invertible A C D convert (rfl : ⅟(fromBlocks A 0 C D) = _) /-- Both diagonal entries of an invertible upper-block-triangular matrix are invertible (by reading off the diagonal entries of the inverse). 
-/ def invertibleOfFromBlocksZero₂₁Invertible (A : Matrix m m α) (B : Matrix m n α) (D : Matrix n n α) [Invertible (fromBlocks A B 0 D)] : Invertible A × Invertible D where fst := invertibleOfLeftInverse _ (⅟(fromBlocks A B 0 D)).toBlocks₁₁ <| by have := invOf_mul_self (fromBlocks A B 0 D) rw [← fromBlocks_toBlocks (⅟(fromBlocks A B 0 D)), fromBlocks_multiply] at this replace := congr_arg Matrix.toBlocks₁₁ this simpa only [Matrix.toBlocks_fromBlocks₁₁, Matrix.mul_zero, add_zero, ← fromBlocks_one] using this snd := invertibleOfRightInverse _ (⅟(fromBlocks A B 0 D)).toBlocks₂₂ <| by have := mul_invOf_self (fromBlocks A B 0 D) rw [← fromBlocks_toBlocks (⅟(fromBlocks A B 0 D)), fromBlocks_multiply] at this replace := congr_arg Matrix.toBlocks₂₂ this simpa only [Matrix.toBlocks_fromBlocks₂₂, Matrix.zero_mul, zero_add, ← fromBlocks_one] using this /-- Both diagonal entries of an invertible lower-block-triangular matrix are invertible (by reading off the diagonal entries of the inverse). -/ def invertibleOfFromBlocksZero₁₂Invertible (A : Matrix m m α) (C : Matrix n m α) (D : Matrix n n α) [Invertible (fromBlocks A 0 C D)] : Invertible A × Invertible D where fst := invertibleOfRightInverse _ (⅟(fromBlocks A 0 C D)).toBlocks₁₁ <| by have := mul_invOf_self (fromBlocks A 0 C D) rw [← fromBlocks_toBlocks (⅟(fromBlocks A 0 C D)), fromBlocks_multiply] at this replace := congr_arg Matrix.toBlocks₁₁ this simpa only [Matrix.toBlocks_fromBlocks₁₁, Matrix.zero_mul, add_zero, ← fromBlocks_one] using this snd := invertibleOfLeftInverse _ (⅟(fromBlocks A 0 C D)).toBlocks₂₂ <| by have := invOf_mul_self (fromBlocks A 0 C D) rw [← fromBlocks_toBlocks (⅟(fromBlocks A 0 C D)), fromBlocks_multiply] at this replace := congr_arg Matrix.toBlocks₂₂ this simpa only [Matrix.toBlocks_fromBlocks₂₂, Matrix.mul_zero, zero_add, ← fromBlocks_one] using this /-- `invertibleOfFromBlocksZero₂₁Invertible` and `Matrix.fromBlocksZero₂₁Invertible` form an equivalence. 
-/ def fromBlocksZero₂₁InvertibleEquiv (A : Matrix m m α) (B : Matrix m n α) (D : Matrix n n α) : Invertible (fromBlocks A B 0 D) ≃ Invertible A × Invertible D where toFun _ := invertibleOfFromBlocksZero₂₁Invertible A B D invFun i := by letI := i.1 letI := i.2 exact fromBlocksZero₂₁Invertible A B D left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ /-- `invertibleOfFromBlocksZero₁₂Invertible` and `Matrix.fromBlocksZero₁₂Invertible` form an equivalence. -/ def fromBlocksZero₁₂InvertibleEquiv (A : Matrix m m α) (C : Matrix n m α) (D : Matrix n n α) : Invertible (fromBlocks A 0 C D) ≃ Invertible A × Invertible D where toFun _ := invertibleOfFromBlocksZero₁₂Invertible A C D invFun i := by letI := i.1 letI := i.2 exact fromBlocksZero₁₂Invertible A C D left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ /-- An upper block-triangular matrix is invertible iff both elements of its diagonal are. This is a propositional form of `Matrix.fromBlocksZero₂₁InvertibleEquiv`. -/ @[simp] theorem isUnit_fromBlocks_zero₂₁ {A : Matrix m m α} {B : Matrix m n α} {D : Matrix n n α} : IsUnit (fromBlocks A B 0 D) ↔ IsUnit A ∧ IsUnit D := by simp only [← nonempty_invertible_iff_isUnit, ← nonempty_prod, (fromBlocksZero₂₁InvertibleEquiv _ _ _).nonempty_congr] /-- A lower block-triangular matrix is invertible iff both elements of its diagonal are. This is a propositional form of `Matrix.fromBlocksZero₁₂InvertibleEquiv` forms an `iff`. -/ @[simp] theorem isUnit_fromBlocks_zero₁₂ {A : Matrix m m α} {C : Matrix n m α} {D : Matrix n n α} : IsUnit (fromBlocks A 0 C D) ↔ IsUnit A ∧ IsUnit D := by simp only [← nonempty_invertible_iff_isUnit, ← nonempty_prod, (fromBlocksZero₁₂InvertibleEquiv _ _ _).nonempty_congr] /-- An expression for the inverse of an upper block-triangular matrix, when either both elements of diagonal are invertible, or both are not. 
-/ theorem inv_fromBlocks_zero₂₁_of_isUnit_iff (A : Matrix m m α) (B : Matrix m n α) (D : Matrix n n α) (hAD : IsUnit A ↔ IsUnit D) : (fromBlocks A B 0 D)⁻¹ = fromBlocks A⁻¹ (-(A⁻¹ * B * D⁻¹)) 0 D⁻¹ := by by_cases hA : IsUnit A · have hD := hAD.mp hA cases hA.nonempty_invertible cases hD.nonempty_invertible letI := fromBlocksZero₂₁Invertible A B D simp_rw [← invOf_eq_nonsing_inv, invOf_fromBlocks_zero₂₁_eq] · have hD := hAD.not.mp hA have : ¬IsUnit (fromBlocks A B 0 D) := isUnit_fromBlocks_zero₂₁.not.mpr (not_and'.mpr fun _ => hA) simp_rw [nonsing_inv_eq_ringInverse, Ring.inverse_non_unit _ hA, Ring.inverse_non_unit _ hD, Ring.inverse_non_unit _ this, Matrix.zero_mul, neg_zero, fromBlocks_zero] /-- An expression for the inverse of a lower block-triangular matrix, when either both elements of diagonal are invertible, or both are not. -/ theorem inv_fromBlocks_zero₁₂_of_isUnit_iff (A : Matrix m m α) (C : Matrix n m α) (D : Matrix n n α) (hAD : IsUnit A ↔ IsUnit D) : (fromBlocks A 0 C D)⁻¹ = fromBlocks A⁻¹ 0 (-(D⁻¹ * C * A⁻¹)) D⁻¹ := by by_cases hA : IsUnit A · have hD := hAD.mp hA cases hA.nonempty_invertible cases hD.nonempty_invertible letI := fromBlocksZero₁₂Invertible A C D simp_rw [← invOf_eq_nonsing_inv, invOf_fromBlocks_zero₁₂_eq] · have hD := hAD.not.mp hA have : ¬IsUnit (fromBlocks A 0 C D) := isUnit_fromBlocks_zero₁₂.not.mpr (not_and'.mpr fun _ => hA) simp_rw [nonsing_inv_eq_ringInverse, Ring.inverse_non_unit _ hA, Ring.inverse_non_unit _ hD, Ring.inverse_non_unit _ this, Matrix.zero_mul, neg_zero, fromBlocks_zero] end Triangular /-! ### 2×2 block matrices -/ section Block /-! #### General 2×2 block matrices -/ /-- A block matrix is invertible if the bottom right corner and the corresponding Schur complement is. 
-/ def fromBlocks₂₂Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] [Invertible (A - B * ⅟D * C)] : Invertible (fromBlocks A B C D) := by -- factor `fromBlocks` via `fromBlocks_eq_of_invertible₂₂`, and state the inverse we expect convert Invertible.copy' _ _ (fromBlocks (⅟(A - B * ⅟D * C)) (-(⅟(A - B * ⅟D * C) * B * ⅟D)) (-(⅟D * C * ⅟(A - B * ⅟D * C))) (⅟D + ⅟D * C * ⅟(A - B * ⅟D * C) * B * ⅟D)) (fromBlocks_eq_of_invertible₂₂ _ _ _ _) _ · -- the product is invertible because all the factors are letI : Invertible (1 : Matrix n n α) := invertibleOne letI : Invertible (1 : Matrix m m α) := invertibleOne refine Invertible.mul ?_ (fromBlocksZero₁₂Invertible _ _ _) exact Invertible.mul (fromBlocksZero₂₁Invertible _ _ _) (fromBlocksZero₂₁Invertible _ _ _) · -- unfold the `Invertible` instances to get the raw factors change _ = fromBlocks 1 0 (-(1 * (⅟D * C) * 1)) 1 * (fromBlocks (⅟(A - B * ⅟D * C)) (-(⅟(A - B * ⅟D * C) * 0 * ⅟D)) 0 (⅟D) * fromBlocks 1 (-(1 * (B * ⅟D) * 1)) 0 1) -- combine into a single block matrix simp only [fromBlocks_multiply, Matrix.one_mul, Matrix.mul_one, Matrix.zero_mul, Matrix.mul_zero, add_zero, zero_add, neg_zero, Matrix.mul_neg, Matrix.neg_mul, neg_neg, ← Matrix.mul_assoc, add_comm (⅟D)] /-- A block matrix is invertible if the top left corner and the corresponding Schur complement is. 
-/ def fromBlocks₁₁Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] [Invertible (D - C * ⅟A * B)] : Invertible (fromBlocks A B C D) := by -- we argue by symmetry letI := fromBlocks₂₂Invertible D C B A letI iDCBA := submatrixEquivInvertible (fromBlocks D C B A) (Equiv.sumComm _ _) (Equiv.sumComm _ _) exact iDCBA.copy' _ (fromBlocks (⅟A + ⅟A * B * ⅟(D - C * ⅟A * B) * C * ⅟A) (-(⅟A * B * ⅟(D - C * ⅟A * B))) (-(⅟(D - C * ⅟A * B) * C * ⅟A)) (⅟(D - C * ⅟A * B))) (fromBlocks_submatrix_sum_swap_sum_swap _ _ _ _).symm (fromBlocks_submatrix_sum_swap_sum_swap _ _ _ _).symm theorem invOf_fromBlocks₂₂_eq (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] [Invertible (A - B * ⅟D * C)] [Invertible (fromBlocks A B C D)] : ⅟(fromBlocks A B C D) = fromBlocks (⅟(A - B * ⅟D * C)) (-(⅟(A - B * ⅟D * C) * B * ⅟D)) (-(⅟D * C * ⅟(A - B * ⅟D * C))) (⅟D + ⅟D * C * ⅟(A - B * ⅟D * C) * B * ⅟D) := by letI := fromBlocks₂₂Invertible A B C D convert (rfl : ⅟(fromBlocks A B C D) = _) theorem invOf_fromBlocks₁₁_eq (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] [Invertible (D - C * ⅟A * B)] [Invertible (fromBlocks A B C D)] : ⅟(fromBlocks A B C D) = fromBlocks (⅟A + ⅟A * B * ⅟(D - C * ⅟A * B) * C * ⅟A) (-(⅟A * B * ⅟(D - C * ⅟A * B))) (-(⅟(D - C * ⅟A * B) * C * ⅟A)) (⅟(D - C * ⅟A * B)) := by letI := fromBlocks₁₁Invertible A B C D convert (rfl : ⅟(fromBlocks A B C D) = _) /-- If a block matrix is invertible and so is its bottom left element, then so is the corresponding Schur complement. 
-/ def invertibleOfFromBlocks₂₂Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] [Invertible (fromBlocks A B C D)] : Invertible (A - B * ⅟D * C) := by suffices Invertible (fromBlocks (A - B * ⅟D * C) 0 0 D) by exact (invertibleOfFromBlocksZero₁₂Invertible (A - B * ⅟D * C) 0 D).1 letI : Invertible (1 : Matrix n n α) := invertibleOne letI : Invertible (1 : Matrix m m α) := invertibleOne letI iDC : Invertible (fromBlocks 1 0 (⅟D * C) 1 : Matrix (m ⊕ n) (m ⊕ n) α) := fromBlocksZero₁₂Invertible _ _ _ letI iBD : Invertible (fromBlocks 1 (B * ⅟D) 0 1 : Matrix (m ⊕ n) (m ⊕ n) α) := fromBlocksZero₂₁Invertible _ _ _ letI iBDC := Invertible.copy ‹_› _ (fromBlocks_eq_of_invertible₂₂ A B C D).symm refine (iBD.mulLeft _).symm ?_ exact (iDC.mulRight _).symm iBDC /-- If a block matrix is invertible and so is its bottom left element, then so is the corresponding Schur complement. -/ def invertibleOfFromBlocks₁₁Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] [Invertible (fromBlocks A B C D)] : Invertible (D - C * ⅟A * B) := by -- another symmetry argument letI iABCD' := submatrixEquivInvertible (fromBlocks A B C D) (Equiv.sumComm _ _) (Equiv.sumComm _ _) letI iDCBA := iABCD'.copy _ (fromBlocks_submatrix_sum_swap_sum_swap _ _ _ _).symm exact invertibleOfFromBlocks₂₂Invertible D C B A /-- `Matrix.invertibleOfFromBlocks₂₂Invertible` and `Matrix.fromBlocks₂₂Invertible` as an equivalence. -/ def invertibleEquivFromBlocks₂₂Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] : Invertible (fromBlocks A B C D) ≃ Invertible (A - B * ⅟D * C) where toFun _iABCD := invertibleOfFromBlocks₂₂Invertible _ _ _ _ invFun _i_schur := fromBlocks₂₂Invertible _ _ _ _ left_inv _iABCD := Subsingleton.elim _ _ right_inv _i_schur := Subsingleton.elim _ _ /-- `Matrix.invertibleOfFromBlocks₁₁Invertible` and `Matrix.fromBlocks₁₁Invertible` as an equivalence. 
-/ def invertibleEquivFromBlocks₁₁Invertible (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] : Invertible (fromBlocks A B C D) ≃ Invertible (D - C * ⅟A * B) where toFun _iABCD := invertibleOfFromBlocks₁₁Invertible _ _ _ _ invFun _i_schur := fromBlocks₁₁Invertible _ _ _ _ left_inv _iABCD := Subsingleton.elim _ _ right_inv _i_schur := Subsingleton.elim _ _ /-- If the bottom-left element of a block matrix is invertible, then the whole matrix is invertible iff the corresponding Schur complement is. -/ theorem isUnit_fromBlocks_iff_of_invertible₂₂ {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} [Invertible D] : IsUnit (fromBlocks A B C D) ↔ IsUnit (A - B * ⅟D * C) := by simp only [← nonempty_invertible_iff_isUnit, (invertibleEquivFromBlocks₂₂Invertible A B C D).nonempty_congr] /-- If the top-right element of a block matrix is invertible, then the whole matrix is invertible iff the corresponding Schur complement is. -/ theorem isUnit_fromBlocks_iff_of_invertible₁₁ {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} [Invertible A] : IsUnit (fromBlocks A B C D) ↔ IsUnit (D - C * ⅟A * B) := by simp only [← nonempty_invertible_iff_isUnit, (invertibleEquivFromBlocks₁₁Invertible A B C D).nonempty_congr] end Block /-! ### Lemmas about `Matrix.det` -/ section Det /-- Determinant of a 2×2 block matrix, expanded around an invertible top left element in terms of the Schur complement. 
-/ theorem det_fromBlocks₁₁ (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible A] : (Matrix.fromBlocks A B C D).det = det A * det (D - C * ⅟A * B) := by rw [fromBlocks_eq_of_invertible₁₁ (A := A), det_mul, det_mul, det_fromBlocks_zero₂₁, det_fromBlocks_zero₂₁, det_fromBlocks_zero₁₂, det_one, det_one, one_mul, one_mul, mul_one] @[simp] theorem det_fromBlocks_one₁₁ (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) : (Matrix.fromBlocks 1 B C D).det = det (D - C * B) := by haveI : Invertible (1 : Matrix m m α) := invertibleOne rw [det_fromBlocks₁₁, invOf_one, Matrix.mul_one, det_one, one_mul] /-- Determinant of a 2×2 block matrix, expanded around an invertible bottom right element in terms of the Schur complement. -/ theorem det_fromBlocks₂₂ (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) (D : Matrix n n α) [Invertible D] : (Matrix.fromBlocks A B C D).det = det D * det (A - B * ⅟D * C) := by have : fromBlocks A B C D = (fromBlocks D C B A).submatrix (Equiv.sumComm _ _) (Equiv.sumComm _ _) := by ext (i j) cases i <;> cases j <;> rfl rw [this, det_submatrix_equiv_self, det_fromBlocks₁₁] @[simp] theorem det_fromBlocks_one₂₂ (A : Matrix m m α) (B : Matrix m n α) (C : Matrix n m α) : (Matrix.fromBlocks A B C 1).det = det (A - B * C) := by haveI : Invertible (1 : Matrix n n α) := invertibleOne rw [det_fromBlocks₂₂, invOf_one, Matrix.mul_one, det_one, one_mul] /-- The **Weinstein–Aronszajn identity**. Note the `1` on the LHS is of shape m×m, while the `1` on the RHS is of shape n×n. 
-/ theorem det_one_add_mul_comm (A : Matrix m n α) (B : Matrix n m α) : det (1 + A * B) = det (1 + B * A) := calc det (1 + A * B) = det (fromBlocks 1 (-A) B 1) := by rw [det_fromBlocks_one₂₂, Matrix.neg_mul, sub_neg_eq_add] _ = det (1 + B * A) := by rw [det_fromBlocks_one₁₁, Matrix.mul_neg, sub_neg_eq_add] /-- Alternate statement of the **Weinstein–Aronszajn identity** -/ theorem det_mul_add_one_comm (A : Matrix m n α) (B : Matrix n m α) : det (A * B + 1) = det (B * A + 1) := by rw [add_comm, det_one_add_mul_comm, add_comm] theorem det_one_sub_mul_comm (A : Matrix m n α) (B : Matrix n m α) : det (1 - A * B) = det (1 - B * A) := by rw [sub_eq_add_neg, ← Matrix.neg_mul, det_one_add_mul_comm, Matrix.mul_neg, ← sub_eq_add_neg] /-- A special case of the **Matrix determinant lemma** for when `A = I`. -/ theorem det_one_add_replicateCol_mul_replicateRow {ι : Type*} [Unique ι] (u v : m → α) : det (1 + replicateCol ι u * replicateRow ι v) = 1 + v ⬝ᵥ u := by rw [det_one_add_mul_comm, det_unique, Pi.add_apply, Pi.add_apply, Matrix.one_apply_eq, Matrix.replicateRow_mul_replicateCol_apply] /-- The **Matrix determinant lemma** TODO: show the more general version without `hA : IsUnit A.det` as `(A + replicateCol u * replicateRow v).det = A.det + v ⬝ᵥ (adjugate A) *ᵥ u`. 
-/ theorem det_add_replicateCol_mul_replicateRow {ι : Type*} [Unique ι] {A : Matrix m m α} (hA : IsUnit A.det) (u v : m → α) : (A + replicateCol ι u * replicateRow ι v).det = A.det * (1 + replicateRow ι v * A⁻¹ * replicateCol ι u).det := by nth_rewrite 1 [← Matrix.mul_one A] rwa [← Matrix.mul_nonsing_inv_cancel_left A (replicateCol ι u * replicateRow ι v), ← Matrix.mul_add, det_mul, ← Matrix.mul_assoc, det_one_add_mul_comm, ← Matrix.mul_assoc] /-- A generalization of the **Matrix determinant lemma** -/ theorem det_add_mul {A : Matrix m m α} (U : Matrix m n α) (V : Matrix n m α) (hA : IsUnit A.det) : (A + U * V).det = A.det * (1 + V * A⁻¹ * U).det := by nth_rewrite 1 [← Matrix.mul_one A] rwa [← Matrix.mul_nonsing_inv_cancel_left A (U * V), ← Matrix.mul_add, det_mul, ← Matrix.mul_assoc, det_one_add_mul_comm, ← Matrix.mul_assoc] end Det end CommRing end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/ToLin.lean
import Mathlib.Algebra.Algebra.Subalgebra.Tower import Mathlib.Data.Finite.Sum import Mathlib.Data.Matrix.Block import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.Basis.Fin import Mathlib.LinearAlgebra.Basis.Prod import Mathlib.LinearAlgebra.Basis.SMul import Mathlib.LinearAlgebra.Matrix.Notation import Mathlib.LinearAlgebra.Matrix.StdBasis import Mathlib.RingTheory.AlgebraTower import Mathlib.RingTheory.Ideal.Span /-! # Linear maps and matrices This file defines the maps to send matrices to a linear map, and to send linear maps between modules with a finite bases to matrices. This defines a linear equivalence between linear maps between finite-dimensional vector spaces and matrices indexed by the respective bases. ## Main definitions In the list below, and in all this file, `R` is a commutative ring (semiring is sometimes enough), `M` and its variations are `R`-modules, `ι`, `κ`, `n` and `m` are finite types used for indexing. * `LinearMap.toMatrix`: given bases `v₁ : ι → M₁` and `v₂ : κ → M₂`, the `R`-linear equivalence from `M₁ →ₗ[R] M₂` to `Matrix κ ι R` * `Matrix.toLin`: the inverse of `LinearMap.toMatrix` * `LinearMap.toMatrix'`: the `R`-linear equivalence from `(m → R) →ₗ[R] (n → R)` to `Matrix m n R` (with the standard basis on `m → R` and `n → R`) * `Matrix.toLin'`: the inverse of `LinearMap.toMatrix'` * `algEquivMatrix`: given a basis indexed by `n`, the `R`-algebra equivalence between `R`-endomorphisms of `M` and `Matrix n n R` ## Issues This file was originally written without attention to non-commutative rings, and so mostly only works in the commutative setting. This should be fixed. In particular, `Matrix.mulVec` gives us a linear equivalence `Matrix m n R ≃ₗ[R] (n → R) →ₗ[Rᵐᵒᵖ] (m → R)` while `Matrix.vecMul` gives us a linear equivalence `Matrix m n R ≃ₗ[Rᵐᵒᵖ] (m → R) →ₗ[R] (n → R)`. 
At present, the first equivalence is developed in detail but only for commutative rings (and we omit the distinction between `Rᵐᵒᵖ` and `R`), while the second equivalence is developed only in brief, but for not-necessarily-commutative rings. Naming is slightly inconsistent between the two developments. In the original (commutative) development `linear` is abbreviated to `lin`, although this is not consistent with the rest of mathlib. In the new (non-commutative) development `linear` is not abbreviated, and declarations use `_right` to indicate they use the right action of matrices on vectors (via `Matrix.vecMul`). When the two developments are made uniform, the names should be made uniform, too, by choosing between `linear` and `lin` consistently, and (presumably) adding `_left` where necessary. ## Tags linear_map, matrix, linear_equiv, diagonal, det, trace -/ noncomputable section open LinearMap Matrix Module Set Submodule /-! ### Bilinear versions of matrix products The definitions in this section are stated with two extra rings, to allow for non-commutative rings. -/ section Bilinear variable {l m n R S A : Type*} variable [Semiring R] [Semiring S] [NonUnitalNonAssocSemiring A] variable [Module R A] [Module S A] variable [SMulCommClass S R A] [SMulCommClass S A A] [IsScalarTower R A A] variable (R S) /-- `Matrix.vecMul` as a bilinear map. 
When `A` is non-commutative, this can be instantiated as `vecMulBilin A Aᵐᵒᵖ` -/ def Matrix.vecMulBilin [Fintype m] : (m → A) →ₗ[R] Matrix m n A →ₗ[S] (n → A) where toFun x := { toFun M := x ᵥ* M map_add' _ _ := vecMul_add _ _ _ map_smul' _ _ := vecMul_smul _ _ _ } map_add' _ _ := LinearMap.ext fun _ => add_vecMul _ _ _ map_smul' _ _ := LinearMap.ext fun _ => smul_vecMul _ _ _ @[simp] theorem Matrix.vecMulBilin_apply [Fintype m] (v : m → A) (M : Matrix m n A) : Matrix.vecMulBilin R S v M = v ᵥ* M := rfl example {A} [Semiring A] [Fintype m] := (vecMulBilin A Aᵐᵒᵖ : _ →ₗ[_] Matrix m n A →ₗ[_] _) /-- `Matrix.mulVec` as a bilinear map. When `A` is non-commutative, this can be instantiated as `mulVecBilin A Aᵐᵒᵖ` -/ def Matrix.mulVecBilin [Fintype n] : Matrix m n A →ₗ[R] (n → A) →ₗ[S] (m → A) where toFun M := { toFun x := M *ᵥ x map_add' _ _ := mulVec_add _ _ _ map_smul' _ _ := mulVec_smul _ _ _ } map_add' _ _ := LinearMap.ext fun _ => add_mulVec _ _ _ map_smul' _ _ := LinearMap.ext fun _ => smul_mulVec _ _ _ @[simp] theorem Matrix.mulVecBilin_apply [Fintype n] (M : Matrix m n A) (v : n → A) : Matrix.mulVecBilin R S M v = M *ᵥ v := rfl example {A} [Semiring A] [Fintype n] := (mulVecBilin A Aᵐᵒᵖ : Matrix m n A →ₗ[_] _ →ₗ[_] _) /-- `vecMulVec` as a bilinear map. When `A` is noncommutative, `R` and `S` can be instantiated as `vecMulVecLinear A Aᵐᵒᵖ`. -/ @[simps] def vecMulVecBilin : (m → A) →ₗ[R] (n → A) →ₗ[S] Matrix m n A where toFun x := { toFun y := vecMulVec x y map_add' _ _ := vecMulVec_add _ _ _ map_smul' _ _ := vecMulVec_smul _ _ _ } map_add' _ _ := LinearMap.ext fun _ => add_vecMulVec _ _ _ map_smul' _ _ := LinearMap.ext fun _ => smul_vecMulVec _ _ _ example {A} [Semiring A] := (vecMulVecBilin A Aᵐᵒᵖ : (m → A) →ₗ[_] (n → A) →ₗ[_] _) /-- `vecMulVec` as a bilinear map. When `A` is noncommutative, `R` and `S` can be instantiated as `vecMulVecLinear A Aᵐᵒᵖ`. 
-/ @[simps] def dotProductBilin [Fintype m] : (m → A) →ₗ[R] (m → A) →ₗ[S] A where toFun x := { toFun y := dotProduct x y map_add' _ _ := dotProduct_add _ _ _ map_smul' _ _ := dotProduct_smul _ _ _ } map_add' _ _ := LinearMap.ext fun _ => add_dotProduct _ _ _ map_smul' _ _ := LinearMap.ext fun _ => smul_dotProduct _ _ _ example {A} [Semiring A] [Fintype m] := (dotProductBilin A Aᵐᵒᵖ : (m → A) →ₗ[_] _ →ₗ[_] _) end Bilinear section ToMatrixRight variable {R : Type*} [Semiring R] variable {l m n : Type*} /-- `Matrix.vecMul M` is a linear map. Note this is a special case of `Matrix.vecMulBilin`. -/ abbrev Matrix.vecMulLinear [Fintype m] (M : Matrix m n R) : (m → R) →ₗ[R] n → R := Matrix.vecMulBilin R Rᵐᵒᵖ |>.flip M @[simp] theorem Matrix.vecMulLinear_apply [Fintype m] (M : Matrix m n R) (x : m → R) : M.vecMulLinear x = x ᵥ* M := rfl theorem Matrix.coe_vecMulLinear [Fintype m] (M : Matrix m n R) : (M.vecMulLinear : _ → _) = M.vecMul := rfl variable [Fintype m] theorem range_vecMulLinear (M : Matrix m n R) : LinearMap.range M.vecMulLinear = span R (range M.row) := by letI := Classical.decEq m simp_rw [range_eq_map, ← iSup_range_single, Submodule.map_iSup, range_eq_map, ← Ideal.span_singleton_one, Ideal.span, Submodule.map_span, image_image, image_singleton, Matrix.vecMulLinear_apply, iSup_span, range_eq_iUnion, iUnion_singleton_eq_range, LinearMap.single, LinearMap.coe_mk, AddHom.coe_mk, row_def] unfold vecMul simp_rw [single_dotProduct, one_mul] theorem Matrix.vecMul_injective_iff {M : Matrix m n R} : Function.Injective M.vecMul ↔ LinearIndependent R M.row := by rw [← coe_vecMulLinear, linearIndependent_iff_injective_fintypeLinearCombination] congr! 
1 exact funext fun _ => Matrix.vecMul_eq_sum _ _ lemma Matrix.linearIndependent_rows_of_isUnit {A : Matrix m m R} [DecidableEq m] (ha : IsUnit A) : LinearIndependent R A.row := by rw [← Matrix.vecMul_injective_iff] exact Matrix.vecMul_injective_of_isUnit ha section variable [DecidableEq m] /-- Linear maps `(m → R) →ₗ[R] (n → R)` are linearly equivalent over `Rᵐᵒᵖ` to `Matrix m n R`, by having matrices act by right multiplication. -/ def LinearMap.toMatrixRight' : ((m → R) →ₗ[R] n → R) ≃ₗ[Rᵐᵒᵖ] Matrix m n R where toFun f i j := f (single R (fun _ ↦ R) i 1) j invFun := Matrix.vecMulLinear right_inv M := by ext i j simp left_inv f := by apply (Pi.basisFun R m).ext intro j; ext i simp map_add' f g := by ext i j simp only [Pi.add_apply, LinearMap.add_apply, Matrix.add_apply] map_smul' c f := by ext i j simp only [Pi.smul_apply, LinearMap.smul_apply, RingHom.id_apply, Matrix.smul_apply] /-- A `Matrix m n R` is linearly equivalent over `Rᵐᵒᵖ` to a linear map `(m → R) →ₗ[R] (n → R)`, by having matrices act by right multiplication. 
-/ abbrev Matrix.toLinearMapRight' [DecidableEq m] : Matrix m n R ≃ₗ[Rᵐᵒᵖ] (m → R) →ₗ[R] n → R := LinearEquiv.symm LinearMap.toMatrixRight' @[simp] theorem Matrix.toLinearMapRight'_apply (M : Matrix m n R) (v : m → R) : (Matrix.toLinearMapRight') M v = v ᵥ* M := rfl @[simp] theorem Matrix.toLinearMapRight'_mul [Fintype l] [DecidableEq l] (M : Matrix l m R) (N : Matrix m n R) : Matrix.toLinearMapRight' (M * N) = (Matrix.toLinearMapRight' N).comp (Matrix.toLinearMapRight' M) := LinearMap.ext fun _x ↦ (vecMul_vecMul _ M N).symm theorem Matrix.toLinearMapRight'_mul_apply [Fintype l] [DecidableEq l] (M : Matrix l m R) (N : Matrix m n R) (x) : Matrix.toLinearMapRight' (M * N) x = Matrix.toLinearMapRight' N (Matrix.toLinearMapRight' M x) := (vecMul_vecMul _ M N).symm @[simp] theorem Matrix.toLinearMapRight'_one : Matrix.toLinearMapRight' (1 : Matrix m m R) = LinearMap.id := by ext simp /-- If `M` and `M'` are each other's inverse matrices, they provide an equivalence between `n → A` and `m → A` corresponding to `M.vecMul` and `M'.vecMul`. -/ @[simps] def Matrix.toLinearEquivRight'OfInv [Fintype n] [DecidableEq n] {M : Matrix m n R} {M' : Matrix n m R} (hMM' : M * M' = 1) (hM'M : M' * M = 1) : (n → R) ≃ₗ[R] m → R := { LinearMap.toMatrixRight'.symm M' with toFun := Matrix.toLinearMapRight' M' invFun := Matrix.toLinearMapRight' M left_inv := fun x ↦ by rw [← Matrix.toLinearMapRight'_mul_apply, hM'M, Matrix.toLinearMapRight'_one, id_apply] right_inv := fun x ↦ by rw [← Matrix.toLinearMapRight'_mul_apply, hMM', Matrix.toLinearMapRight'_one, id_apply] } end end ToMatrixRight /-! From this point on, we only work with commutative rings, and fail to distinguish between `Rᵐᵒᵖ` and `R`. This should eventually be remedied. -/ section mulVec variable {R : Type*} [CommSemiring R] variable {k l m n : Type*} /-- `Matrix.mulVec M` as a linear map. Note this is a special case of `Matrix.mulVecBilin`. 
-/ abbrev Matrix.mulVecLin [Fintype n] (M : Matrix m n R) : (n → R) →ₗ[R] m → R := mulVecBilin R R M theorem Matrix.coe_mulVecLin [Fintype n] (M : Matrix m n R) : (M.mulVecLin : _ → _) = M.mulVec := rfl @[simp] theorem Matrix.mulVecLin_apply [Fintype n] (M : Matrix m n R) (v : n → R) : M.mulVecLin v = M *ᵥ v := rfl @[simp] theorem Matrix.mulVecLin_zero [Fintype n] : Matrix.mulVecLin (0 : Matrix m n R) = 0 := LinearMap.ext zero_mulVec @[simp] theorem Matrix.mulVecLin_add [Fintype n] (M N : Matrix m n R) : (M + N).mulVecLin = M.mulVecLin + N.mulVecLin := LinearMap.ext fun _ ↦ add_mulVec _ _ _ @[simp] theorem Matrix.mulVecLin_transpose [Fintype m] (M : Matrix m n R) : Mᵀ.mulVecLin = M.vecMulLinear := by ext; simp [mulVec_transpose] @[simp] theorem Matrix.vecMulLinear_transpose [Fintype n] (M : Matrix m n R) : Mᵀ.vecMulLinear = M.mulVecLin := by ext; simp [vecMul_transpose] theorem Matrix.mulVecLin_submatrix [Fintype n] [Fintype l] (f₁ : m → k) (e₂ : n ≃ l) (M : Matrix k l R) : (M.submatrix f₁ e₂).mulVecLin = funLeft R R f₁ ∘ₗ M.mulVecLin ∘ₗ funLeft _ _ e₂.symm := LinearMap.ext fun _ ↦ submatrix_mulVec_equiv _ _ _ _ /-- A variant of `Matrix.mulVecLin_submatrix` that keeps around `LinearEquiv`s. 
-/ theorem Matrix.mulVecLin_reindex [Fintype n] [Fintype l] (e₁ : k ≃ m) (e₂ : l ≃ n) (M : Matrix k l R) : (reindex e₁ e₂ M).mulVecLin = ↑(LinearEquiv.funCongrLeft R R e₁.symm) ∘ₗ M.mulVecLin ∘ₗ ↑(LinearEquiv.funCongrLeft R R e₂) := Matrix.mulVecLin_submatrix _ _ _ variable [Fintype n] @[simp] theorem Matrix.mulVecLin_one [DecidableEq n] : Matrix.mulVecLin (1 : Matrix n n R) = LinearMap.id := by ext; simp [Matrix.one_apply, Pi.single_apply, eq_comm] @[simp] theorem Matrix.mulVecLin_mul [Fintype m] (M : Matrix l m R) (N : Matrix m n R) : Matrix.mulVecLin (M * N) = (Matrix.mulVecLin M).comp (Matrix.mulVecLin N) := LinearMap.ext fun _ ↦ (mulVec_mulVec _ _ _).symm theorem Matrix.ker_mulVecLin_eq_bot_iff {M : Matrix m n R} : (LinearMap.ker M.mulVecLin) = ⊥ ↔ ∀ v, M *ᵥ v = 0 → v = 0 := by simp only [Submodule.eq_bot_iff, LinearMap.mem_ker, Matrix.mulVecLin_apply] theorem Matrix.range_mulVecLin (M : Matrix m n R) : LinearMap.range M.mulVecLin = span R (range M.col) := by rw [← vecMulLinear_transpose, range_vecMulLinear, row_transpose] theorem Matrix.mulVec_injective_iff {M : Matrix m n R} : Function.Injective M.mulVec ↔ LinearIndependent R M.col := by change Function.Injective (fun x ↦ _) ↔ _ simp_rw [← M.vecMul_transpose, vecMul_injective_iff, row_transpose] lemma Matrix.linearIndependent_cols_of_isUnit [Fintype m] {A : Matrix m m R} [DecidableEq m] (ha : IsUnit A) : LinearIndependent R A.col := by rw [← Matrix.mulVec_injective_iff] exact Matrix.mulVec_injective_of_isUnit ha end mulVec section ToMatrix' variable {R : Type*} [CommSemiring R] variable {k l m n : Type*} [DecidableEq n] [Fintype n] /-- Linear maps `(n → R) →ₗ[R] (m → R)` are linearly equivalent to `Matrix m n R`. 
-/ def LinearMap.toMatrix' : ((n → R) →ₗ[R] m → R) ≃ₗ[R] Matrix m n R where toFun f := of fun i j ↦ f (Pi.single j 1) i invFun := Matrix.mulVecLin right_inv M := by ext i j simp only [Matrix.mulVec_single_one, col_apply, Matrix.mulVecLin_apply, of_apply] left_inv f := by apply (Pi.basisFun R n).ext intro j; ext i simp only [Pi.basisFun_apply, Matrix.mulVec_single_one, col_apply, Matrix.mulVecLin_apply, of_apply] map_add' f g := by ext i j simp only [Pi.add_apply, LinearMap.add_apply, of_apply, Matrix.add_apply] map_smul' c f := by ext i j simp only [Pi.smul_apply, LinearMap.smul_apply, RingHom.id_apply, of_apply, Matrix.smul_apply] /-- A `Matrix m n R` is linearly equivalent to a linear map `(n → R) →ₗ[R] (m → R)`. Note that the forward-direction does not require `DecidableEq` and is `Matrix.mulVecLin`. -/ def Matrix.toLin' : Matrix m n R ≃ₗ[R] (n → R) →ₗ[R] m → R := LinearMap.toMatrix'.symm theorem Matrix.toLin'_apply' (M : Matrix m n R) : Matrix.toLin' M = M.mulVecLin := rfl @[simp] theorem LinearMap.toMatrix'_symm : (LinearMap.toMatrix'.symm : Matrix m n R ≃ₗ[R] _) = Matrix.toLin' := rfl @[simp] theorem Matrix.toLin'_symm : (Matrix.toLin'.symm : ((n → R) →ₗ[R] m → R) ≃ₗ[R] _) = LinearMap.toMatrix' := rfl @[simp] theorem LinearMap.toMatrix'_toLin' (M : Matrix m n R) : LinearMap.toMatrix' (Matrix.toLin' M) = M := LinearMap.toMatrix'.apply_symm_apply M @[simp] theorem Matrix.toLin'_toMatrix' (f : (n → R) →ₗ[R] m → R) : Matrix.toLin' (LinearMap.toMatrix' f) = f := Matrix.toLin'.apply_symm_apply f @[simp] theorem LinearMap.toMatrix'_apply (f : (n → R) →ₗ[R] m → R) (i j) : LinearMap.toMatrix' f i j = f (fun j' ↦ if j' = j then 1 else 0) i := by simp [toMatrix', ← Pi.single_apply] @[simp] theorem Matrix.toLin'_apply (M : Matrix m n R) (v : n → R) : Matrix.toLin' M v = M *ᵥ v := rfl @[simp] theorem LinearMap.toMatrix'_mulVec (f : (n → R) →ₗ[R] m → R) (v : n → R) : LinearMap.toMatrix' f *ᵥ v = f v := by rw [← toLin'_apply, toLin'_toMatrix'] @[simp] theorem 
Matrix.toLin'_one : Matrix.toLin' (1 : Matrix n n R) = LinearMap.id := Matrix.mulVecLin_one @[simp] theorem LinearMap.toMatrix'_id : LinearMap.toMatrix' (LinearMap.id : (n → R) →ₗ[R] n → R) = 1 := by ext rw [Matrix.one_apply, LinearMap.toMatrix'_apply, id_apply] @[simp] theorem LinearMap.toMatrix'_one : LinearMap.toMatrix' (1 : (n → R) →ₗ[R] n → R) = 1 := LinearMap.toMatrix'_id @[simp] theorem Matrix.toLin'_mul [Fintype m] [DecidableEq m] (M : Matrix l m R) (N : Matrix m n R) : Matrix.toLin' (M * N) = (Matrix.toLin' M).comp (Matrix.toLin' N) := Matrix.mulVecLin_mul _ _ @[simp] theorem Matrix.toLin'_pow (M : Matrix n n R) (k : ℕ) : (M ^ k).toLin' = M.toLin' ^ k := by induction k with | zero => simp [End.one_eq_id] | succ n ih => rw [pow_succ, pow_succ, toLin'_mul, ih, Module.End.mul_eq_comp] @[simp] theorem Matrix.toLin'_submatrix [Fintype l] [DecidableEq l] (f₁ : m → k) (e₂ : n ≃ l) (M : Matrix k l R) : Matrix.toLin' (M.submatrix f₁ e₂) = funLeft R R f₁ ∘ₗ (Matrix.toLin' M) ∘ₗ funLeft _ _ e₂.symm := Matrix.mulVecLin_submatrix _ _ _ /-- A variant of `Matrix.toLin'_submatrix` that keeps around `LinearEquiv`s. 
-/ theorem Matrix.toLin'_reindex [Fintype l] [DecidableEq l] (e₁ : k ≃ m) (e₂ : l ≃ n) (M : Matrix k l R) : Matrix.toLin' (reindex e₁ e₂ M) = ↑(LinearEquiv.funCongrLeft R R e₁.symm) ∘ₗ (Matrix.toLin' M) ∘ₗ ↑(LinearEquiv.funCongrLeft R R e₂) := Matrix.mulVecLin_reindex _ _ _ /-- Shortcut lemma for `Matrix.toLin'_mul` and `LinearMap.comp_apply` -/ theorem Matrix.toLin'_mul_apply [Fintype m] [DecidableEq m] (M : Matrix l m R) (N : Matrix m n R) (x) : Matrix.toLin' (M * N) x = Matrix.toLin' M (Matrix.toLin' N x) := by rw [Matrix.toLin'_mul, LinearMap.comp_apply] theorem LinearMap.toMatrix'_comp [Fintype l] [DecidableEq l] (f : (n → R) →ₗ[R] m → R) (g : (l → R) →ₗ[R] n → R) : LinearMap.toMatrix' (f.comp g) = LinearMap.toMatrix' f * LinearMap.toMatrix' g := by suffices f.comp g = Matrix.toLin' (LinearMap.toMatrix' f * LinearMap.toMatrix' g) by rw [this, LinearMap.toMatrix'_toLin'] rw [Matrix.toLin'_mul, Matrix.toLin'_toMatrix', Matrix.toLin'_toMatrix'] theorem LinearMap.toMatrix'_mul [Fintype m] [DecidableEq m] (f g : (m → R) →ₗ[R] m → R) : LinearMap.toMatrix' (f * g) = LinearMap.toMatrix' f * LinearMap.toMatrix' g := LinearMap.toMatrix'_comp f g @[simp] theorem LinearMap.toMatrix'_algebraMap (x : R) : LinearMap.toMatrix' (algebraMap R (Module.End R (n → R)) x) = scalar n x := by simp [Module.algebraMap_end_eq_smul_id, smul_eq_diagonal_mul] theorem Matrix.ker_toLin'_eq_bot_iff {M : Matrix n n R} : LinearMap.ker (Matrix.toLin' M) = ⊥ ↔ ∀ v, M *ᵥ v = 0 → v = 0 := Matrix.ker_mulVecLin_eq_bot_iff theorem Matrix.range_toLin' (M : Matrix m n R) : LinearMap.range (Matrix.toLin' M) = span R (range M.col) := Matrix.range_mulVecLin _ /-- If `M` and `M'` are each other's inverse matrices, they provide an equivalence between `m → A` and `n → A` corresponding to `M.mulVec` and `M'.mulVec`. 
-/ @[simps] def Matrix.toLin'OfInv [Fintype m] [DecidableEq m] {M : Matrix m n R} {M' : Matrix n m R} (hMM' : M * M' = 1) (hM'M : M' * M = 1) : (m → R) ≃ₗ[R] n → R := { Matrix.toLin' M' with toFun := Matrix.toLin' M' invFun := Matrix.toLin' M left_inv := fun x ↦ by rw [← Matrix.toLin'_mul_apply, hMM', Matrix.toLin'_one, id_apply] right_inv := fun x ↦ by rw [← Matrix.toLin'_mul_apply, hM'M, Matrix.toLin'_one, id_apply] } /-- Linear maps `(n → R) →ₗ[R] (n → R)` are algebra equivalent to `Matrix n n R`. -/ def LinearMap.toMatrixAlgEquiv' : ((n → R) →ₗ[R] n → R) ≃ₐ[R] Matrix n n R := AlgEquiv.ofLinearEquiv LinearMap.toMatrix' LinearMap.toMatrix'_one LinearMap.toMatrix'_mul /-- A `Matrix n n R` is algebra equivalent to a linear map `(n → R) →ₗ[R] (n → R)`. -/ def Matrix.toLinAlgEquiv' : Matrix n n R ≃ₐ[R] (n → R) →ₗ[R] n → R := LinearMap.toMatrixAlgEquiv'.symm @[simp] theorem LinearMap.toMatrixAlgEquiv'_symm : (LinearMap.toMatrixAlgEquiv'.symm : Matrix n n R ≃ₐ[R] _) = Matrix.toLinAlgEquiv' := rfl @[simp] theorem Matrix.toLinAlgEquiv'_symm : (Matrix.toLinAlgEquiv'.symm : ((n → R) →ₗ[R] n → R) ≃ₐ[R] _) = LinearMap.toMatrixAlgEquiv' := rfl @[simp] theorem LinearMap.toMatrixAlgEquiv'_toLinAlgEquiv' (M : Matrix n n R) : LinearMap.toMatrixAlgEquiv' (Matrix.toLinAlgEquiv' M) = M := LinearMap.toMatrixAlgEquiv'.apply_symm_apply M @[simp] theorem Matrix.toLinAlgEquiv'_toMatrixAlgEquiv' (f : (n → R) →ₗ[R] n → R) : Matrix.toLinAlgEquiv' (LinearMap.toMatrixAlgEquiv' f) = f := Matrix.toLinAlgEquiv'.apply_symm_apply f @[simp] theorem LinearMap.toMatrixAlgEquiv'_apply (f : (n → R) →ₗ[R] n → R) (i j) : LinearMap.toMatrixAlgEquiv' f i j = f (fun j' ↦ if j' = j then 1 else 0) i := by simp [LinearMap.toMatrixAlgEquiv'] @[simp] theorem Matrix.toLinAlgEquiv'_apply (M : Matrix n n R) (v : n → R) : Matrix.toLinAlgEquiv' M v = M *ᵥ v := rfl theorem Matrix.toLinAlgEquiv'_one : Matrix.toLinAlgEquiv' (1 : Matrix n n R) = LinearMap.id := Matrix.toLin'_one @[simp] theorem 
LinearMap.toMatrixAlgEquiv'_id : LinearMap.toMatrixAlgEquiv' (LinearMap.id : (n → R) →ₗ[R] n → R) = 1 := LinearMap.toMatrix'_id theorem LinearMap.toMatrixAlgEquiv'_comp (f g : (n → R) →ₗ[R] n → R) : LinearMap.toMatrixAlgEquiv' (f.comp g) = LinearMap.toMatrixAlgEquiv' f * LinearMap.toMatrixAlgEquiv' g := LinearMap.toMatrix'_comp _ _ theorem LinearMap.toMatrixAlgEquiv'_mul (f g : (n → R) →ₗ[R] n → R) : LinearMap.toMatrixAlgEquiv' (f * g) = LinearMap.toMatrixAlgEquiv' f * LinearMap.toMatrixAlgEquiv' g := LinearMap.toMatrixAlgEquiv'_comp f g @[simp] theorem LinearMap.isUnit_toMatrix'_iff {f : (n → R) →ₗ[R] n → R} : IsUnit f.toMatrix' ↔ IsUnit f := isUnit_map_iff LinearMap.toMatrixAlgEquiv' f @[simp] theorem Matrix.isUnit_toLin'_iff {M : Matrix n n R} : IsUnit M.toLin' ↔ IsUnit M := isUnit_map_iff LinearMap.toMatrixAlgEquiv'.symm M end ToMatrix' section ToMatrix section Finite variable {R : Type*} [CommSemiring R] variable {l m n : Type*} [Fintype n] [Finite m] [DecidableEq n] variable {M₁ M₂ : Type*} [AddCommMonoid M₁] [AddCommMonoid M₂] [Module R M₁] [Module R M₂] variable (v₁ : Basis n R M₁) (v₂ : Basis m R M₂) /-- Given bases of two modules `M₁` and `M₂` over a commutative ring `R`, we get a linear equivalence between linear maps `M₁ →ₗ M₂` and matrices over `R` indexed by the bases. -/ def LinearMap.toMatrix : (M₁ →ₗ[R] M₂) ≃ₗ[R] Matrix m n R := LinearEquiv.trans (LinearEquiv.arrowCongr v₁.equivFun v₂.equivFun) LinearMap.toMatrix' /-- `LinearMap.toMatrix'` is a particular case of `LinearMap.toMatrix`, for the standard basis `Pi.basisFun R n`. -/ @[simp] theorem LinearMap.toMatrix_eq_toMatrix' : LinearMap.toMatrix (Pi.basisFun R n) (Pi.basisFun R n) = LinearMap.toMatrix' := rfl /-- Given bases of two modules `M₁` and `M₂` over a commutative ring `R`, we get a linear equivalence between matrices over `R` indexed by the bases and linear maps `M₁ →ₗ M₂`. 
-/ def Matrix.toLin : Matrix m n R ≃ₗ[R] M₁ →ₗ[R] M₂ := (LinearMap.toMatrix v₁ v₂).symm /-- `Matrix.toLin'` is a particular case of `Matrix.toLin`, for the standard basis `Pi.basisFun R n`. -/ theorem Matrix.toLin_eq_toLin' : Matrix.toLin (Pi.basisFun R n) (Pi.basisFun R m) = Matrix.toLin' := rfl @[simp] theorem LinearMap.toMatrix_symm : (LinearMap.toMatrix v₁ v₂).symm = Matrix.toLin v₁ v₂ := rfl @[simp] theorem Matrix.toLin_symm : (Matrix.toLin v₁ v₂).symm = LinearMap.toMatrix v₁ v₂ := rfl @[simp] theorem Matrix.toLin_toMatrix (f : M₁ →ₗ[R] M₂) : Matrix.toLin v₁ v₂ (LinearMap.toMatrix v₁ v₂ f) = f := by rw [← Matrix.toLin_symm, LinearEquiv.apply_symm_apply] @[simp] theorem LinearMap.toMatrix_toLin (M : Matrix m n R) : LinearMap.toMatrix v₁ v₂ (Matrix.toLin v₁ v₂ M) = M := by rw [← Matrix.toLin_symm, LinearEquiv.symm_apply_apply] theorem LinearMap.toMatrix_apply (f : M₁ →ₗ[R] M₂) (i : m) (j : n) : LinearMap.toMatrix v₁ v₂ f i j = v₂.repr (f (v₁ j)) i := by rw [LinearMap.toMatrix, LinearEquiv.trans_apply, LinearMap.toMatrix'_apply, LinearEquiv.arrowCongr_apply, Basis.equivFun_symm_apply, Finset.sum_eq_single j, if_pos rfl, one_smul, Basis.equivFun_apply] · intro j' _ hj' rw [if_neg hj', zero_smul] · intro hj have := Finset.mem_univ j contradiction theorem LinearMap.toMatrix_transpose_apply (f : M₁ →ₗ[R] M₂) (j : n) : (LinearMap.toMatrix v₁ v₂ f)ᵀ j = v₂.repr (f (v₁ j)) := funext fun i ↦ f.toMatrix_apply _ _ i j theorem LinearMap.toMatrix_apply' (f : M₁ →ₗ[R] M₂) (i : m) (j : n) : LinearMap.toMatrix v₁ v₂ f i j = v₂.repr (f (v₁ j)) i := LinearMap.toMatrix_apply v₁ v₂ f i j theorem LinearMap.toMatrix_transpose_apply' (f : M₁ →ₗ[R] M₂) (j : n) : (LinearMap.toMatrix v₁ v₂ f)ᵀ j = v₂.repr (f (v₁ j)) := LinearMap.toMatrix_transpose_apply v₁ v₂ f j /-- This will be a special case of `LinearMap.toMatrix_id_eq_basis_toMatrix`. 
-/ theorem LinearMap.toMatrix_id : LinearMap.toMatrix v₁ v₁ id = 1 := by ext i j simp [LinearMap.toMatrix_apply, Matrix.one_apply, Finsupp.single_apply, eq_comm] @[simp] theorem LinearMap.toMatrix_one : LinearMap.toMatrix v₁ v₁ 1 = 1 := LinearMap.toMatrix_id v₁ @[simp] lemma LinearMap.toMatrix_singleton {ι : Type*} [Unique ι] (f : R →ₗ[R] R) (i j : ι) : f.toMatrix (.singleton ι R) (.singleton ι R) i j = f 1 := by simp [toMatrix, Subsingleton.elim j default] @[simp] theorem Matrix.toLin_one : Matrix.toLin v₁ v₁ 1 = LinearMap.id := by rw [← LinearMap.toMatrix_id v₁, Matrix.toLin_toMatrix] theorem LinearMap.toMatrix_reindexRange [DecidableEq M₁] (f : M₁ →ₗ[R] M₂) (k : m) (i : n) : LinearMap.toMatrix v₁.reindexRange v₂.reindexRange f ⟨v₂ k, Set.mem_range_self k⟩ ⟨v₁ i, Set.mem_range_self i⟩ = LinearMap.toMatrix v₁ v₂ f k i := by simp_rw [LinearMap.toMatrix_apply, Basis.reindexRange_self, Basis.reindexRange_repr] @[simp] theorem LinearMap.toMatrix_algebraMap (x : R) : LinearMap.toMatrix v₁ v₁ (algebraMap R (Module.End R M₁) x) = scalar n x := by simp [Module.algebraMap_end_eq_smul_id, LinearMap.toMatrix_id, smul_eq_diagonal_mul] theorem LinearMap.toMatrix_mulVec_repr (f : M₁ →ₗ[R] M₂) (x : M₁) : LinearMap.toMatrix v₁ v₂ f *ᵥ v₁.repr x = v₂.repr (f x) := by ext i rw [← Matrix.toLin'_apply, LinearMap.toMatrix, LinearEquiv.trans_apply, Matrix.toLin'_toMatrix', LinearEquiv.arrowCongr_apply, v₂.equivFun_apply] congr exact v₁.equivFun.symm_apply_apply x @[simp] theorem LinearMap.toMatrix_basis_equiv [Fintype l] [DecidableEq l] (b : Basis l R M₁) (b' : Basis l R M₂) : LinearMap.toMatrix b' b (b'.equiv b (Equiv.refl l) : M₂ →ₗ[R] M₁) = 1 := by ext i j simp [LinearMap.toMatrix_apply, Matrix.one_apply, Finsupp.single_apply, eq_comm] theorem LinearMap.toMatrix_smulBasis_left {G} [Group G] [DistribMulAction G M₁] [SMulCommClass G R M₁] (g : G) (f : M₁ →ₗ[R] M₂) : LinearMap.toMatrix (g • v₁) v₂ f = LinearMap.toMatrix v₁ v₂ (f ∘ₗ DistribMulAction.toLinearMap _ _ g) := by rfl theorem 
LinearMap.toMatrix_smulBasis_right {G} [Group G] [DistribMulAction G M₂] [SMulCommClass G R M₂] (g : G) (f : M₁ →ₗ[R] M₂) : LinearMap.toMatrix v₁ (g • v₂) f = LinearMap.toMatrix v₁ v₂ (DistribMulAction.toLinearMap _ _ g⁻¹ ∘ₗ f) := by rfl end Finite variable {R : Type*} [CommSemiring R] variable {l m n : Type*} [Fintype n] [Fintype m] [DecidableEq n] variable {M₁ M₂ : Type*} [AddCommMonoid M₁] [AddCommMonoid M₂] [Module R M₁] [Module R M₂] variable (v₁ : Basis n R M₁) (v₂ : Basis m R M₂) /-- The matrix of `toSpanSingleton R M₂ x` given by bases `v₁` and `v₂` is equal to `vecMulVec (v₂.repr x) v₁`. When `v₁ = Module.Basis.singleton` then this is the column matrix of `v₂.repr x`. -/ theorem LinearMap.toMatrix_toSpanSingleton (v₁ : Basis n R R) (v₂ : Basis m R M₂) (x : M₂) : (toSpanSingleton R M₂ x).toMatrix v₁ v₂ = vecMulVec (v₂.repr x) v₁ := by ext; simp [toMatrix_apply, vecMulVec_apply, mul_comm] @[simp] lemma LinearMap.toMatrix_smulRight (f : M₁ →ₗ[R] R) (x : M₂) : toMatrix v₁ v₂ (f.smulRight x) = vecMulVec (v₂.repr x) (f ∘ v₁) := by ext i j simpa [toMatrix_apply, vecMulVec_apply] using mul_comm _ _ theorem Matrix.toLin_apply (M : Matrix m n R) (v : M₁) : Matrix.toLin v₁ v₂ M v = ∑ j, (M *ᵥ v₁.repr v) j • v₂ j := show v₂.equivFun.symm (Matrix.toLin' M (v₁.repr v)) = _ by rw [Matrix.toLin'_apply, v₂.equivFun_symm_apply] @[simp] theorem Matrix.toLin_self (M : Matrix m n R) (i : n) : Matrix.toLin v₁ v₂ M (v₁ i) = ∑ j, M j i • v₂ j := by rw [Matrix.toLin_apply, Finset.sum_congr rfl fun j _hj ↦ ?_] rw [Basis.repr_self, Matrix.mulVec, dotProduct, Finset.sum_eq_single i, Finsupp.single_eq_same, mul_one] · intro i' _ i'_ne rw [Finsupp.single_eq_of_ne i'_ne, mul_zero] · intros have := Finset.mem_univ i contradiction theorem Matrix.toLin_apply_eq_zero_iff {R M₁ M₂ : Type*} [CommRing R] [AddCommGroup M₁] [AddCommGroup M₂] [Module R M₁] [Module R M₂] {v₁ : Basis n R M₁} {v₂ : Basis m R M₂} {A : Matrix m n R} {x : M₁} : A.toLin v₁ v₂ x = 0 ↔ ∀ j, (A *ᵥ v₁.repr x) j = 0 := by rw 
[toLin_apply] exact ⟨Fintype.linearIndependent_iff.mp v₂.linearIndependent _, fun h ↦ by simp [h]⟩ variable {M₃ : Type*} [AddCommMonoid M₃] [Module R M₃] (v₃ : Basis l R M₃) theorem LinearMap.toMatrix_comp [Finite l] [DecidableEq m] (f : M₂ →ₗ[R] M₃) (g : M₁ →ₗ[R] M₂) : LinearMap.toMatrix v₁ v₃ (f.comp g) = LinearMap.toMatrix v₂ v₃ f * LinearMap.toMatrix v₁ v₂ g := by simp_rw [LinearMap.toMatrix, LinearEquiv.trans_apply] rw [LinearEquiv.arrowCongr_comp _ v₂.equivFun, LinearMap.toMatrix'_comp] theorem LinearMap.toMatrix_mul (f g : M₁ →ₗ[R] M₁) : LinearMap.toMatrix v₁ v₁ (f * g) = LinearMap.toMatrix v₁ v₁ f * LinearMap.toMatrix v₁ v₁ g := by rw [Module.End.mul_eq_comp, LinearMap.toMatrix_comp v₁ v₁ v₁ f g] lemma LinearMap.toMatrix_pow (f : M₁ →ₗ[R] M₁) (k : ℕ) : (toMatrix v₁ v₁ f) ^ k = toMatrix v₁ v₁ (f ^ k) := by induction k with | zero => simp | succ k ih => rw [pow_succ, pow_succ, ih, ← toMatrix_mul] theorem Matrix.toLin_mul [Finite l] [DecidableEq m] (A : Matrix l m R) (B : Matrix m n R) : Matrix.toLin v₁ v₃ (A * B) = (Matrix.toLin v₂ v₃ A).comp (Matrix.toLin v₁ v₂ B) := by apply (LinearMap.toMatrix v₁ v₃).injective haveI : DecidableEq l := fun _ _ ↦ Classical.propDecidable _ rw [LinearMap.toMatrix_comp v₁ v₂ v₃] repeat' rw [LinearMap.toMatrix_toLin] @[simp] theorem Matrix.toLin_pow (A : Matrix n n R) (k : ℕ) : (A ^ k).toLin v₁ v₁ = (A.toLin v₁ v₁) ^ k := by induction k with | zero => simp only [pow_zero, toLin_one, End.one_eq_id] | succ n ih => rw [pow_succ, pow_succ, toLin_mul v₁ v₁, ih, Module.End.mul_eq_comp] /-- Shortcut lemma for `Matrix.toLin_mul` and `LinearMap.comp_apply`. -/ theorem Matrix.toLin_mul_apply [Finite l] [DecidableEq m] (A : Matrix l m R) (B : Matrix m n R) (x) : Matrix.toLin v₁ v₃ (A * B) x = (Matrix.toLin v₂ v₃ A) (Matrix.toLin v₁ v₂ B x) := by rw [Matrix.toLin_mul v₁ v₂, LinearMap.comp_apply] /-- If `M` and `M` are each other's inverse matrices, `Matrix.toLin M` and `Matrix.toLin M'` form a linear equivalence. 
-/ @[simps] def Matrix.toLinOfInv [DecidableEq m] {M : Matrix m n R} {M' : Matrix n m R} (hMM' : M * M' = 1) (hM'M : M' * M = 1) : M₁ ≃ₗ[R] M₂ := { Matrix.toLin v₁ v₂ M with toFun := Matrix.toLin v₁ v₂ M invFun := Matrix.toLin v₂ v₁ M' left_inv := fun x ↦ by rw [← Matrix.toLin_mul_apply, hM'M, Matrix.toLin_one, id_apply] right_inv := fun x ↦ by rw [← Matrix.toLin_mul_apply, hMM', Matrix.toLin_one, id_apply] } /-- Given a basis of a module `M₁` over a commutative ring `R`, we get an algebra equivalence between linear maps `M₁ →ₗ M₁` and square matrices over `R` indexed by the basis. -/ def LinearMap.toMatrixAlgEquiv : (M₁ →ₗ[R] M₁) ≃ₐ[R] Matrix n n R := AlgEquiv.ofLinearEquiv (LinearMap.toMatrix v₁ v₁) (LinearMap.toMatrix_one v₁) (LinearMap.toMatrix_mul v₁) /-- Given a basis of a module `M₁` over a commutative ring `R`, we get an algebra equivalence between square matrices over `R` indexed by the basis and linear maps `M₁ →ₗ M₁`. -/ def Matrix.toLinAlgEquiv : Matrix n n R ≃ₐ[R] M₁ →ₗ[R] M₁ := (LinearMap.toMatrixAlgEquiv v₁).symm @[simp] theorem LinearMap.toMatrixAlgEquiv_symm : (LinearMap.toMatrixAlgEquiv v₁).symm = Matrix.toLinAlgEquiv v₁ := rfl @[simp] theorem Matrix.toLinAlgEquiv_symm : (Matrix.toLinAlgEquiv v₁).symm = LinearMap.toMatrixAlgEquiv v₁ := rfl @[simp] theorem Matrix.toLinAlgEquiv_toMatrixAlgEquiv (f : M₁ →ₗ[R] M₁) : Matrix.toLinAlgEquiv v₁ (LinearMap.toMatrixAlgEquiv v₁ f) = f := by rw [← Matrix.toLinAlgEquiv_symm, AlgEquiv.apply_symm_apply] @[simp] theorem LinearMap.toMatrixAlgEquiv_toLinAlgEquiv (M : Matrix n n R) : LinearMap.toMatrixAlgEquiv v₁ (Matrix.toLinAlgEquiv v₁ M) = M := by rw [← Matrix.toLinAlgEquiv_symm, AlgEquiv.symm_apply_apply] theorem LinearMap.toMatrixAlgEquiv_apply (f : M₁ →ₗ[R] M₁) (i j : n) : LinearMap.toMatrixAlgEquiv v₁ f i j = v₁.repr (f (v₁ j)) i := by simp [LinearMap.toMatrixAlgEquiv, LinearMap.toMatrix_apply] theorem LinearMap.toMatrixAlgEquiv_transpose_apply (f : M₁ →ₗ[R] M₁) (j : n) : (LinearMap.toMatrixAlgEquiv v₁ f)ᵀ j = 
v₁.repr (f (v₁ j)) := funext fun i ↦ f.toMatrix_apply _ _ i j theorem LinearMap.toMatrixAlgEquiv_apply' (f : M₁ →ₗ[R] M₁) (i j : n) : LinearMap.toMatrixAlgEquiv v₁ f i j = v₁.repr (f (v₁ j)) i := LinearMap.toMatrixAlgEquiv_apply v₁ f i j theorem LinearMap.toMatrixAlgEquiv_transpose_apply' (f : M₁ →ₗ[R] M₁) (j : n) : (LinearMap.toMatrixAlgEquiv v₁ f)ᵀ j = v₁.repr (f (v₁ j)) := LinearMap.toMatrixAlgEquiv_transpose_apply v₁ f j theorem Matrix.toLinAlgEquiv_apply (M : Matrix n n R) (v : M₁) : Matrix.toLinAlgEquiv v₁ M v = ∑ j, (M *ᵥ v₁.repr v) j • v₁ j := show v₁.equivFun.symm (Matrix.toLinAlgEquiv' M (v₁.repr v)) = _ by rw [Matrix.toLinAlgEquiv'_apply, v₁.equivFun_symm_apply] @[simp] theorem Matrix.toLinAlgEquiv_self (M : Matrix n n R) (i : n) : Matrix.toLinAlgEquiv v₁ M (v₁ i) = ∑ j, M j i • v₁ j := Matrix.toLin_self _ _ _ _ theorem LinearMap.toMatrixAlgEquiv_id : LinearMap.toMatrixAlgEquiv v₁ id = 1 := by simp_rw [LinearMap.toMatrixAlgEquiv, AlgEquiv.ofLinearEquiv_apply, LinearMap.toMatrix_id] theorem Matrix.toLinAlgEquiv_one : Matrix.toLinAlgEquiv v₁ 1 = LinearMap.id := by rw [← LinearMap.toMatrixAlgEquiv_id v₁, Matrix.toLinAlgEquiv_toMatrixAlgEquiv] theorem LinearMap.toMatrixAlgEquiv_reindexRange [DecidableEq M₁] (f : M₁ →ₗ[R] M₁) (k i : n) : LinearMap.toMatrixAlgEquiv v₁.reindexRange f ⟨v₁ k, Set.mem_range_self k⟩ ⟨v₁ i, Set.mem_range_self i⟩ = LinearMap.toMatrixAlgEquiv v₁ f k i := by simp_rw [LinearMap.toMatrixAlgEquiv_apply, Basis.reindexRange_self, Basis.reindexRange_repr] theorem LinearMap.toMatrixAlgEquiv_comp (f g : M₁ →ₗ[R] M₁) : LinearMap.toMatrixAlgEquiv v₁ (f.comp g) = LinearMap.toMatrixAlgEquiv v₁ f * LinearMap.toMatrixAlgEquiv v₁ g := by simp [LinearMap.toMatrixAlgEquiv, LinearMap.toMatrix_comp v₁ v₁ v₁ f g] theorem LinearMap.toMatrixAlgEquiv_mul (f g : M₁ →ₗ[R] M₁) : LinearMap.toMatrixAlgEquiv v₁ (f * g) = LinearMap.toMatrixAlgEquiv v₁ f * LinearMap.toMatrixAlgEquiv v₁ g := by rw [Module.End.mul_eq_comp, LinearMap.toMatrixAlgEquiv_comp v₁ f g] 
theorem Matrix.toLinAlgEquiv_mul (A B : Matrix n n R) : Matrix.toLinAlgEquiv v₁ (A * B) = (Matrix.toLinAlgEquiv v₁ A).comp (Matrix.toLinAlgEquiv v₁ B) := by convert Matrix.toLin_mul v₁ v₁ v₁ A B @[simp] theorem LinearMap.isUnit_toMatrix_iff {f : M₁ →ₗ[R] M₁} : IsUnit (f.toMatrix v₁ v₁) ↔ IsUnit f := isUnit_map_iff (LinearMap.toMatrixAlgEquiv _) f @[simp] theorem Matrix.isUnit_toLin_iff {M : Matrix n n R} : IsUnit (M.toLin v₁ v₁) ↔ IsUnit M := isUnit_map_iff (LinearMap.toMatrixAlgEquiv _).symm M @[simp] theorem Matrix.toLin_finTwoProd_apply (a b c d : R) (x : R × R) : Matrix.toLin (Basis.finTwoProd R) (Basis.finTwoProd R) !![a, b; c, d] x = (a * x.fst + b * x.snd, c * x.fst + d * x.snd) := by simp [Matrix.toLin_apply, Matrix.mulVec, dotProduct] theorem Matrix.toLin_finTwoProd (a b c d : R) : Matrix.toLin (Basis.finTwoProd R) (Basis.finTwoProd R) !![a, b; c, d] = (a • LinearMap.fst R R R + b • LinearMap.snd R R R).prod (c • LinearMap.fst R R R + d • LinearMap.snd R R R) := LinearMap.ext <| Matrix.toLin_finTwoProd_apply _ _ _ _ @[simp] theorem toMatrix_distrib_mul_action_toLinearMap (x : R) : LinearMap.toMatrix v₁ v₁ (DistribMulAction.toLinearMap R M₁ x) = Matrix.diagonal fun _ ↦ x := by ext rw [LinearMap.toMatrix_apply, DistribMulAction.toLinearMap_apply, LinearEquiv.map_smul, Basis.repr_self, Finsupp.smul_single_one, Finsupp.single_eq_pi_single, Matrix.diagonal_apply, Pi.single_apply] lemma LinearMap.toMatrix_prodMap [DecidableEq m] [DecidableEq (n ⊕ m)] (φ₁ : Module.End R M₁) (φ₂ : Module.End R M₂) : toMatrix (v₁.prod v₂) (v₁.prod v₂) (φ₁.prodMap φ₂) = Matrix.fromBlocks (toMatrix v₁ v₁ φ₁) 0 0 (toMatrix v₂ v₂ φ₂) := by ext (i | i) (j | j) <;> simp [toMatrix] end ToMatrix namespace Algebra section Lmul variable {R S : Type*} [CommSemiring R] [Semiring S] [Algebra R S] variable {m : Type*} [Fintype m] [DecidableEq m] (b : Basis m R S) theorem toMatrix_lmul' (x : S) (i j) : LinearMap.toMatrix b b (lmul R S x) i j = b.repr (x * b j) i := by simp only 
[LinearMap.toMatrix_apply', coe_lmul_eq_mul, LinearMap.mul_apply'] @[simp] theorem toMatrix_lsmul (x : R) : LinearMap.toMatrix b b (Algebra.lsmul R R S x) = Matrix.diagonal fun _ ↦ x := toMatrix_distrib_mul_action_toLinearMap b x /-- `leftMulMatrix b x` is the matrix corresponding to the linear map `fun y ↦ x * y`. `leftMulMatrix_eq_repr_mul` gives a formula for the entries of `leftMulMatrix`. This definition is useful for doing (more) explicit computations with `LinearMap.mulLeft`, such as the trace form or norm map for algebras. -/ noncomputable def leftMulMatrix : S →ₐ[R] Matrix m m R where toFun x := LinearMap.toMatrix b b (Algebra.lmul R S x) map_zero' := by rw [map_zero, LinearEquiv.map_zero] map_one' := by rw [map_one, LinearMap.toMatrix_one] map_add' x y := by rw [map_add, LinearEquiv.map_add] map_mul' x y := by rw [map_mul, LinearMap.toMatrix_mul] commutes' r := by ext rw [lmul_algebraMap, toMatrix_lsmul, algebraMap_eq_diagonal, Pi.algebraMap_def, Algebra.algebraMap_self_apply] theorem leftMulMatrix_apply (x : S) : leftMulMatrix b x = LinearMap.toMatrix b b (lmul R S x) := rfl theorem leftMulMatrix_eq_repr_mul (x : S) (i j) : leftMulMatrix b x i j = b.repr (x * b j) i := by -- This is defeq to just `toMatrix_lmul' b x i j`, -- but the unfolding goes a lot faster with this explicit `rw`. 
rw [leftMulMatrix_apply, toMatrix_lmul' b x i j] theorem leftMulMatrix_mulVec_repr (x y : S) : leftMulMatrix b x *ᵥ b.repr y = b.repr (x * y) := (LinearMap.mulLeft R x).toMatrix_mulVec_repr b b y @[simp] theorem toMatrix_lmul_eq (x : S) : LinearMap.toMatrix b b (LinearMap.mulLeft R x) = leftMulMatrix b x := rfl theorem leftMulMatrix_injective : Function.Injective (leftMulMatrix b) := fun x x' h ↦ calc x = Algebra.lmul R S x 1 := (mul_one x).symm _ = Algebra.lmul R S x' 1 := by rw [(LinearMap.toMatrix b b).injective h] _ = x' := mul_one x' @[simp] theorem smul_leftMulMatrix {G} [Group G] [DistribMulAction G S] [SMulCommClass G R S] [SMulCommClass G S S] (g : G) (x) : leftMulMatrix (g • b) x = leftMulMatrix b x := by ext simp_rw [leftMulMatrix_apply, LinearMap.toMatrix_apply, coe_lmul_eq_mul, LinearMap.mul_apply', Basis.repr_smul, Basis.smul_apply, LinearEquiv.trans_apply, DistribMulAction.toLinearEquiv_symm_apply, mul_smul_comm, inv_smul_smul] variable {A M n : Type*} [Fintype n] [DecidableEq n] [CommSemiring A] [AddCommMonoid M] [Module R M] [Module A M] [Algebra R A] [IsScalarTower R A M] (bA : Basis m R A) (bM : Basis n A M) lemma _root_.LinearMap.restrictScalars_toMatrix (f : M →ₗ[A] M) : (f.restrictScalars R).toMatrix (bA.smulTower' bM) (bA.smulTower' bM) = ((f.toMatrix bM bM).map (leftMulMatrix bA)).comp _ _ _ _ _ := by ext; simp [toMatrix, Algebra.leftMulMatrix_apply, Basis.smulTower'_repr, Basis.smulTower'_apply, mul_comm] end Lmul section LmulTower variable {R S T : Type*} [CommSemiring R] [CommSemiring S] [Semiring T] variable [Algebra R S] [Algebra S T] [Algebra R T] [IsScalarTower R S T] variable {m n : Type*} [Fintype m] [Fintype n] [DecidableEq m] [DecidableEq n] variable (b : Basis m R S) (c : Basis n S T) theorem smulTower_leftMulMatrix (x) (ik jk) : leftMulMatrix (b.smulTower c) x ik jk = leftMulMatrix b (leftMulMatrix c x ik.2 jk.2) ik.1 jk.1 := by simp only [leftMulMatrix_apply, LinearMap.toMatrix_apply, mul_comm, Basis.smulTower_apply, 
Basis.smulTower_repr, Finsupp.smul_apply, id.smul_eq_mul, LinearEquiv.map_smul, mul_smul_comm, coe_lmul_eq_mul, LinearMap.mul_apply'] theorem smulTower_leftMulMatrix_algebraMap (x : S) : leftMulMatrix (b.smulTower c) (algebraMap _ _ x) = blockDiagonal fun _ ↦ leftMulMatrix b x := by ext ⟨i, k⟩ ⟨j, k'⟩ rw [smulTower_leftMulMatrix, AlgHom.commutes, blockDiagonal_apply, algebraMap_matrix_apply] split_ifs with h <;> simp only at h <;> simp theorem smulTower_leftMulMatrix_algebraMap_eq (x : S) (i j k) : leftMulMatrix (b.smulTower c) (algebraMap _ _ x) (i, k) (j, k) = leftMulMatrix b x i j := by rw [smulTower_leftMulMatrix_algebraMap, blockDiagonal_apply_eq] theorem smulTower_leftMulMatrix_algebraMap_ne (x : S) (i j) {k k'} (h : k ≠ k') : leftMulMatrix (b.smulTower c) (algebraMap _ _ x) (i, k) (j, k') = 0 := by rw [smulTower_leftMulMatrix_algebraMap, blockDiagonal_apply_ne _ _ _ h] end LmulTower end Algebra section variable {R S : Type*} [CommSemiring R] {n : Type*} [DecidableEq n] variable {M M₁ M₂ : Type*} [AddCommMonoid M] [Module R M] variable [AddCommMonoid M₁] [Module R M₁] [AddCommMonoid M₂] [Module R M₂] variable [Semiring S] [Module S M₁] [Module S M₂] [SMulCommClass S R M₁] [SMulCommClass S R M₂] variable [SMul R S] [IsScalarTower R S M₁] [IsScalarTower R S M₂] /-- The natural equivalence between linear endomorphisms of finite free modules and square matrices is compatible with the algebra structures. -/ def algEquivMatrix' [Fintype n] : Module.End R (n → R) ≃ₐ[R] Matrix n n R := LinearMap.toMatrixAlgEquiv' variable (R) in /-- A linear equivalence of two modules induces an equivalence of algebras of their endomorphisms. -/ @[simps!] def LinearEquiv.algConj (e : M₁ ≃ₗ[S] M₂) : Module.End S M₁ ≃ₐ[R] Module.End S M₂ where __ := e.conjRingEquiv commutes' := fun _ ↦ by ext; change e.restrictScalars R _ = _; simp /-- A basis of a module induces an equivalence of algebras from the endomorphisms of the module to square matrices. 
-/ def algEquivMatrix [Fintype n] (h : Basis n R M) : Module.End R M ≃ₐ[R] Matrix n n R := (h.equivFun.algConj R).trans algEquivMatrix' end namespace Module.Basis variable {R M M₁ M₂ ι ι₁ ι₂ : Type*} [CommSemiring R] variable [AddCommMonoid M] [AddCommMonoid M₁] [AddCommMonoid M₂] variable [Module R M] [Module R M₁] [Module R M₂] variable [Fintype ι] [Fintype ι₁] [Fintype ι₂] variable [DecidableEq ι] [DecidableEq ι₁] variable (b : Basis ι R M) (b₁ : Basis ι₁ R M₁) (b₂ : Basis ι₂ R M₂) /-- The standard basis of the space linear maps between two modules induced by a basis of the domain and codomain. If `M₁` and `M₂` are modules with basis `b₁` and `b₂` respectively indexed by finite types `ι₁` and `ι₂`, then `Basis.linearMap b₁ b₂` is the basis of `M₁ →ₗ[R] M₂` indexed by `ι₂ × ι₁` where `(i, j)` indexes the linear map that sends `b j` to `b i` and sends all other basis vectors to `0`. -/ @[simps! -isSimp repr_apply repr_symm_apply] noncomputable def linearMap (b₁ : Basis ι₁ R M₁) (b₂ : Basis ι₂ R M₂) : Basis (ι₂ × ι₁) R (M₁ →ₗ[R] M₂) := (Matrix.stdBasis R ι₂ ι₁).map (LinearMap.toMatrix b₁ b₂).symm attribute [simp] linearMap_repr_apply lemma linearMap_apply (ij : ι₂ × ι₁) : (b₁.linearMap b₂ ij) = (Matrix.toLin b₁ b₂) (Matrix.stdBasis R ι₂ ι₁ ij) := by simp [linearMap] lemma linearMap_apply_apply (ij : ι₂ × ι₁) (k : ι₁) : (b₁.linearMap b₂ ij) (b₁ k) = if ij.2 = k then b₂ ij.1 else 0 := by have := Classical.decEq ι₂ rw [linearMap_apply, Matrix.stdBasis_eq_single, Matrix.toLin_self] dsimp only [Matrix.single, of_apply] simp_rw [ite_smul, one_smul, zero_smul, ite_and, Finset.sum_ite_eq, Finset.mem_univ, if_true] /-- The standard basis of the endomorphism algebra of a module induced by a basis of the module. If `M` is a module with basis `b` indexed by a finite type `ι`, then `Basis.end b` is the basis of `Module.End R M` indexed by `ι × ι` where `(i, j)` indexes the linear map that sends `b j` to `b i` and sends all other basis vectors to `0`. -/ @[simps! 
-isSimp repr_apply repr_symm_apply] noncomputable abbrev «end» (b : Basis ι R M) : Basis (ι × ι) R (Module.End R M) := b.linearMap b attribute [simp] end_repr_apply lemma end_apply (ij : ι × ι) : (b.end ij) = (Matrix.toLin b b) (Matrix.stdBasis R ι ι ij) := linearMap_apply b b ij lemma end_apply_apply (ij : ι × ι) (k : ι) : (b.end ij) (b k) = if ij.2 = k then b ij.1 else 0 := linearMap_apply_apply b b ij k end Module.Basis section variable (ι : Type*) [Fintype ι] [DecidableEq ι] variable (R : Type*) [CommSemiring R] variable (A : Type*) [Semiring A] [Algebra R A] variable (M : Type*) [AddCommMonoid M] [Module R M] [Module A M] [IsScalarTower R A M] /-- Let `M` be an `A`-module. Every `A`-linear map `Mⁿ → Mⁿ` corresponds to a `n×n`-matrix whose entries are `A`-linear maps `M → M`. In another word, we have`End(Mⁿ) ≅ Matₙₓₙ(End(M))` defined by: `(f : Mⁿ → Mⁿ) ↦ (x ↦ f (0, ..., x at j-th position, ..., 0) i)ᵢⱼ` and `m : Matₙₓₙ(End(M)) ↦ (v ↦ ∑ⱼ mᵢⱼ(vⱼ))`. See also `LinearMap.toMatrix'` -/ @[simp] def endVecRingEquivMatrixEnd : Module.End A (ι → M) ≃+* Matrix ι ι (Module.End A M) where toFun f i j := { toFun := fun x ↦ f (Pi.single j x) i map_add' := fun x y ↦ by simp [Pi.single_add] map_smul' := fun x y ↦ by simp [Pi.single_smul] } invFun m := { toFun := fun x i ↦ ∑ j, m i j (x j) map_add' := by intros; ext; simp [Finset.sum_add_distrib] map_smul' := by intros; ext; simp [Finset.smul_sum] } left_inv f := by ext i x j simp only [LinearMap.coe_mk, AddHom.coe_mk, coe_comp, coe_single, Function.comp_apply] rw [← Fintype.sum_apply, ← map_sum] exact congr_arg₂ _ (by aesop) rfl right_inv m := by ext; simp [Pi.single_apply, apply_ite] map_mul' f g := by ext simp only [Module.End.mul_apply, LinearMap.coe_mk, AddHom.coe_mk, Matrix.mul_apply, coeFn_sum, Finset.sum_apply] rw [← Fintype.sum_apply, ← map_sum] exact congr_arg₂ _ (by aesop) rfl map_add' f g := by ext; simp /-- Let `M` be an `A`-module. 
Every `A`-linear map `Mⁿ → Mⁿ` corresponds to a `n×n`-matrix whose entries are `R`-linear maps `M → M`. In another word, we have`End(Mⁿ) ≅ Matₙₓₙ(End(M))` defined by: `(f : Mⁿ → Mⁿ) ↦ (x ↦ f (0, ..., x at j-th position, ..., 0) i)ᵢⱼ` and `m : Matₙₓₙ(End(M)) ↦ (v ↦ ∑ⱼ mᵢⱼ(vⱼ))`. See also `LinearMap.toMatrix'` -/ @[simps!] def endVecAlgEquivMatrixEnd : Module.End A (ι → M) ≃ₐ[R] Matrix ι ι (Module.End A M) where __ := endVecRingEquivMatrixEnd ι A M commutes' r := by ext simp only [endVecRingEquivMatrixEnd, RingEquiv.toEquiv_eq_coe, Module.algebraMap_end_eq_smul_id, Equiv.toFun_as_coe, EquivLike.coe_coe, RingEquiv.coe_mk, Equiv.coe_fn_mk, LinearMap.smul_apply, id_coe, id_eq, Pi.smul_apply, Pi.single_apply, smul_ite, smul_zero, LinearMap.coe_mk, AddHom.coe_mk, algebraMap_matrix_apply] split_ifs <;> rfl end
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Ideal.lean
import Mathlib.Data.Matrix.Basis import Mathlib.GroupTheory.Congruence.BigOperators import Mathlib.RingTheory.Ideal.Lattice import Mathlib.RingTheory.TwoSidedIdeal.Operations import Mathlib.RingTheory.Jacobson.Ideal /-! # Ideals in a matrix ring This file defines left (resp. two-sided) ideals in a matrix semiring (resp. ring) over left (resp. two-sided) ideals in the base semiring (resp. ring). We also characterize Jacobson radicals of ideals in such rings. ## Main results * `TwoSidedIdeal.equivMatrix` and `TwoSidedIdeal.orderIsoMatrix` establish an order isomorphism between two-sided ideals in $R$ and those in $Mₙ(R)$. * `TwoSidedIdeal.jacobson_matrix` shows that $J(Mₙ(I)) = Mₙ(J(I))$ for any two-sided ideal $I ≤ R$. -/ /-! ### Left ideals in a matrix semiring -/ namespace Ideal open Matrix variable {R : Type*} [Semiring R] (n : Type*) [Fintype n] [DecidableEq n] /-- The left ideal of matrices with entries in `I ≤ R`. -/ def matrix (I : Ideal R) : Ideal (Matrix n n R) where __ := I.toAddSubmonoid.matrix smul_mem' M N hN := by intro i j rw [smul_eq_mul, mul_apply] apply sum_mem intro k _ apply I.mul_mem_left _ (hN k j) @[deprecated (since := "2025-07-28")] alias matricesOver := matrix @[simp] theorem mem_matrix (I : Ideal R) (M : Matrix n n R) : M ∈ I.matrix n ↔ ∀ i j, M i j ∈ I := by rfl @[deprecated (since := "2025-07-28")] alias mem_matricesOver := mem_matrix theorem matrix_monotone : Monotone (matrix (R := R) n) := fun _ _ IJ _ MI i j => IJ (MI i j) @[deprecated (since := "2025-07-28")] alias matricesOver_monotone := matrix_monotone theorem matrix_strictMono_of_nonempty [Nonempty n] : StrictMono (matrix (R := R) n) := matrix_monotone n |>.strictMono_of_injective <| fun I J eq => by ext x have : (∀ _ _, x ∈ I) ↔ (∀ _ _, x ∈ J) := congr((Matrix.of fun _ _ => x) ∈ $eq) simpa only [forall_const] using this @[deprecated (since := "2025-07-28")] alias matricesOver_strictMono_of_nonempty := matrix_strictMono_of_nonempty @[simp] theorem matrix_bot : (⊥ : Ideal 
R).matrix n = ⊥ := by ext M simp only [mem_matrix, mem_bot] constructor · intro H; ext; apply H · intro H; simp [H] @[deprecated (since := "2025-07-28")] alias matricesOver_bot := matrix_bot @[simp] theorem matrix_top : (⊤ : Ideal R).matrix n = ⊤ := by ext; simp @[deprecated (since := "2025-07-28")] alias matricesOver_top := matrix_top end Ideal /-! ### Jacobson radicals of left ideals in a matrix ring -/ namespace Ideal open Matrix variable {R : Type*} [Ring R] {n : Type*} [Fintype n] [DecidableEq n] /-- A standard basis matrix is in $J(Mₙ(I))$ as long as its one possibly non-zero entry is in $J(I)$. -/ theorem single_mem_jacobson_matrix (I : Ideal R) : ∀ x ∈ I.jacobson, ∀ (i j : n), single i j x ∈ (I.matrix n).jacobson := by -- Proof generalized from example 8 in -- https://ysharifi.wordpress.com/2022/08/16/the-jacobson-radical-basic-examples/ simp_rw [Ideal.mem_jacobson_iff] intro x xIJ p q M have ⟨z, zMx⟩ := xIJ (M q p) let N : Matrix n n R := 1 - ∑ i, single i q (if i = q then 1 - z else (M i p) * x * z) use N intro i j obtain rfl | qj := eq_or_ne q j · by_cases iq : i = q · simp [iq, N, zMx, single, mul_apply, sum_apply, ite_and, sub_mul] · convert I.mul_mem_left (-M i p * x) zMx simp [iq, N, single, mul_apply, sum_apply, ite_and, sub_mul] simp [sub_add, mul_add, mul_sub, mul_assoc] · simp [N, qj, sum_apply, mul_apply] @[deprecated (since := "2025-05-05")] alias stdBasisMatrix_mem_jacobson_matricesOver := single_mem_jacobson_matrix @[deprecated (since := "2025-07-28")] alias single_mem_jacobson_matricesOver := single_mem_jacobson_matrix /-- For any left ideal $I ≤ R$, we have $Mₙ(J(I)) ≤ J(Mₙ(I))$. -/ theorem matrix_jacobson_le (I : Ideal R) : I.jacobson.matrix n ≤ (I.matrix n).jacobson := by intro M MI rw [matrix_eq_sum_single M] apply sum_mem intro i _ apply sum_mem intro j _ apply single_mem_jacobson_matrix I _ (MI i j) @[deprecated (since := "2025-07-28")] alias matricesOver_jacobson_le := matrix_jacobson_le end Ideal /-! 
### Two-sided ideals in a matrix ring -/ namespace RingCon variable {R n : Type*} section NonUnitalNonAssocSemiring variable [NonUnitalNonAssocSemiring R] [Fintype n] variable (n) /-- The ring congruence of matrices with entries related by `c`. -/ def matrix (c : RingCon R) : RingCon (Matrix n n R) where r M N := ∀ i j, c (M i j) (N i j) -- note: kept `fun` to distinguish `RingCon`'s binders from `r`'s binders. iseqv.refl _ := fun _ _ ↦ c.refl _ iseqv.symm h := fun _ _ ↦ c.symm <| h _ _ iseqv.trans h₁ h₂ := fun _ _ ↦ c.trans (h₁ _ _) (h₂ _ _) add' h₁ h₂ := fun _ _ ↦ c.add (h₁ _ _) (h₂ _ _) mul' h₁ h₂ := fun _ _ ↦ c.finset_sum _ fun _ _ => c.mul (h₁ _ _) (h₂ _ _) @[simp low] theorem matrix_apply {c : RingCon R} {M N : Matrix n n R} : c.matrix n M N ↔ ∀ i j, c (M i j) (N i j) := Iff.rfl @[simp] theorem matrix_apply_single [DecidableEq n] {c : RingCon R} {i j : n} {x y : R} : c.matrix n (Matrix.single i j x) (Matrix.single i j y) ↔ c x y := by refine ⟨fun h ↦ by simpa using h i j, fun h i' j' ↦ ?_⟩ obtain hi | rfl := ne_or_eq i i' · simpa [hi] using c.refl 0 obtain hj | rfl := ne_or_eq j j' · simpa [hj] using c.refl _ simpa using h @[deprecated (since := "2025-05-05")] alias matrix_apply_stdBasisMatrix := matrix_apply_single theorem matrix_monotone : Monotone (matrix (R := R) n) := fun _ _ hc _ _ h _ _ ↦ hc (h _ _) theorem matrix_injective [Nonempty n] : Function.Injective (matrix (R := R) n) := fun I J eq ↦ RingCon.ext fun r s ↦ by have := congr_fun (DFunLike.congr_fun eq (Matrix.of fun _ _ ↦ r)) (Matrix.of fun _ _ ↦ s) simpa using this theorem matrix_strictMono_of_nonempty [Nonempty n] : StrictMono (matrix (R := R) n) := matrix_monotone n |>.strictMono_of_injective <| matrix_injective _ @[simp] theorem matrix_bot : (⊥ : RingCon R).matrix n = ⊥ := eq_bot_iff.2 fun _ _ h ↦ Matrix.ext h @[simp] theorem matrix_top : (⊤ : RingCon R).matrix n = ⊤ := eq_top_iff.2 fun _ _ _ _ _ ↦ by simp open Matrix variable {n} /-- The congruence relation induced by `c` on `single i j`. 
-/ def ofMatrix [DecidableEq n] (c : RingCon (Matrix n n R)) : RingCon R where r x y := ∀ i j, c (single i j x) (single i j y) iseqv.refl _ := fun _ _ ↦ c.refl _ iseqv.symm h := fun _ _ ↦ c.symm <| h _ _ iseqv.trans h₁ h₂ := fun _ _ ↦ c.trans (h₁ _ _) (h₂ _ _) add' h₁ h₂ := fun _ _ ↦ by simpa [single_add] using c.add (h₁ _ _) (h₂ _ _) mul' h₁ h₂ := fun i j ↦ by simpa using c.mul (h₁ i i) (h₂ i j) @[simp] theorem ofMatrix_rel [DecidableEq n] {c : RingCon (Matrix n n R)} {x y : R} : ofMatrix c x y ↔ ∀ i j, c (single i j x) (single i j y) := Iff.rfl @[simp] theorem ofMatrix_matrix [DecidableEq n] [Nonempty n] (c : RingCon R) : ofMatrix (matrix n c) = c := by ext x y classical constructor · intro h inhabit n simpa using h default default default default · intro h i j rwa [matrix_apply_single] end NonUnitalNonAssocSemiring section NonAssocSemiring variable [NonAssocSemiring R] [Fintype n] open Matrix /-- Note that this does not apply to a non-unital ring, with counterexample where the elementwise congruence relation `!![⊤,⊤;⊤,(· ≡ · [PMOD 4])]` is a ring congruence over `Matrix (Fin 2) (Fin 2) 2ℤ`. -/ @[simp] theorem matrix_ofMatrix [DecidableEq n] (c : RingCon (Matrix n n R)) : matrix n (ofMatrix c) = c := by ext x y classical constructor · intro h rw [matrix_eq_sum_single x, matrix_eq_sum_single y] refine c.finset_sum _ fun i _ ↦ c.finset_sum _ fun j _ ↦ h i j i j · intro h i' j' i j simpa using c.mul (c.mul (c.refl <| single i i' 1) h) (c.refl <| single j' j 1) /-- A version of `ofMatrix_rel` for a single matrix index, rather than all indices. 
-/ theorem ofMatrix_rel' [DecidableEq n] {c : RingCon (Matrix n n R)} {x y : R} (i j : n) : ofMatrix c x y ↔ c (single i j x) (single i j y) := by refine ⟨fun h ↦ h i j, fun h i' j' ↦ ?_⟩ simpa using c.mul (c.mul (c.refl <| single i' i 1) h) (c.refl <| single j j' 1) theorem coe_ofMatrix_eq_relationMap [DecidableEq n] {c : RingCon (Matrix n n R)} (i j : n) : ⇑(ofMatrix c) = Relation.Map c (· i j) (· i j) := by ext x y constructor · intro h refine ⟨_,_, h i j, ?_⟩ simp · rintro ⟨X, Y, h, rfl, rfl⟩ i' j' simpa using c.mul (c.mul (c.refl <| single i' i 1) h) (c.refl <| single j j' 1) end NonAssocSemiring end RingCon namespace TwoSidedIdeal open Matrix variable {R : Type*} (n : Type*) section NonUnitalNonAssocRing variable [NonUnitalNonAssocRing R] [Fintype n] /-- The two-sided ideal of matrices with entries in `I ≤ R`. -/ @[simps] def matrix (I : TwoSidedIdeal R) : TwoSidedIdeal (Matrix n n R) where ringCon := I.ringCon.matrix n @[deprecated (since := "2025-07-28")] alias matricesOver := matrix @[simp] lemma mem_matrix (I : TwoSidedIdeal R) (M : Matrix n n R) : M ∈ I.matrix n ↔ ∀ i j, M i j ∈ I := Iff.rfl @[deprecated (since := "2025-07-28")] alias mem_matricesOver := mem_matrix theorem matrix_monotone : Monotone (matrix (R := R) n) := fun _ _ IJ _ MI i j => IJ (MI i j) @[deprecated (since := "2025-07-28")] alias matricesOver_monotone := matrix_monotone theorem matrix_strictMono_of_nonempty [h : Nonempty n] : StrictMono (matrix (R := R) n) := matrix_monotone n |>.strictMono_of_injective <| .comp (fun _ _ => mk.inj) <| (RingCon.matrix_injective n).comp ringCon_injective @[deprecated (since := "2025-07-28")] alias matricesOver_strictMono_of_nonempty := matrix_strictMono_of_nonempty @[simp] theorem matrix_bot : (⊥ : TwoSidedIdeal R).matrix n = ⊥ := ringCon_injective <| RingCon.matrix_bot _ @[deprecated (since := "2025-07-28")] alias matricesOver_bot := matrix_bot @[simp] theorem matrix_top : (⊤ : TwoSidedIdeal R).matrix n = ⊤ := ringCon_injective <| RingCon.matrix_top _ 
@[deprecated (since := "2025-07-28")] alias matricesOver_top := matrix_top end NonUnitalNonAssocRing section NonAssocRing variable [NonAssocRing R] [Fintype n] [Nonempty n] [DecidableEq n] variable {n} /-- Two-sided ideals in $R$ correspond bijectively to those in $Mₙ(R)$. Given an ideal $I ≤ R$, we send it to $Mₙ(I)$. Given an ideal $J ≤ Mₙ(R)$, we send it to $\{Nᵢⱼ ∣ ∃ N ∈ J\}$. -/ @[simps] def equivMatrix [Nonempty n] [DecidableEq n] : TwoSidedIdeal R ≃ TwoSidedIdeal (Matrix n n R) where toFun I := I.matrix n invFun J := { ringCon := J.ringCon.ofMatrix } right_inv _ := ringCon_injective <| RingCon.matrix_ofMatrix _ left_inv _ := ringCon_injective <| RingCon.ofMatrix_matrix _ @[deprecated (since := "2025-07-28")] alias equivMatricesOver := equivMatrix theorem coe_equivMatrix_symm_apply (I : TwoSidedIdeal (Matrix n n R)) (i j : n) : equivMatrix.symm I = {N i j | N ∈ I} := by ext r constructor · intro h exact ⟨single i j r, by simpa using h i j, by simp⟩ · rintro ⟨n, hn, rfl⟩ rw [SetLike.mem_coe, mem_iff, equivMatrix_symm_apply_ringCon, RingCon.coe_ofMatrix_eq_relationMap i j] exact ⟨n, 0, (I.mem_iff n).mp hn, rfl, rfl⟩ @[deprecated (since := "2025-07-28")] alias coe_equivMatricesOver_symm_apply := coe_equivMatrix_symm_apply /-- Two-sided ideals in $R$ are order-isomorphic with those in $Mₙ(R)$. See also `equivMatrix`. -/ @[simps!] 
def orderIsoMatrix : TwoSidedIdeal R ≃o TwoSidedIdeal (Matrix n n R) where __ := equivMatrix map_rel_iff' {I J} := by simp only [equivMatrix_apply] constructor · intro le x xI specialize @le (of fun _ _ => x) (by simp [xI]) simpa using le · intro IJ M MI i j exact IJ <| MI i j @[deprecated (since := "2025-07-28")] alias orderIsoMatricesOver := orderIsoMatrix end NonAssocRing section Ring variable [Ring R] [Fintype n] theorem asIdeal_matrix [DecidableEq n] (I : TwoSidedIdeal R) : asIdeal (I.matrix n) = (asIdeal I).matrix n := by ext; simp @[deprecated (since := "2025-07-28")] alias asIdeal_matricesOver := asIdeal_matrix end Ring end TwoSidedIdeal /-! ### Jacobson radicals of two-sided ideals in a matrix ring -/ namespace TwoSidedIdeal open Matrix variable {R : Type*} [Ring R] {n : Type*} [Fintype n] [DecidableEq n] private lemma jacobson_matrix_le (I : TwoSidedIdeal R) : (I.matrix n).jacobson ≤ I.jacobson.matrix n := by -- Proof generalized from example 8 in -- https://ysharifi.wordpress.com/2022/08/16/the-jacobson-radical-basic-examples/ intro M Mmem p q simp only [zero_apply, ← mem_iff] rw [mem_jacobson_iff] replace Mmem := mul_mem_right _ _ (single q p 1) Mmem rw [mem_jacobson_iff] at Mmem intro y specialize Mmem (y • single p p 1) have ⟨N, NxMI⟩ := Mmem use N p p simpa [mul_apply, single, ite_and] using NxMI p p @[deprecated (since := "2025-07-28")] alias jacobson_matricesOver_le := jacobson_matrix_le /-- For any two-sided ideal $I ≤ R$, we have $J(Mₙ(I)) = Mₙ(J(I))$. 
-/ theorem jacobson_matrix (I : TwoSidedIdeal R) : (I.matrix n).jacobson = I.jacobson.matrix n := by apply le_antisymm · apply jacobson_matrix_le · change asIdeal (I.matrix n).jacobson ≥ asIdeal (I.jacobson.matrix n) simp [asIdeal_jacobson, asIdeal_matrix, Ideal.matrix_jacobson_le] @[deprecated (since := "2025-07-28")] alias jacobson_matricesOver := jacobson_matrix theorem matrix_jacobson_bot : (⊥ : TwoSidedIdeal R).jacobson.matrix n = (⊥ : TwoSidedIdeal (Matrix n n R)).jacobson := matrix_bot n (R := R) ▸ (jacobson_matrix _).symm @[deprecated (since := "2025-07-28")] alias matricesOver_jacobson_bot := matrix_jacobson_bot end TwoSidedIdeal
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Unique.lean
import Mathlib.Data.Matrix.Basic /-! # One by one matrices This file proves that one by one matrices over a base are equivalent to the base itself under the canonical map that sends a one by one matrix `!![a]` to `a`. ## Main results - `Matrix.uniqueRingEquiv` - `Matrix.uniqueAlgEquiv` ## Tags Matrix, Unique, AlgEquiv -/ namespace Matrix variable {m n A R : Type*} [Unique m] [Unique n] /-- The isomorphism between the type of all one by one matrices and the base type. -/ @[simps] def uniqueEquiv : Matrix m n A ≃ A where toFun M := M default default invFun a := .of fun _ _ => a left_inv M := by ext i j; simp [Subsingleton.elim i default, Subsingleton.elim j default] right_inv a := by simp /-- The obvious additive isomorphism between M₁(A) and A, if A has an addition. -/ @[simps!] def uniqueAddEquiv [Add A] : Matrix m n A ≃+ A where __ := uniqueEquiv map_add' := by simp /-- `M₁(A)` is linearly equivalent to `A` as an `R`-module where `R` is a semiring. -/ @[simps] def uniqueLinearEquiv [Semiring R] [AddCommMonoid A] [Module R A] : Matrix m n A ≃ₗ[R] A where __ := uniqueAddEquiv map_smul' := by simp /-- `M₁(A)` and `A` are equivalent as rings. -/ @[simps!] def uniqueRingEquiv [NonUnitalNonAssocSemiring A] : Matrix m m A ≃+* A where __ := uniqueAddEquiv map_mul' := by simp [mul_apply] /-- `M₁(A)` is equivalent to `A` as an `R`-algebra. -/ @[simps!] def uniqueAlgEquiv [Semiring A] [CommSemiring R] [Algebra R A] : Matrix m m A ≃ₐ[R] A where __ := uniqueRingEquiv commutes' r := by aesop end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/SemiringInverse.lean
import Mathlib.Algebra.Group.Embedding import Mathlib.Data.Matrix.Mul import Mathlib.GroupTheory.Perm.Sign /-! # Nonsingular inverses over semirings This file proves `A * B = 1 ↔ B * A = 1` for square matrices over a commutative semiring. -/ open Equiv Equiv.Perm Finset variable {n m R : Type*} [Fintype m] [Fintype n] [DecidableEq m] [DecidableEq n] [CommSemiring R] variable (s : ℤˣ) (A B : Matrix n n R) (i j : n) namespace Matrix /-- The determinant, but only the terms of a given sign. -/ def detp : R := ∑ σ ∈ ofSign s, ∏ k, A k (σ k) @[simp] lemma detp_one_one : detp 1 (1 : Matrix n n R) = 1 := by rw [detp, sum_eq_single_of_mem 1] · simp [one_apply] · simp [ofSign] · rintro σ - hσ1 obtain ⟨i, hi⟩ := not_forall.mp (mt Perm.ext_iff.mpr hσ1) exact prod_eq_zero (mem_univ i) (one_apply_ne' hi) @[simp] lemma detp_neg_one_one : detp (-1) (1 : Matrix n n R) = 0 := by rw [detp, sum_eq_zero] intro σ hσ have hσ1 : σ ≠ 1 := by contrapose! hσ rw [hσ, mem_ofSign, sign_one] decide obtain ⟨i, hi⟩ := not_forall.mp (mt Perm.ext_iff.mpr hσ1) exact prod_eq_zero (mem_univ i) (one_apply_ne' hi) /-- The adjugate matrix, but only the terms of a given sign. 
-/ def adjp : Matrix n n R := of fun i j ↦ ∑ σ ∈ (ofSign s).filter (· j = i), ∏ k ∈ {j}ᶜ, A k (σ k) lemma adjp_apply (i j : n) : adjp s A i j = ∑ σ ∈ (ofSign s).filter (· j = i), ∏ k ∈ {j}ᶜ, A k (σ k) := rfl theorem detp_mul : detp 1 (A * B) + (detp 1 A * detp (-1) B + detp (-1) A * detp 1 B) = detp (-1) (A * B) + (detp 1 A * detp 1 B + detp (-1) A * detp (-1) B) := by have hf {s t} {σ : Perm n} (hσ : σ ∈ ofSign s) : ofSign (t * s) = (ofSign t).map (mulRightEmbedding σ) := by ext τ simp_rw [mem_map, mulRightEmbedding_apply, ← eq_mul_inv_iff_mul_eq, exists_eq_right, mem_ofSign, map_mul, map_inv, mul_inv_eq_iff_eq_mul, mem_ofSign.mp hσ] have h {s t} : detp s A * detp t B = ∑ σ ∈ ofSign s, ∑ τ ∈ ofSign (t * s), ∏ k, A k (σ k) * B (σ k) (τ k) := by simp_rw [detp, sum_mul_sum, prod_mul_distrib] refine sum_congr rfl fun σ hσ ↦ ?_ simp_rw [hf hσ, sum_map, mulRightEmbedding_apply, Perm.mul_apply] exact sum_congr rfl fun τ hτ ↦ (congr_arg (_ * ·) (Equiv.prod_comp σ _).symm) let ι : Perm n ↪ (n → n) := ⟨_, coe_fn_injective⟩ have hι {σ x} : ι σ x = σ x := rfl let bij : Finset (n → n) := (disjUnion (ofSign 1) (ofSign (-1)) ofSign_disjoint).map ι replace h (s) : detp s (A * B) = ∑ σ ∈ bijᶜ, ∑ τ ∈ ofSign s, ∏ i : n, A i (σ i) * B (σ i) (τ i) + (detp 1 A * detp s B + detp (-1) A * detp (-s) B) := by simp_rw [h, neg_mul_neg, mul_one, detp, mul_apply, prod_univ_sum, Fintype.piFinset_univ] rw [sum_comm, ← sum_compl_add_sum bij, sum_map, sum_disjUnion] simp_rw [hι] rw [h, h, neg_neg, add_assoc] conv_rhs => rw [add_assoc] refine congr_arg₂ (· + ·) (sum_congr rfl fun σ hσ ↦ ?_) (add_comm _ _) replace hσ : ¬ Function.Injective σ := by contrapose! 
hσ rw [notMem_compl, mem_map, ofSign_disjUnion] exact ⟨Equiv.ofBijective σ hσ.bijective_of_finite, mem_univ _, rfl⟩ obtain ⟨i, j, hσ, hij⟩ := Function.not_injective_iff.mp hσ replace hσ k : σ (swap i j k) = σ k := by rw [swap_apply_def] split_ifs with h h <;> simp only [hσ, h] rw [← mul_neg_one, hf (mem_ofSign.mpr (sign_swap hij)), sum_map] simp_rw [prod_mul_distrib, mulRightEmbedding_apply, Perm.mul_apply] refine sum_congr rfl fun τ hτ ↦ congr_arg (_ * ·) ?_ rw [← Equiv.prod_comp (swap i j)] simp only [hσ] theorem mul_adjp_apply_eq : (A * adjp s A) i i = detp s A := by have key := sum_fiberwise_eq_sum_filter (ofSign s) univ (· i) fun σ ↦ ∏ k, A k (σ k) simp_rw [mem_univ, filter_true] at key simp_rw [mul_apply, adjp_apply, mul_sum, detp, ← key] refine sum_congr rfl fun x hx ↦ sum_congr rfl fun σ hσ ↦ ?_ rw [← prod_mul_prod_compl ({i} : Finset n), prod_singleton, (mem_filter.mp hσ).2] theorem mul_adjp_apply_ne (h : i ≠ j) : (A * adjp 1 A) i j = (A * adjp (-1) A) i j := by simp_rw [mul_apply, adjp_apply, mul_sum, sum_sigma'] let f : (Σ x : n, Perm n) → (Σ x : n, Perm n) := fun ⟨x, σ⟩ ↦ ⟨σ i, σ * swap i j⟩ let t s : Finset (Σ x : n, Perm n) := univ.sigma fun x ↦ (ofSign s).filter fun σ ↦ σ j = x have hf {s} : ∀ p ∈ t s, f (f p) = p := by intro ⟨x, σ⟩ hp rw [mem_sigma, mem_filter, mem_ofSign] at hp simp_rw [f, Perm.mul_apply, swap_apply_left, hp.2.2, mul_swap_mul_self] refine sum_bij' (fun p _ ↦ f p) (fun p _ ↦ f p) ?_ ?_ hf hf ?_ · intro ⟨x, σ⟩ hp rw [mem_sigma, mem_filter, mem_ofSign] at hp ⊢ rw [Perm.mul_apply, sign_mul, hp.2.1, sign_swap h, swap_apply_right] exact ⟨mem_univ (σ i), rfl, rfl⟩ · intro ⟨x, σ⟩ hp rw [mem_sigma, mem_filter, mem_ofSign] at hp ⊢ rw [Perm.mul_apply, sign_mul, hp.2.1, sign_swap h, swap_apply_right] exact ⟨mem_univ (σ i), rfl, rfl⟩ · intro ⟨x, σ⟩ hp rw [mem_sigma, mem_filter, mem_ofSign] at hp have key : ({j}ᶜ : Finset n) = disjUnion ({i} : Finset n) ({i, j} : Finset n)ᶜ (by simp) := by rw [singleton_disjUnion, cons_eq_insert, compl_insert, 
insert_erase] rwa [mem_compl, mem_singleton] simp_rw [key, prod_disjUnion, prod_singleton, f, Perm.mul_apply, swap_apply_left, ← mul_assoc] rw [mul_comm (A i x) (A i (σ i)), hp.2.2] refine congr_arg _ (prod_congr rfl fun x hx ↦ ?_) rw [mem_compl, mem_insert, mem_singleton, not_or] at hx rw [swap_apply_of_ne_of_ne hx.1 hx.2] theorem mul_adjp_add_detp : A * adjp 1 A + detp (-1) A • 1 = A * adjp (-1) A + detp 1 A • 1 := by ext i j rcases eq_or_ne i j with rfl | h <;> simp_rw [add_apply, smul_apply, smul_eq_mul] · simp_rw [mul_adjp_apply_eq, one_apply_eq, mul_one, add_comm] · simp_rw [mul_adjp_apply_ne A i j h, one_apply_ne h, mul_zero] variable {A B} theorem isAddUnit_mul (hAB : A * B = 1) (i j k : n) (hij : i ≠ j) : IsAddUnit (A i k * B k j) := by revert k rw [← IsAddUnit.sum_univ_iff, ← mul_apply, hAB, one_apply_ne hij] exact isAddUnit_zero theorem isAddUnit_detp_mul_detp (hAB : A * B = 1) : IsAddUnit (detp 1 A * detp (-1) B + detp (-1) A * detp 1 B) := by suffices h : ∀ {s t}, s ≠ t → IsAddUnit (detp s A * detp t B) from (h (by decide)).add (h (by decide)) intro s t h simp_rw [detp, sum_mul_sum, IsAddUnit.sum_iff] intro σ hσ τ hτ rw [mem_ofSign] at hσ hτ rw [← hσ, ← hτ, ← sign_inv] at h replace h := ne_of_apply_ne sign h rw [ne_eq, eq_comm, eq_inv_iff_mul_eq_one, eq_comm] at h simp_rw [Equiv.ext_iff, not_forall, Perm.mul_apply, Perm.one_apply] at h obtain ⟨k, hk⟩ := h rw [mul_comm, ← Equiv.prod_comp σ, mul_comm, ← prod_mul_distrib, ← mul_prod_erase univ _ (mem_univ k), ← smul_eq_mul] exact (isAddUnit_mul hAB k (τ (σ k)) (σ k) hk).smul_right _ theorem isAddUnit_detp_smul_mul_adjp (hAB : A * B = 1) : IsAddUnit (detp 1 A • (B * adjp (-1) B) + detp (-1) A • (B * adjp 1 B)) := by suffices h : ∀ {s t}, s ≠ t → IsAddUnit (detp s A • (B * adjp t B)) from (h (by decide)).add (h (by decide)) intro s t h rw [isAddUnit_iff] intro i j simp_rw [smul_apply, smul_eq_mul, mul_apply, detp, adjp_apply, mul_sum, sum_mul, IsAddUnit.sum_iff] intro k hk σ hσ τ hτ rw [mem_filter] at hσ rw 
[mem_ofSign] at hσ hτ rw [← hσ.1, ← hτ, ← sign_inv] at h replace h := ne_of_apply_ne sign h rw [ne_eq, eq_comm, eq_inv_iff_mul_eq_one] at h obtain ⟨l, hl1, hl2⟩ := exists_mem_ne (one_lt_card_support_of_ne_one h) (τ⁻¹ j) rw [mem_support, ne_comm] at hl1 rw [ne_eq, ← mem_singleton, ← mem_compl] at hl2 rw [← prod_mul_prod_compl {τ⁻¹ j}, mul_mul_mul_comm, mul_comm, ← smul_eq_mul] apply IsAddUnit.smul_right have h0 : ∀ k, k ∈ ({τ⁻¹ j} : Finset n)ᶜ ↔ τ k ∈ ({j} : Finset n)ᶜ := by simp [inv_def, eq_symm_apply] rw [← prod_equiv τ h0 fun _ _ ↦ rfl, ← prod_mul_distrib, ← mul_prod_erase _ _ hl2, ← smul_eq_mul] exact (isAddUnit_mul hAB l (σ (τ l)) (τ l) hl1).smul_right _ theorem detp_smul_add_adjp (hAB : A * B = 1) : detp 1 B • A + adjp (-1) B = detp (-1) B • A + adjp 1 B := by have key := congr(A * $(mul_adjp_add_detp B)) simp_rw [mul_add, ← mul_assoc, hAB, one_mul, mul_smul, mul_one] at key rwa [add_comm, eq_comm, add_comm] theorem detp_smul_adjp (hAB : A * B = 1) : A + (detp 1 A • adjp (-1) B + detp (-1) A • adjp 1 B) = detp 1 A • adjp 1 B + detp (-1) A • adjp (-1) B := by have h0 := detp_mul A B rw [hAB, detp_one_one, detp_neg_one_one, zero_add] at h0 have h := detp_smul_add_adjp hAB replace h := congr(detp 1 A • $h + detp (-1) A • $h.symm) simp only [smul_add, smul_smul] at h rwa [add_add_add_comm, ← add_smul, add_add_add_comm, ← add_smul, ← h0, add_smul, one_smul, add_comm A, add_assoc, ((isAddUnit_detp_mul_detp hAB).smul_right _).add_right_inj] at h theorem mul_eq_one_comm : A * B = 1 ↔ B * A = 1 := by suffices h : ∀ A B : Matrix n n R, A * B = 1 → B * A = 1 from ⟨h A B, h B A⟩ intro A B hAB have h0 := detp_mul A B rw [hAB, detp_one_one, detp_neg_one_one, zero_add] at h0 replace h := congr(B * $(detp_smul_adjp hAB)) simp only [mul_add, mul_smul] at h replace h := congr($h + (detp 1 A * detp (-1) B + detp (-1) A * detp 1 B) • 1) simp_rw [add_smul, ← smul_smul] at h rwa [add_assoc, add_add_add_comm, ← smul_add, ← smul_add, add_add_add_comm, ← smul_add, ← smul_add, 
smul_add, smul_add, mul_adjp_add_detp, smul_add, ← mul_adjp_add_detp, smul_add, ← smul_add, ← smul_add, add_add_add_comm, smul_smul, smul_smul, ← add_smul, ← h0, add_smul, one_smul, ← add_assoc _ 1, add_comm _ 1, add_assoc, smul_add, smul_add, add_add_add_comm, smul_smul, smul_smul, ← add_smul, ((isAddUnit_detp_smul_mul_adjp hAB).add ((isAddUnit_detp_mul_detp hAB).smul_right _)).add_left_inj] at h variable (A B) /-- We can construct an instance of invertible A if A has a left inverse. -/ def invertibleOfLeftInverse (h : B * A = 1) : Invertible A := ⟨B, h, mul_eq_one_comm.mp h⟩ /-- We can construct an instance of invertible A if A has a right inverse. -/ def invertibleOfRightInverse (h : A * B = 1) : Invertible A := ⟨B, mul_eq_one_comm.mp h, h⟩ variable {A B} theorem isUnit_of_left_inverse (h : B * A = 1) : IsUnit A := ⟨⟨A, B, mul_eq_one_comm.mp h, h⟩, rfl⟩ theorem exists_left_inverse_iff_isUnit : (∃ B, B * A = 1) ↔ IsUnit A := ⟨fun ⟨_, h⟩ ↦ isUnit_of_left_inverse h, fun h ↦ have := h.invertible; ⟨⅟A, invOf_mul_self' A⟩⟩ theorem isUnit_of_right_inverse (h : A * B = 1) : IsUnit A := ⟨⟨A, B, h, mul_eq_one_comm.mp h⟩, rfl⟩ theorem exists_right_inverse_iff_isUnit : (∃ B, A * B = 1) ↔ IsUnit A := ⟨fun ⟨_, h⟩ ↦ isUnit_of_right_inverse h, fun h ↦ have := h.invertible; ⟨⅟A, mul_invOf_self' A⟩⟩ /-- A version of `mul_eq_one_comm` that works for square matrices with rectangular types. -/ theorem mul_eq_one_comm_of_equiv {A : Matrix m n R} {B : Matrix n m R} (e : m ≃ n) : A * B = 1 ↔ B * A = 1 := by refine (reindex e e).injective.eq_iff.symm.trans ?_ rw [reindex_apply, reindex_apply, submatrix_one_equiv, ← submatrix_mul_equiv _ _ _ (.refl _), mul_eq_one_comm, submatrix_mul_equiv, coe_refl, submatrix_id_id] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Notation.lean
import Mathlib.Algebra.Group.Fin.Tuple import Mathlib.Data.Fin.VecNotation import Mathlib.LinearAlgebra.Matrix.RowCol import Mathlib.Tactic.FinCases import Mathlib.Algebra.BigOperators.Fin /-! # Matrix and vector notation This file includes `simp` lemmas for applying operations in `Data.Matrix.Basic` to values built out of the matrix notation `![a, b] = vecCons a (vecCons b vecEmpty)` defined in `Data.Fin.VecNotation`. This also provides the new notation `!![a, b; c, d] = Matrix.of ![![a, b], ![c, d]]`. This notation also works for empty matrices; `!![,,,] : Matrix (Fin 0) (Fin 3)` and `!![;;;] : Matrix (Fin 3) (Fin 0)`. ## Implementation notes The `simp` lemmas require that one of the arguments is of the form `vecCons _ _`. This ensures `simp` works with entries only when (some) entries are already given. In other words, this notation will only appear in the output of `simp` if it already appears in the input. ## Notation This file provide notation `!![a, b; c, d]` for matrices, which corresponds to `Matrix.of ![![a, b], ![c, d]]`. ## Examples Examples of usage can be found in the `MathlibTest/matrix.lean` file. -/ namespace Matrix universe u uₘ uₙ uₒ variable {α : Type u} {o n m : ℕ} {m' : Type uₘ} {n' : Type uₙ} {o' : Type uₒ} open Matrix section toExpr open Lean Qq open Qq in /-- `Matrix.mkLiteralQ !![a, b; c, d]` produces the term `q(!![$a, $b; $c, $d])`. -/ def mkLiteralQ {u : Level} {α : Q(Type u)} {m n : Nat} (elems : Matrix (Fin m) (Fin n) Q($α)) : Q(Matrix (Fin $m) (Fin $n) $α) := let elems := PiFin.mkLiteralQ (α := q(Fin $n → $α)) fun i => PiFin.mkLiteralQ fun j => elems i j q(Matrix.of $elems) /-- Matrices can be reflected whenever their entries can. We insert a `Matrix.of` to prevent immediate decay to a function. 
-/ protected instance toExpr [ToLevel.{u}] [ToLevel.{uₘ}] [ToLevel.{uₙ}] [Lean.ToExpr α] [Lean.ToExpr m'] [Lean.ToExpr n'] [Lean.ToExpr (m' → n' → α)] : Lean.ToExpr (Matrix m' n' α) := have eα : Q(Type $(toLevel.{u})) := toTypeExpr α have em' : Q(Type $(toLevel.{uₘ})) := toTypeExpr m' have en' : Q(Type $(toLevel.{uₙ})) := toTypeExpr n' { toTypeExpr := q(Matrix $eα $em' $en') toExpr := fun M => have eM : Q($em' → $en' → $eα) := toExpr (show m' → n' → α from M) q(Matrix.of $eM) } end toExpr section Parser open Lean Meta Elab Term Macro TSyntax PrettyPrinter.Delaborator SubExpr /-- Notation for m×n matrices, aka `Matrix (Fin m) (Fin n) α`. For instance: * `!![a, b, c; d, e, f]` is the matrix with two rows and three columns, of type `Matrix (Fin 2) (Fin 3) α` * `!![a, b, c]` is a row vector of type `Matrix (Fin 1) (Fin 3) α` (see also `Matrix.row`). * `!![a; b; c]` is a column vector of type `Matrix (Fin 3) (Fin 1) α` (see also `Matrix.col`). This notation implements some special cases: * `![,,]`, with `n` `,`s, is a term of type `Matrix (Fin 0) (Fin n) α` * `![;;]`, with `m` `;`s, is a term of type `Matrix (Fin m) (Fin 0) α` * `![]` is the 0×0 matrix Note that vector notation is provided elsewhere (by `Matrix.vecNotation`) as `![a, b, c]`. Under the hood, `!![a, b, c; d, e, f]` is syntax for `Matrix.of ![![a, b, c], ![d, e, f]]`. 
-/ syntax (name := matrixNotation) "!![" ppRealGroup(sepBy1(ppGroup(term,+,?), ";", "; ", allowTrailingSep)) "]" : term @[inherit_doc matrixNotation] syntax (name := matrixNotationRx0) "!![" ";"+ "]" : term @[inherit_doc matrixNotation] syntax (name := matrixNotation0xC) "!![" ","* "]" : term macro_rules | `(!![$[$[$rows],*];*]) => do let m := rows.size let n := if h : 0 < m then rows[0].size else 0 let rowVecs ← rows.mapM fun row : Array Term => do unless row.size = n do Macro.throwErrorAt (mkNullNode row) s!"\ Rows must be of equal length; this row has {row.size} items, \ the previous rows have {n}" `(![$row,*]) `(@Matrix.of (Fin $(quote m)) (Fin $(quote n)) _ ![$rowVecs,*]) | `(!![$[;%$semicolons]*]) => do let emptyVec ← `(![]) let emptyVecs := semicolons.map (fun _ => emptyVec) `(@Matrix.of (Fin $(quote semicolons.size)) (Fin 0) _ ![$emptyVecs,*]) | `(!![$[,%$commas]*]) => `(@Matrix.of (Fin 0) (Fin $(quote commas.size)) _ ![]) /-- Delaborator for the `!![]` notation. -/ @[app_delab DFunLike.coe] def delabMatrixNotation : Delab := whenNotPPOption getPPExplicit <| whenPPOption getPPNotation <| withOverApp 6 do let mkApp3 (.const ``Matrix.of _) (.app (.const ``Fin _) em) (.app (.const ``Fin _) en) _ := (← getExpr).appFn!.appArg! 
| failure let some m ← withNatValue em (pure ∘ some) | failure let some n ← withNatValue en (pure ∘ some) | failure withAppArg do if m = 0 then guard <| (← getExpr).isAppOfArity ``vecEmpty 1 let commas := .replicate n (mkAtom ",") `(!![$[,%$commas]*]) else if n = 0 then let `(![$[![]%$evecs],*]) ← delab | failure `(!![$[;%$evecs]*]) else let `(![$[![$[$melems],*]],*]) ← delab | failure `(!![$[$[$melems],*];*]) end Parser variable (a b : ℕ) /-- Use `![...]` notation for displaying a `Fin`-indexed matrix, for example: ``` #eval !![1, 2; 3, 4] + !![3, 4; 5, 6] -- !![4, 6; 8, 10] ``` -/ instance repr [Repr α] : Repr (Matrix (Fin m) (Fin n) α) where reprPrec f _p := (Std.Format.bracket "!![" · "]") <| (Std.Format.joinSep · (";" ++ Std.Format.line)) <| (List.finRange m).map fun i => Std.Format.fill <| -- wrap line in a single place rather than all at once (Std.Format.joinSep · ("," ++ Std.Format.line)) <| (List.finRange n).map fun j => _root_.repr (f i j) @[simp] theorem cons_val' (v : n' → α) (B : Fin m → n' → α) (i j) : vecCons v B i j = vecCons (v j) (fun i => B i j) i := by refine Fin.cases ?_ ?_ i <;> simp @[simp] theorem head_val' (B : Fin m.succ → n' → α) (j : n') : (vecHead fun i => B i j) = vecHead B j := rfl @[simp] theorem tail_val' (B : Fin m.succ → n' → α) (j : n') : (vecTail fun i => B i j) = fun i => vecTail B i j := rfl section DotProduct variable [AddCommMonoid α] [Mul α] @[simp] theorem dotProduct_of_isEmpty [Fintype n'] [IsEmpty n'] (v w : n' → α) : v ⬝ᵥ w = 0 := Finset.sum_of_isEmpty _ @[deprecated "Use Matrix.dotProduct_of_isEmpty instead." 
(since := "2025-09-07")] theorem dotProduct_empty (v w : Fin 0 → α) : v ⬝ᵥ w = 0 := Finset.sum_empty @[simp] theorem cons_dotProduct (x : α) (v : Fin n → α) (w : Fin n.succ → α) : vecCons x v ⬝ᵥ w = x * vecHead w + v ⬝ᵥ vecTail w := by simp [dotProduct, Fin.sum_univ_succ, vecHead, vecTail] @[simp] theorem dotProduct_cons (v : Fin n.succ → α) (x : α) (w : Fin n → α) : v ⬝ᵥ vecCons x w = vecHead v * x + vecTail v ⬝ᵥ w := by simp [dotProduct, Fin.sum_univ_succ, vecHead, vecTail] theorem cons_dotProduct_cons (x : α) (v : Fin n → α) (y : α) (w : Fin n → α) : vecCons x v ⬝ᵥ vecCons y w = x * y + v ⬝ᵥ w := by simp end DotProduct section ColRow variable {ι : Type*} @[simp] theorem replicateCol_empty (v : Fin 0 → α) : replicateCol ι v = vecEmpty := empty_eq _ @[simp] theorem replicateCol_cons (x : α) (u : Fin m → α) : replicateCol ι (vecCons x u) = of (vecCons (fun _ => x) (replicateCol ι u)) := by ext i j refine Fin.cases ?_ ?_ i <;> simp @[simp] theorem replicateRow_empty : replicateRow ι (vecEmpty : Fin 0 → α) = of fun _ => vecEmpty := rfl @[simp] theorem replicateRow_cons (x : α) (u : Fin m → α) : replicateRow ι (vecCons x u) = of fun _ => vecCons x u := rfl end ColRow section Transpose @[simp] theorem transpose_empty_rows (A : Matrix m' (Fin 0) α) : Aᵀ = of ![] := empty_eq _ @[simp] theorem transpose_empty_cols (A : Matrix (Fin 0) m' α) : Aᵀ = of fun _ => ![] := funext fun _ => empty_eq _ @[simp] theorem cons_transpose (v : n' → α) (A : Matrix (Fin m) n' α) : (of (vecCons v A))ᵀ = of fun i => vecCons (v i) (Aᵀ i) := by ext i j refine Fin.cases ?_ ?_ j <;> simp @[simp] theorem head_transpose (A : Matrix m' (Fin n.succ) α) : vecHead (of.symm Aᵀ) = vecHead ∘ of.symm A := rfl @[simp] theorem tail_transpose (A : Matrix m' (Fin n.succ) α) : vecTail (of.symm Aᵀ) = (vecTail ∘ A)ᵀ := by ext i j rfl end Transpose section Mul variable [NonUnitalNonAssocSemiring α] @[simp] theorem empty_mul [Fintype n'] (A : Matrix (Fin 0) n' α) (B : Matrix n' o' α) : A * B = of ![] := empty_eq _ 
@[simp] theorem empty_mul_empty (A : Matrix m' (Fin 0) α) (B : Matrix (Fin 0) o' α) : A * B = 0 := rfl @[simp] theorem mul_empty [Fintype n'] (A : Matrix m' n' α) (B : Matrix n' (Fin 0) α) : A * B = of fun _ => ![] := funext fun _ => empty_eq _ theorem mul_val_succ [Fintype n'] (A : Matrix (Fin m.succ) n' α) (B : Matrix n' o' α) (i : Fin m) (j : o') : (A * B) i.succ j = (of (vecTail (of.symm A)) * B) i j := rfl @[simp] theorem cons_mul [Fintype n'] (v : n' → α) (A : Fin m → n' → α) (B : Matrix n' o' α) : of (vecCons v A) * B = of (vecCons (v ᵥ* B) (of.symm (of A * B))) := by ext i j refine Fin.cases ?_ ?_ i · rfl simp [mul_val_succ] end Mul section VecMul variable [NonUnitalNonAssocSemiring α] @[simp] theorem empty_vecMul (v : Fin 0 → α) (B : Matrix (Fin 0) o' α) : v ᵥ* B = 0 := rfl @[simp] theorem vecMul_empty [Fintype n'] (v : n' → α) (B : Matrix n' (Fin 0) α) : v ᵥ* B = ![] := empty_eq _ @[simp] theorem cons_vecMul (x : α) (v : Fin n → α) (B : Fin n.succ → o' → α) : vecCons x v ᵥ* of B = x • vecHead B + v ᵥ* of (vecTail B) := by ext i simp [vecMul] @[simp] theorem vecMul_cons (v : Fin n.succ → α) (w : o' → α) (B : Fin n → o' → α) : v ᵥ* of (vecCons w B) = vecHead v • w + vecTail v ᵥ* of B := by ext i simp [vecMul] theorem cons_vecMul_cons (x : α) (v : Fin n → α) (w : o' → α) (B : Fin n → o' → α) : vecCons x v ᵥ* of (vecCons w B) = x • w + v ᵥ* of B := by simp end VecMul section MulVec variable [NonUnitalNonAssocSemiring α] @[simp] theorem empty_mulVec [Fintype n'] (A : Matrix (Fin 0) n' α) (v : n' → α) : A *ᵥ v = ![] := empty_eq _ @[simp] theorem mulVec_empty (A : Matrix m' (Fin 0) α) (v : Fin 0 → α) : A *ᵥ v = 0 := rfl @[simp] theorem cons_mulVec [Fintype n'] (v : n' → α) (A : Fin m → n' → α) (w : n' → α) : (of <| vecCons v A) *ᵥ w = vecCons (v ⬝ᵥ w) (of A *ᵥ w) := by ext i refine Fin.cases ?_ ?_ i <;> simp [mulVec] @[simp] theorem mulVec_cons {α} [NonUnitalCommSemiring α] (A : m' → Fin n.succ → α) (x : α) (v : Fin n → α) : (of A) *ᵥ (vecCons x v) = x • vecHead 
∘ A + (of (vecTail ∘ A)) *ᵥ v := by ext i simp [mulVec, mul_comm] end MulVec section VecMulVec variable [NonUnitalNonAssocSemiring α] @[simp] theorem empty_vecMulVec (v : Fin 0 → α) (w : n' → α) : vecMulVec v w = ![] := empty_eq _ @[simp] theorem vecMulVec_empty (v : m' → α) (w : Fin 0 → α) : vecMulVec v w = of fun _ => ![] := funext fun _ => empty_eq _ @[simp] theorem cons_vecMulVec (x : α) (v : Fin m → α) (w : n' → α) : vecMulVec (vecCons x v) w = vecCons (x • w) (vecMulVec v w) := by ext i refine Fin.cases ?_ ?_ i <;> simp [vecMulVec] @[simp] theorem vecMulVec_cons (v : m' → α) (x : α) (w : Fin n → α) : vecMulVec v (vecCons x w) = of fun i => v i • vecCons x w := rfl end VecMulVec section SMul variable [NonUnitalNonAssocSemiring α] theorem smul_mat_empty {m' : Type*} (x : α) (A : Fin 0 → m' → α) : x • A = ![] := empty_eq _ @[deprecated (since := "2025-11-07")] alias smul_mat_cons := smul_cons end SMul section Submatrix @[simp] theorem submatrix_empty (A : Matrix m' n' α) (row : Fin 0 → m') (col : o' → n') : submatrix A row col = ![] := empty_eq _ @[simp] theorem submatrix_cons_row (A : Matrix m' n' α) (i : m') (row : Fin m → m') (col : o' → n') : submatrix A (vecCons i row) col = vecCons (fun j => A i (col j)) (submatrix A row col) := by ext i j refine Fin.cases ?_ ?_ i <;> simp [submatrix] /-- Updating a row then removing it is the same as removing it. -/ @[simp] theorem submatrix_updateRow_succAbove (A : Matrix (Fin m.succ) n' α) (v : n' → α) (f : o' → n') (i : Fin m.succ) : (A.updateRow i v).submatrix i.succAbove f = A.submatrix i.succAbove f := ext fun r s => (congr_fun (updateRow_ne (Fin.succAbove_ne i r) : _ = A _) (f s) :) /-- Updating a column then removing it is the same as removing it. 
-/ @[simp] theorem submatrix_updateCol_succAbove (A : Matrix m' (Fin n.succ) α) (v : m' → α) (f : o' → m') (i : Fin n.succ) : (A.updateCol i v).submatrix f i.succAbove = A.submatrix f i.succAbove := ext fun _r s => updateCol_ne (Fin.succAbove_ne i s) end Submatrix section Vec2AndVec3 section One variable [Zero α] [One α] theorem one_fin_two : (1 : Matrix (Fin 2) (Fin 2) α) = !![1, 0; 0, 1] := by ext i j fin_cases i <;> fin_cases j <;> rfl theorem one_fin_three : (1 : Matrix (Fin 3) (Fin 3) α) = !![1, 0, 0; 0, 1, 0; 0, 0, 1] := by ext i j fin_cases i <;> fin_cases j <;> rfl end One section AddMonoidWithOne variable [AddMonoidWithOne α] theorem natCast_fin_two (n : ℕ) : (n : Matrix (Fin 2) (Fin 2) α) = !![↑n, 0; 0, ↑n] := by ext i j fin_cases i <;> fin_cases j <;> rfl theorem natCast_fin_three (n : ℕ) : (n : Matrix (Fin 3) (Fin 3) α) = !![↑n, 0, 0; 0, ↑n, 0; 0, 0, ↑n] := by ext i j fin_cases i <;> fin_cases j <;> rfl theorem ofNat_fin_two (n : ℕ) [n.AtLeastTwo] : (ofNat(n) : Matrix (Fin 2) (Fin 2) α) = !![ofNat(n), 0; 0, ofNat(n)] := natCast_fin_two _ theorem ofNat_fin_three (n : ℕ) [n.AtLeastTwo] : (ofNat(n) : Matrix (Fin 3) (Fin 3) α) = !![ofNat(n), 0, 0; 0, ofNat(n), 0; 0, 0, ofNat(n)] := natCast_fin_three _ end AddMonoidWithOne theorem eta_fin_two (A : Matrix (Fin 2) (Fin 2) α) : A = !![A 0 0, A 0 1; A 1 0, A 1 1] := by ext i j fin_cases i <;> fin_cases j <;> rfl theorem eta_fin_three (A : Matrix (Fin 3) (Fin 3) α) : A = !![A 0 0, A 0 1, A 0 2; A 1 0, A 1 1, A 1 2; A 2 0, A 2 1, A 2 2] := by ext i j fin_cases i <;> fin_cases j <;> rfl theorem mul_fin_two [AddCommMonoid α] [Mul α] (a₁₁ a₁₂ a₂₁ a₂₂ b₁₁ b₁₂ b₂₁ b₂₂ : α) : !![a₁₁, a₁₂; a₂₁, a₂₂] * !![b₁₁, b₁₂; b₂₁, b₂₂] = !![a₁₁ * b₁₁ + a₁₂ * b₂₁, a₁₁ * b₁₂ + a₁₂ * b₂₂; a₂₁ * b₁₁ + a₂₂ * b₂₁, a₂₁ * b₁₂ + a₂₂ * b₂₂] := by ext i j fin_cases i <;> fin_cases j <;> simp [Matrix.mul_apply, Fin.sum_univ_succ] set_option linter.style.commandStart false in -- Preserve the formatting of the matrices. 
theorem mul_fin_three [AddCommMonoid α] [Mul α] (a₁₁ a₁₂ a₁₃ a₂₁ a₂₂ a₂₃ a₃₁ a₃₂ a₃₃ b₁₁ b₁₂ b₁₃ b₂₁ b₂₂ b₂₃ b₃₁ b₃₂ b₃₃ : α) : !![a₁₁, a₁₂, a₁₃; a₂₁, a₂₂, a₂₃; a₃₁, a₃₂, a₃₃] * !![b₁₁, b₁₂, b₁₃; b₂₁, b₂₂, b₂₃; b₃₁, b₃₂, b₃₃] = !![a₁₁*b₁₁ + a₁₂*b₂₁ + a₁₃*b₃₁, a₁₁*b₁₂ + a₁₂*b₂₂ + a₁₃*b₃₂, a₁₁*b₁₃ + a₁₂*b₂₃ + a₁₃*b₃₃; a₂₁*b₁₁ + a₂₂*b₂₁ + a₂₃*b₃₁, a₂₁*b₁₂ + a₂₂*b₂₂ + a₂₃*b₃₂, a₂₁*b₁₃ + a₂₂*b₂₃ + a₂₃*b₃₃; a₃₁*b₁₁ + a₃₂*b₂₁ + a₃₃*b₃₁, a₃₁*b₁₂ + a₃₂*b₂₂ + a₃₃*b₃₂, a₃₁*b₁₃ + a₃₂*b₂₃ + a₃₃*b₃₃] := by ext i j fin_cases i <;> fin_cases j <;> simp [Matrix.mul_apply, Fin.sum_univ_succ, ← add_assoc] theorem vec2_eq {a₀ a₁ b₀ b₁ : α} (h₀ : a₀ = b₀) (h₁ : a₁ = b₁) : ![a₀, a₁] = ![b₀, b₁] := by simp [h₀, h₁] theorem vec3_eq {a₀ a₁ a₂ b₀ b₁ b₂ : α} (h₀ : a₀ = b₀) (h₁ : a₁ = b₁) (h₂ : a₂ = b₂) : ![a₀, a₁, a₂] = ![b₀, b₁, b₂] := by simp [h₀, h₁, h₂] theorem vec2_add [Add α] (a₀ a₁ b₀ b₁ : α) : ![a₀, a₁] + ![b₀, b₁] = ![a₀ + b₀, a₁ + b₁] := by simp theorem vec3_add [Add α] (a₀ a₁ a₂ b₀ b₁ b₂ : α) : ![a₀, a₁, a₂] + ![b₀, b₁, b₂] = ![a₀ + b₀, a₁ + b₁, a₂ + b₂] := by simp theorem smul_vec2 {R : Type*} [SMul R α] (x : R) (a₀ a₁ : α) : x • ![a₀, a₁] = ![x • a₀, x • a₁] := by simp theorem smul_vec3 {R : Type*} [SMul R α] (x : R) (a₀ a₁ a₂ : α) : x • ![a₀, a₁, a₂] = ![x • a₀, x • a₁, x • a₂] := by simp variable [AddCommMonoid α] [Mul α] theorem vec2_dotProduct' {a₀ a₁ b₀ b₁ : α} : ![a₀, a₁] ⬝ᵥ ![b₀, b₁] = a₀ * b₀ + a₁ * b₁ := by simp @[simp] theorem vec2_dotProduct (v w : Fin 2 → α) : v ⬝ᵥ w = v 0 * w 0 + v 1 * w 1 := vec2_dotProduct' theorem vec3_dotProduct' {a₀ a₁ a₂ b₀ b₁ b₂ : α} : ![a₀, a₁, a₂] ⬝ᵥ ![b₀, b₁, b₂] = a₀ * b₀ + a₁ * b₁ + a₂ * b₂ := by simp [add_assoc] -- This is not tagged `@[simp]` because it does not mesh well with simp lemmas for -- dot and cross products in dimension 3. 
theorem vec3_dotProduct (v w : Fin 3 → α) : v ⬝ᵥ w = v 0 * w 0 + v 1 * w 1 + v 2 * w 2 := vec3_dotProduct' end Vec2AndVec3 end Matrix @[simp] lemma injective_pair_iff_ne {α : Type*} {x y : α} : Function.Injective ![x, y] ↔ x ≠ y := by refine ⟨fun h ↦ ?_, fun h a b h' ↦ ?_⟩ · simpa using h.ne Fin.zero_ne_one · fin_cases a <;> fin_cases b <;> aesop
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Defs.lean
import Mathlib.Algebra.Module.Pi /-! # Matrices This file defines basic properties of matrices up to the module structure. Matrices with rows indexed by `m`, columns indexed by `n`, and entries of type `α` are represented with `Matrix m n α`. For the typical approach of counting rows and columns, `Matrix (Fin m) (Fin n) α` can be used. ## Main definitions * `Matrix.transpose`: transpose of a matrix, turning rows into columns and vice versa * `Matrix.submatrix`: take a submatrix by reindexing rows and columns * `Matrix.module`: matrices are a module over the ring of entries * `Set.matrix`: set of matrices with entries in a given set ## Notation The scope `Matrix` gives the following notation: * `ᵀ` for `Matrix.transpose` See `Mathlib/Data/Matrix/ConjTranspose.lean` for * `ᴴ` for `Matrix.conjTranspose` ## Implementation notes For convenience, `Matrix m n α` is defined as `m → n → α`, as this allows elements of the matrix to be accessed with `A i j`. However, it is not advisable to _construct_ matrices using terms of the form `fun i j ↦ _` or even `(fun i j ↦ _ : Matrix m n α)`, as these are not recognized by Lean as having the right type. Instead, `Matrix.of` should be used. -/ assert_not_exists Algebra TrivialStar universe u u' v w /-- `Matrix m n R` is the type of matrices with entries in `R`, whose rows are indexed by `m` and whose columns are indexed by `n`. -/ def Matrix (m : Type u) (n : Type u') (α : Type v) : Type max u u' v := m → n → α variable {l m n o : Type*} {m' : o → Type*} {n' : o → Type*} variable {R : Type*} {S : Type*} {α : Type v} {β : Type w} {γ : Type*} namespace Matrix section Ext variable {M N : Matrix m n α} theorem ext_iff : (∀ i j, M i j = N i j) ↔ M = N := ⟨fun h => funext fun i => funext <| h i, fun h => by simp [h]⟩ @[ext] theorem ext : (∀ i j, M i j = N i j) → M = N := ext_iff.mp end Ext /-- Cast a function into a matrix. The two sides of the equivalence are definitionally equal types. 
We want to use an explicit cast to distinguish the types because `Matrix` has different instances to pi types (such as `Pi.mul`, which performs elementwise multiplication, vs `Matrix.mul`). If you are defining a matrix, in terms of its entries, use `of (fun i j ↦ _)`. The purpose of this approach is to ensure that terms of the form `(fun i j ↦ _) * (fun i j ↦ _)` do not appear, as the type of `*` can be misleading. -/ def of : (m → n → α) ≃ Matrix m n α := Equiv.refl _ @[simp] theorem of_apply (f : m → n → α) (i j) : of f i j = f i j := rfl @[simp] theorem of_symm_apply (f : Matrix m n α) (i j) : of.symm f i j = f i j := rfl /-- `M.map f` is the matrix obtained by applying `f` to each entry of the matrix `M`. This is available in bundled forms as: * `AddMonoidHom.mapMatrix` * `LinearMap.mapMatrix` * `RingHom.mapMatrix` * `AlgHom.mapMatrix` * `Equiv.mapMatrix` * `AddEquiv.mapMatrix` * `LinearEquiv.mapMatrix` * `RingEquiv.mapMatrix` * `AlgEquiv.mapMatrix` -/ def map (M : Matrix m n α) (f : α → β) : Matrix m n β := of fun i j => f (M i j) @[simp] theorem map_apply {M : Matrix m n α} {f : α → β} {i : m} {j : n} : M.map f i j = f (M i j) := rfl @[simp] theorem map_id (M : Matrix m n α) : M.map id = M := by ext rfl @[simp] theorem map_id' (M : Matrix m n α) : M.map (·) = M := map_id M @[simp] theorem map_map {M : Matrix m n α} {β γ : Type*} {f : α → β} {g : β → γ} : (M.map f).map g = M.map (g ∘ f) := by ext rfl theorem map_injective {f : α → β} (hf : Function.Injective f) : Function.Injective fun M : Matrix m n α => M.map f := fun _ _ h => ext fun i j => hf <| ext_iff.mpr h i j /-- The transpose of a matrix. 
-/ def transpose (M : Matrix m n α) : Matrix n m α := of fun x y => M y x -- TODO: set as an equation lemma for `transpose`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem transpose_apply (M : Matrix m n α) (i j) : transpose M i j = M j i := rfl @[inherit_doc] scoped postfix:1024 "ᵀ" => Matrix.transpose instance inhabited [Inhabited α] : Inhabited (Matrix m n α) := inferInstanceAs <| Inhabited <| m → n → α instance add [Add α] : Add (Matrix m n α) := Pi.instAdd instance addSemigroup [AddSemigroup α] : AddSemigroup (Matrix m n α) := Pi.addSemigroup instance addCommSemigroup [AddCommSemigroup α] : AddCommSemigroup (Matrix m n α) := Pi.addCommSemigroup instance zero [Zero α] : Zero (Matrix m n α) := Pi.instZero instance addZeroClass [AddZeroClass α] : AddZeroClass (Matrix m n α) := Pi.addZeroClass instance addMonoid [AddMonoid α] : AddMonoid (Matrix m n α) := Pi.addMonoid instance addCommMonoid [AddCommMonoid α] : AddCommMonoid (Matrix m n α) := Pi.addCommMonoid instance neg [Neg α] : Neg (Matrix m n α) := Pi.instNeg instance sub [Sub α] : Sub (Matrix m n α) := Pi.instSub instance addGroup [AddGroup α] : AddGroup (Matrix m n α) := Pi.addGroup instance addCommGroup [AddCommGroup α] : AddCommGroup (Matrix m n α) := Pi.addCommGroup instance unique [Unique α] : Unique (Matrix m n α) := Pi.unique instance subsingleton [Subsingleton α] : Subsingleton (Matrix m n α) := inferInstanceAs <| Subsingleton <| m → n → α instance nonempty [Nonempty m] [Nonempty n] [Nontrivial α] : Nontrivial (Matrix m n α) := Function.nontrivial instance smul [SMul R α] : SMul R (Matrix m n α) := Pi.instSMul instance smulCommClass [SMul R α] [SMul S α] [SMulCommClass R S α] : SMulCommClass R S (Matrix m n α) := Pi.smulCommClass instance isScalarTower [SMul R S] [SMul R α] [SMul S α] [IsScalarTower R S α] : IsScalarTower R S (Matrix m n α) := Pi.isScalarTower instance isCentralScalar [SMul R α] [SMul Rᵐᵒᵖ α] [IsCentralScalar R α] : IsCentralScalar R (Matrix m n α) := 
Pi.isCentralScalar instance mulAction [Monoid R] [MulAction R α] : MulAction R (Matrix m n α) := Pi.mulAction _ instance distribMulAction [Monoid R] [AddMonoid α] [DistribMulAction R α] : DistribMulAction R (Matrix m n α) := Pi.distribMulAction _ instance module [Semiring R] [AddCommMonoid α] [Module R α] : Module R (Matrix m n α) := Pi.module _ _ _ section @[simp] theorem zero_apply [Zero α] (i : m) (j : n) : (0 : Matrix m n α) i j = 0 := rfl @[simp] theorem add_apply [Add α] (A B : Matrix m n α) (i : m) (j : n) : (A + B) i j = (A i j) + (B i j) := rfl @[simp] theorem smul_apply [SMul β α] (r : β) (A : Matrix m n α) (i : m) (j : n) : (r • A) i j = r • (A i j) := rfl @[simp] theorem sub_apply [Sub α] (A B : Matrix m n α) (i : m) (j : n) : (A - B) i j = (A i j) - (B i j) := rfl @[simp] theorem neg_apply [Neg α] (A : Matrix m n α) (i : m) (j : n) : (-A) i j = -(A i j) := rfl protected theorem dite_apply (P : Prop) [Decidable P] (A : P → Matrix m n α) (B : ¬P → Matrix m n α) (i : m) (j : n) : dite P A B i j = dite P (A · i j) (B · i j) := by rw [dite_apply, dite_apply] protected theorem ite_apply (P : Prop) [Decidable P] (A : Matrix m n α) (B : Matrix m n α) (i : m) (j : n) : (if P then A else B) i j = if P then A i j else B i j := Matrix.dite_apply _ _ _ _ _ end /-! simp-normal form pulls `of` to the outside. 
-/ @[simp] theorem of_zero [Zero α] : of (0 : m → n → α) = 0 := rfl @[simp] theorem of_add_of [Add α] (f g : m → n → α) : of f + of g = of (f + g) := rfl @[simp] theorem of_sub_of [Sub α] (f g : m → n → α) : of f - of g = of (f - g) := rfl @[simp] theorem neg_of [Neg α] (f : m → n → α) : -of f = of (-f) := rfl @[simp] theorem smul_of [SMul R α] (r : R) (f : m → n → α) : r • of f = of (r • f) := rfl @[simp] protected theorem map_zero [Zero α] [Zero β] (f : α → β) (h : f 0 = 0) : (0 : Matrix m n α).map f = 0 := by ext simp [h] protected theorem map_add [Add α] [Add β] (f : α → β) (hf : ∀ a₁ a₂, f (a₁ + a₂) = f a₁ + f a₂) (M N : Matrix m n α) : (M + N).map f = M.map f + N.map f := ext fun _ _ => hf _ _ protected theorem map_sub [Sub α] [Sub β] (f : α → β) (hf : ∀ a₁ a₂, f (a₁ - a₂) = f a₁ - f a₂) (M N : Matrix m n α) : (M - N).map f = M.map f - N.map f := ext fun _ _ => hf _ _ protected theorem map_smul [SMul R α] [SMul R β] (f : α → β) (r : R) (hf : ∀ a, f (r • a) = r • f a) (M : Matrix m n α) : (r • M).map f = r • M.map f := ext fun _ _ => hf _ protected theorem map_smulₛₗ [SMul R α] [SMul S β] (f : α → β) (σ : R → S) (r : R) (hf : ∀ a, f (r • a) = σ r • f a) (M : Matrix m n α) : (r • M).map f = σ r • M.map f := ext fun _ _ => hf _ /-- The scalar action via `Mul.toSMul` is transformed by the same map as the elements of the matrix, when `f` preserves multiplication. -/ theorem map_smul' [Mul α] [Mul β] (f : α → β) (r : α) (A : Matrix n n α) (hf : ∀ a₁ a₂, f (a₁ * a₂) = f a₁ * f a₂) : (r • A).map f = f r • A.map f := ext fun _ _ => hf _ _ /-- The scalar action via `mul.toOppositeSMul` is transformed by the same map as the elements of the matrix, when `f` preserves multiplication. 
-/ theorem map_op_smul' [Mul α] [Mul β] (f : α → β) (r : α) (A : Matrix n n α) (hf : ∀ a₁ a₂, f (a₁ * a₂) = f a₁ * f a₂) : (MulOpposite.op r • A).map f = MulOpposite.op (f r) • A.map f := ext fun _ _ => hf _ _ theorem _root_.IsSMulRegular.matrix [SMul R S] {k : R} (hk : IsSMulRegular S k) : IsSMulRegular (Matrix m n S) k := IsSMulRegular.pi fun _ => IsSMulRegular.pi fun _ => hk theorem _root_.IsLeftRegular.matrix [Mul α] {k : α} (hk : IsLeftRegular k) : IsSMulRegular (Matrix m n α) k := hk.isSMulRegular.matrix instance subsingleton_of_empty_left [IsEmpty m] : Subsingleton (Matrix m n α) := ⟨fun M N => by ext i exact isEmptyElim i⟩ instance subsingleton_of_empty_right [IsEmpty n] : Subsingleton (Matrix m n α) := ⟨fun M N => by ext i j exact isEmptyElim j⟩ /-- This is `Matrix.of` bundled as an additive equivalence. -/ def ofAddEquiv [Add α] : (m → n → α) ≃+ Matrix m n α where __ := of map_add' _ _ := rfl @[simp] lemma coe_ofAddEquiv [Add α] : ⇑(ofAddEquiv : (m → n → α) ≃+ Matrix m n α) = of := rfl @[simp] lemma coe_ofAddEquiv_symm [Add α] : ⇑(ofAddEquiv.symm : Matrix m n α ≃+ (m → n → α)) = of.symm := rfl @[simp] lemma isAddUnit_iff [AddMonoid α] {A : Matrix m n α} : IsAddUnit A ↔ ∀ i j, IsAddUnit (A i j) := by simp_rw [isAddUnit_iff_exists, Classical.skolem, forall_and, ← Matrix.ext_iff, add_apply, zero_apply] rfl end Matrix open Matrix namespace Matrix section Transpose @[simp] theorem transpose_transpose (M : Matrix m n α) : Mᵀᵀ = M := by ext rfl theorem transpose_injective : Function.Injective (transpose : Matrix m n α → Matrix n m α) := fun _ _ h => ext fun i j => ext_iff.2 h j i @[simp] theorem transpose_inj {A B : Matrix m n α} : Aᵀ = Bᵀ ↔ A = B := transpose_injective.eq_iff @[simp] theorem transpose_zero [Zero α] : (0 : Matrix m n α)ᵀ = 0 := rfl @[simp] theorem transpose_eq_zero [Zero α] {M : Matrix m n α} : Mᵀ = 0 ↔ M = 0 := transpose_inj @[simp] theorem transpose_add [Add α] (M : Matrix m n α) (N : Matrix m n α) : (M + N)ᵀ = Mᵀ + Nᵀ := by ext simp @[simp] 
theorem transpose_sub [Sub α] (M : Matrix m n α) (N : Matrix m n α) : (M - N)ᵀ = Mᵀ - Nᵀ := by ext simp @[simp] theorem transpose_smul {R : Type*} [SMul R α] (c : R) (M : Matrix m n α) : (c • M)ᵀ = c • Mᵀ := by ext rfl @[simp] theorem transpose_neg [Neg α] (M : Matrix m n α) : (-M)ᵀ = -Mᵀ := by ext rfl theorem transpose_map {f : α → β} {M : Matrix m n α} : Mᵀ.map f = (M.map f)ᵀ := by ext rfl end Transpose /-- Given maps `(r : l → m)` and `(c : o → n)` reindexing the rows and columns of a matrix `M : Matrix m n α`, the matrix `M.submatrix r c : Matrix l o α` is defined by `(M.submatrix r c) i j = M (r i) (c j)` for `(i,j) : l × o`. Note that the total number of row and columns does not have to be preserved. -/ def submatrix (A : Matrix m n α) (r : l → m) (c : o → n) : Matrix l o α := of fun i j => A (r i) (c j) @[simp] theorem submatrix_apply (A : Matrix m n α) (r : l → m) (c : o → n) (i j) : A.submatrix r c i j = A (r i) (c j) := rfl @[simp] theorem submatrix_id_id (A : Matrix m n α) : A.submatrix id id = A := ext fun _ _ => rfl @[simp] theorem submatrix_submatrix {l₂ o₂ : Type*} (A : Matrix m n α) (r₁ : l → m) (c₁ : o → n) (r₂ : l₂ → l) (c₂ : o₂ → o) : (A.submatrix r₁ c₁).submatrix r₂ c₂ = A.submatrix (r₁ ∘ r₂) (c₁ ∘ c₂) := ext fun _ _ => rfl @[simp] theorem transpose_submatrix (A : Matrix m n α) (r : l → m) (c : o → n) : (A.submatrix r c)ᵀ = Aᵀ.submatrix c r := ext fun _ _ => rfl theorem submatrix_add [Add α] (A B : Matrix m n α) : ((A + B).submatrix : (l → m) → (o → n) → Matrix l o α) = A.submatrix + B.submatrix := rfl theorem submatrix_neg [Neg α] (A : Matrix m n α) : ((-A).submatrix : (l → m) → (o → n) → Matrix l o α) = -A.submatrix := rfl theorem submatrix_sub [Sub α] (A B : Matrix m n α) : ((A - B).submatrix : (l → m) → (o → n) → Matrix l o α) = A.submatrix - B.submatrix := rfl @[simp] theorem submatrix_zero [Zero α] : ((0 : Matrix m n α).submatrix : (l → m) → (o → n) → Matrix l o α) = 0 := rfl theorem submatrix_smul {R : Type*} [SMul R α] (r : R) (A : 
Matrix m n α) : ((r • A : Matrix m n α).submatrix : (l → m) → (o → n) → Matrix l o α) = r • A.submatrix := rfl theorem submatrix_map (f : α → β) (e₁ : l → m) (e₂ : o → n) (A : Matrix m n α) : (A.map f).submatrix e₁ e₂ = (A.submatrix e₁ e₂).map f := rfl /-- The natural map that reindexes a matrix's rows and columns with equivalent types is an equivalence. -/ def reindex (eₘ : m ≃ l) (eₙ : n ≃ o) : Matrix m n α ≃ Matrix l o α where toFun M := M.submatrix eₘ.symm eₙ.symm invFun M := M.submatrix eₘ eₙ left_inv M := by simp right_inv M := by simp @[simp] theorem reindex_apply (eₘ : m ≃ l) (eₙ : n ≃ o) (M : Matrix m n α) : reindex eₘ eₙ M = M.submatrix eₘ.symm eₙ.symm := rfl theorem reindex_refl_refl (A : Matrix m n α) : reindex (Equiv.refl _) (Equiv.refl _) A = A := A.submatrix_id_id @[simp] theorem reindex_symm (eₘ : m ≃ l) (eₙ : n ≃ o) : (reindex eₘ eₙ).symm = (reindex eₘ.symm eₙ.symm : Matrix l o α ≃ _) := rfl @[simp] theorem reindex_trans {l₂ o₂ : Type*} (eₘ : m ≃ l) (eₙ : n ≃ o) (eₘ₂ : l ≃ l₂) (eₙ₂ : o ≃ o₂) : (reindex eₘ eₙ).trans (reindex eₘ₂ eₙ₂) = (reindex (eₘ.trans eₘ₂) (eₙ.trans eₙ₂) : Matrix m n α ≃ _) := Equiv.ext fun A => (A.submatrix_submatrix eₘ.symm eₙ.symm eₘ₂.symm eₙ₂.symm :) theorem transpose_reindex (eₘ : m ≃ l) (eₙ : n ≃ o) (M : Matrix m n α) : (reindex eₘ eₙ M)ᵀ = reindex eₙ eₘ Mᵀ := rfl /-- The left `n × l` part of an `n × (l+r)` matrix. -/ abbrev subLeft {m l r : Nat} (A : Matrix (Fin m) (Fin (l + r)) α) : Matrix (Fin m) (Fin l) α := submatrix A id (Fin.castAdd r) /-- The right `n × r` part of an `n × (l+r)` matrix. -/ abbrev subRight {m l r : Nat} (A : Matrix (Fin m) (Fin (l + r)) α) : Matrix (Fin m) (Fin r) α := submatrix A id (Fin.natAdd l) /-- The top `u × n` part of a `(u+d) × n` matrix. -/ abbrev subUp {d u n : Nat} (A : Matrix (Fin (u + d)) (Fin n) α) : Matrix (Fin u) (Fin n) α := submatrix A (Fin.castAdd d) id /-- The bottom `d × n` part of a `(u+d) × n` matrix. 
-/ abbrev subDown {d u n : Nat} (A : Matrix (Fin (u + d)) (Fin n) α) : Matrix (Fin d) (Fin n) α := submatrix A (Fin.natAdd u) id /-- The top-right `u × r` part of a `(u+d) × (l+r)` matrix. -/ abbrev subUpRight {d u l r : Nat} (A : Matrix (Fin (u + d)) (Fin (l + r)) α) : Matrix (Fin u) (Fin r) α := subUp (subRight A) /-- The bottom-right `d × r` part of a `(u+d) × (l+r)` matrix. -/ abbrev subDownRight {d u l r : Nat} (A : Matrix (Fin (u + d)) (Fin (l + r)) α) : Matrix (Fin d) (Fin r) α := subDown (subRight A) /-- The top-left `u × l` part of a `(u+d) × (l+r)` matrix. -/ abbrev subUpLeft {d u l r : Nat} (A : Matrix (Fin (u + d)) (Fin (l + r)) α) : Matrix (Fin u) (Fin l) α := subUp (subLeft A) /-- The bottom-left `d × l` part of a `(u+d) × (l+r)` matrix. -/ abbrev subDownLeft {d u l r : Nat} (A : Matrix (Fin (u + d)) (Fin (l + r)) α) : Matrix (Fin d) (Fin l) α := subDown (subLeft A) section RowCol /-- For an `m × n` `α`-matrix `A`, `A.row i` is the `i`th row of `A` as a vector in `n → α`. `A.row` is defeq to `A`, but explicitly refers to the 'row function' of `A` while avoiding defeq abuse and noisy eta-expansions, such as in expressions like `Set.Injective A.row` and `Set.range A.row`. (Note 2025-04-07 : the identifier `Matrix.row` used to refer to a matrix with all rows equal; this is now called `Matrix.replicateRow`) -/ def row (A : Matrix m n α) : m → n → α := A /-- For an `m × n` `α`-matrix `A`, `A.col j` is the `j`th column of `A` as a vector in `m → α`. `A.col` is defeq to `Aᵀ`, but refers to the 'column function' of `A` while avoiding defeq abuse and noisy eta-expansions (and without the simplifier unfolding transposes) in expressions like `Set.Injective A.col` and `Set.range A.col`. 
(Note 2025-04-07 : the identifier `Matrix.col` used to refer to a matrix with all columns equal; this is now called `Matrix.replicateCol`) -/ def col (A : Matrix m n α) : n → m → α := Aᵀ lemma row_eq_self (A : Matrix m n α) : A.row = of.symm A := rfl lemma col_eq_transpose (A : Matrix m n α) : A.col = of.symm Aᵀ := rfl @[simp] lemma of_row (f : m → n → α) : (Matrix.of f).row = f := rfl @[simp] lemma of_col (f : m → n → α) : (Matrix.of f)ᵀ.col = f := rfl lemma row_def (A : Matrix m n α) : A.row = fun i ↦ A i := rfl lemma col_def (A : Matrix m n α) : A.col = fun j ↦ Aᵀ j := rfl @[simp] lemma row_apply (A : Matrix m n α) (i : m) (j : n) : A.row i j = A i j := rfl /-- A partially applied version of `Matrix.row_apply` -/ lemma row_apply' (A : Matrix m n α) (i : m) : A.row i = A i := rfl @[simp] lemma col_apply (A : Matrix m n α) (i : n) (j : m) : A.col i j = A j i := rfl /-- A partially applied version of `Matrix.col_apply` -/ lemma col_apply' (A : Matrix m n α) (i : n) : A.col i = fun j ↦ A j i := rfl section /-- Two matrices agree if their rows agree. -/ @[local ext] lemma ext_row {A B : Matrix m n α} (h : ∀ i, A.row i = B.row i) : A = B := ext fun i j => congr_fun (h i) j /-- Two matrices agree if their columns agree. 
-/ @[local ext] lemma ext_col {A B : Matrix m n α} (h : ∀ j, A.col j = B.col j) : A = B := ext fun i j => congr_fun (h j) i end lemma row_submatrix {m₀ n₀ : Type*} (A : Matrix m n α) (r : m₀ → m) (c : n₀ → n) (i : m₀) : (A.submatrix r c).row i = (A.submatrix id c).row (r i) := rfl lemma row_submatrix_eq_comp {m₀ n₀ : Type*} (A : Matrix m n α) (r : m₀ → m) (c : n₀ → n) (i : m₀) : (A.submatrix r c).row i = A.row (r i) ∘ c := rfl lemma col_submatrix {m₀ n₀ : Type*} (A : Matrix m n α) (r : m₀ → m) (c : n₀ → n) (j : n₀) : (A.submatrix r c).col j = (A.submatrix r id).col (c j) := rfl lemma col_submatrix_eq_comp {m₀ n₀ : Type*} (A : Matrix m n α) (r : m₀ → m) (c : n₀ → n) (j : n₀) : (A.submatrix r c).col j = A.col (c j) ∘ r := rfl lemma row_map (A : Matrix m n α) (f : α → β) (i : m) : (A.map f).row i = f ∘ A.row i := rfl lemma col_map (A : Matrix m n α) (f : α → β) (j : n) : (A.map f).col j = f ∘ A.col j := rfl @[simp] lemma row_transpose (A : Matrix m n α) : Aᵀ.row = A.col := rfl @[simp] lemma col_transpose (A : Matrix m n α) : Aᵀ.col = A.row := rfl end RowCol end Matrix namespace Set /-- Given a set `S`, `S.matrix` is the set of matrices `M` all of whose entries `M i j` belong to `S`. 
-/ def matrix (S : Set α) : Set (Matrix m n α) := {M | ∀ i j, M i j ∈ S} theorem mem_matrix {S : Set α} {M : Matrix m n α} : M ∈ S.matrix ↔ ∀ i j, M i j ∈ S := .rfl theorem matrix_eq_pi {S : Set α} : S.matrix = of.symm ⁻¹' Set.univ.pi fun (_ : m) ↦ Set.univ.pi fun (_ : n) ↦ S := by ext simp [Set.mem_matrix] end Set namespace Matrix variable {S : Set α} @[simp] theorem transpose_mem_matrix_iff {M : Matrix m n α} : Mᵀ ∈ S.matrix ↔ M ∈ S.matrix := forall_comm theorem submatrix_mem_matrix {M : Matrix m n α} {r : l → m} {c : o → n} (hM : M ∈ S.matrix) : M.submatrix r c ∈ S.matrix := by simp_all [Set.mem_matrix] theorem submatrix_mem_matrix_iff {M : Matrix m n α} {r : l → m} {c : o → n} (hr : Function.Surjective r) (hc : Function.Surjective c) : M.submatrix r c ∈ S.matrix ↔ M ∈ S.matrix := ⟨(hr.forall.mpr fun _ => hc.forall.mpr fun _ => · _ _), submatrix_mem_matrix⟩ end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Trace.lean
import Mathlib.Data.Matrix.Basis import Mathlib.Data.Matrix.Block import Mathlib.LinearAlgebra.Matrix.Notation import Mathlib.LinearAlgebra.Matrix.RowCol /-! # Trace of a matrix This file defines the trace of a matrix, the map sending a matrix to the sum of its diagonal entries. See also `LinearAlgebra.Trace` for the trace of an endomorphism. ## Tags matrix, trace, diagonal -/ open Matrix namespace Matrix variable {ι m n p : Type*} {α R S : Type*} variable [Fintype m] [Fintype n] [Fintype p] section AddCommMonoid variable [AddCommMonoid R] /-- The trace of a square matrix. For more bundled versions, see: * `Matrix.traceAddMonoidHom` * `Matrix.traceLinearMap` -/ def trace (A : Matrix n n R) : R := ∑ i, diag A i @[simp] lemma trace_diagonal {o} [Fintype o] [DecidableEq o] (d : o → R) : trace (diagonal d) = ∑ i, d i := by simp only [trace, diag_apply, diagonal_apply_eq] variable (n R) @[simp] theorem trace_zero : trace (0 : Matrix n n R) = 0 := (Finset.sum_const (0 : R)).trans <| smul_zero _ variable {n R} @[simp] lemma trace_eq_zero_of_isEmpty [IsEmpty n] (A : Matrix n n R) : trace A = 0 := by simp [trace] @[simp] theorem trace_add (A B : Matrix n n R) : trace (A + B) = trace A + trace B := Finset.sum_add_distrib @[simp] theorem trace_smul [DistribSMul α R] (r : α) (A : Matrix n n R) : trace (r • A) = r • trace A := Finset.smul_sum.symm @[simp] theorem trace_transpose (A : Matrix n n R) : trace Aᵀ = trace A := rfl @[simp] theorem trace_conjTranspose [StarAddMonoid R] (A : Matrix n n R) : trace Aᴴ = star (trace A) := (star_sum _ _).symm variable (n α R) /-- `Matrix.trace` as an `AddMonoidHom` -/ @[simps] def traceAddMonoidHom : Matrix n n R →+ R where toFun := trace map_zero' := trace_zero n R map_add' := trace_add /-- `Matrix.trace` as a `LinearMap` -/ @[simps] def traceLinearMap [Semiring α] [Module α R] : Matrix n n R →ₗ[α] R where toFun := trace map_add' := trace_add map_smul' := trace_smul variable {n α R} @[simp] theorem trace_list_sum (l : List (Matrix n n R)) 
: trace l.sum = (l.map trace).sum := map_list_sum (traceAddMonoidHom n R) l @[simp] theorem trace_multiset_sum (s : Multiset (Matrix n n R)) : trace s.sum = (s.map trace).sum := map_multiset_sum (traceAddMonoidHom n R) s @[simp] theorem trace_sum (s : Finset ι) (f : ι → Matrix n n R) : trace (∑ i ∈ s, f i) = ∑ i ∈ s, trace (f i) := map_sum (traceAddMonoidHom n R) f s theorem _root_.AddMonoidHom.map_trace [AddCommMonoid S] {F : Type*} [FunLike F R S] [AddMonoidHomClass F R S] (f : F) (A : Matrix n n R) : f (trace A) = trace (A.map f) := map_sum f (fun i => diag A i) Finset.univ lemma trace_blockDiagonal [DecidableEq p] (M : p → Matrix n n R) : trace (blockDiagonal M) = ∑ i, trace (M i) := by simp [blockDiagonal, trace, Finset.sum_comm (γ := n), Fintype.sum_prod_type] lemma trace_blockDiagonal' [DecidableEq p] {m : p → Type*} [∀ i, Fintype (m i)] (M : ∀ i, Matrix (m i) (m i) R) : trace (blockDiagonal' M) = ∑ i, trace (M i) := by simp [blockDiagonal', trace, Finset.sum_sigma'] end AddCommMonoid section AddCommGroup variable [AddCommGroup R] @[simp] theorem trace_sub (A B : Matrix n n R) : trace (A - B) = trace A - trace B := Finset.sum_sub_distrib .. @[simp] theorem trace_neg (A : Matrix n n R) : trace (-A) = -trace A := Finset.sum_neg_distrib .. 
end AddCommGroup section One variable [DecidableEq n] [AddCommMonoidWithOne R] @[simp] theorem trace_one : trace (1 : Matrix n n R) = Fintype.card n := by simp_rw [trace, diag_one, Pi.one_def, Finset.sum_const, nsmul_one, Finset.card_univ] end One section Mul @[simp] theorem trace_transpose_mul [AddCommMonoid R] [Mul R] (A : Matrix m n R) (B : Matrix n m R) : trace (Aᵀ * Bᵀ) = trace (A * B) := Finset.sum_comm theorem trace_mul_comm [AddCommMonoid R] [CommMagma R] (A : Matrix m n R) (B : Matrix n m R) : trace (A * B) = trace (B * A) := by rw [← trace_transpose, ← trace_transpose_mul, transpose_mul] theorem trace_mul_cycle [NonUnitalCommSemiring R] (A : Matrix m n R) (B : Matrix n p R) (C : Matrix p m R) : trace (A * B * C) = trace (C * A * B) := by rw [trace_mul_comm, Matrix.mul_assoc] theorem trace_mul_cycle' [NonUnitalCommSemiring R] (A : Matrix m n R) (B : Matrix n p R) (C : Matrix p m R) : trace (A * (B * C)) = trace (C * (A * B)) := by rw [← Matrix.mul_assoc, trace_mul_comm] @[simp] theorem trace_replicateCol_mul_replicateRow {ι : Type*} [Unique ι] [NonUnitalNonAssocSemiring R] (a b : n → R) : trace (replicateCol ι a * replicateRow ι b) = a ⬝ᵥ b := by apply Finset.sum_congr rfl simp [mul_apply] @[simp] theorem trace_vecMulVec [NonUnitalNonAssocSemiring R] (a b : n → R) : trace (vecMulVec a b) = a ⬝ᵥ b := by rw [vecMulVec_eq Unit, trace_replicateCol_mul_replicateRow] end Mul lemma trace_submatrix_succ {n : ℕ} [AddCommMonoid R] (M : Matrix (Fin n.succ) (Fin n.succ) R) : M 0 0 + trace (submatrix M Fin.succ Fin.succ) = trace M := by delta trace rw [← (finSuccEquiv n).symm.sum_comp] simp section CommSemiring variable [DecidableEq m] [CommSemiring R] -- TODO(https://github.com/leanprover-community/mathlib4/issues/6607): fix elaboration so that the ascription isn't needed theorem trace_units_conj (M : (Matrix m m R)ˣ) (N : Matrix m m R) : trace ((M : Matrix _ _ _) * N * (↑M⁻¹ : Matrix _ _ _)) = trace N := by rw [trace_mul_cycle, Units.inv_mul, one_mul] set_option 
linter.docPrime false in -- TODO(https://github.com/leanprover-community/mathlib4/issues/6607): fix elaboration so that the ascription isn't needed theorem trace_units_conj' (M : (Matrix m m R)ˣ) (N : Matrix m m R) : trace ((↑M⁻¹ : Matrix _ _ _) * N * (↑M : Matrix _ _ _)) = trace N := trace_units_conj M⁻¹ N end CommSemiring section Fin variable [AddCommMonoid R] /-! ### Special cases for `Fin n` for low values of `n` -/ @[simp] theorem trace_fin_zero (A : Matrix (Fin 0) (Fin 0) R) : trace A = 0 := rfl theorem trace_fin_one (A : Matrix (Fin 1) (Fin 1) R) : trace A = A 0 0 := add_zero _ theorem trace_fin_two (A : Matrix (Fin 2) (Fin 2) R) : trace A = A 0 0 + A 1 1 := congr_arg (_ + ·) (add_zero (A 1 1)) theorem trace_fin_three (A : Matrix (Fin 3) (Fin 3) R) : trace A = A 0 0 + A 1 1 + A 2 2 := by rw [← add_zero (A 2 2), add_assoc] rfl @[simp] theorem trace_fin_one_of (a : R) : trace !![a] = a := trace_fin_one _ @[simp] theorem trace_fin_two_of (a b c d : R) : trace !![a, b; c, d] = a + d := trace_fin_two _ @[simp] theorem trace_fin_three_of (a b c d e f g h i : R) : trace !![a, b, c; d, e, f; g, h, i] = a + e + i := trace_fin_three _ end Fin section single variable {l m n : Type*} {R α : Type*} [DecidableEq l] [DecidableEq m] [DecidableEq n] variable [Fintype n] [AddCommMonoid α] (i j : n) (c : α) @[simp] theorem trace_single_eq_of_ne (h : i ≠ j) : trace (single i j c) = 0 := by simp [trace, h] @[deprecated (since := "2025-05-05")] alias StdBasisMatrix.trace_zero := trace_single_eq_of_ne @[simp] theorem trace_single_eq_same : trace (single i i c) = c := by simp [trace] @[deprecated (since := "2025-05-05")] alias StdBasisMatrix.trace_eq := trace_single_eq_same theorem trace_single_mul [NonUnitalNonAssocSemiring R] [Fintype m] (i : n) (j : m) (a : R) (x : Matrix m n R) : (single i j a * x).trace = a • x j i := by simp [trace, mul_apply, single, ite_and] theorem trace_mul_single [NonUnitalNonAssocSemiring R] [Fintype m] (x : Matrix m n R) (i : n) (j : m) (a : R) : (x * 
single i j a).trace = MulOpposite.op a • x j i := by simp [trace, mul_apply, single, ite_and] end single /-- Matrices `A` and `B` are equal iff `(x * A).trace = (x * B).trace` for all `x`. -/ theorem ext_iff_trace_mul_left [NonAssocSemiring R] {A B : Matrix m n R} : A = B ↔ ∀ x, (x * A).trace = (x * B).trace := by refine ⟨fun h x => h ▸ rfl, fun h => ?_⟩ ext i j classical simpa [trace_single_mul] using h (single j i (1 : R)) /-- Matrices `A` and `B` are equal iff `(A * x).trace = (B * x).trace` for all `x`. -/ theorem ext_iff_trace_mul_right [NonAssocSemiring R] {A B : Matrix m n R} : A = B ↔ ∀ x, (A * x).trace = (B * x).trace := by refine ⟨fun h x => h ▸ rfl, fun h => ?_⟩ ext i j classical simpa [trace_mul_single] using h (single j i (1 : R)) end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Block.lean
import Mathlib.LinearAlgebra.Matrix.Transvection import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.Tactic.FinCases /-! # Block matrices and their determinant This file defines a predicate `Matrix.BlockTriangular` saying a matrix is block triangular, and proves the value of the determinant for various matrices built out of blocks. ## Main definitions * `Matrix.BlockTriangular` expresses that an `o` by `o` matrix is block triangular, if the rows and columns are ordered according to some order `b : o → α` ## Main results * `Matrix.det_of_blockTriangular`: the determinant of a block triangular matrix is equal to the product of the determinants of all the blocks * `Matrix.det_of_upperTriangular` and `Matrix.det_of_lowerTriangular`: the determinant of a triangular matrix is the product of the entries along the diagonal ## Tags matrix, diagonal, det, block triangular -/ open Finset Function OrderDual open Matrix universe v variable {α β m n o : Type*} {m' n' : α → Type*} variable {R : Type v} {M N : Matrix m m R} {b : m → α} namespace Matrix section LT variable [LT α] section Zero variable [Zero R] /-- Let `b` map rows and columns of a square matrix `M` to blocks indexed by `α`s. Then `BlockTriangular M n b` says the matrix is block triangular. 
-/ def BlockTriangular (M : Matrix m m R) (b : m → α) : Prop := ∀ ⦃i j⦄, b j < b i → M i j = 0 @[simp] protected theorem BlockTriangular.submatrix {f : n → m} (h : M.BlockTriangular b) : (M.submatrix f f).BlockTriangular (b ∘ f) := fun _ _ hij => h hij theorem blockTriangular_reindex_iff {b : n → α} {e : m ≃ n} : (reindex e e M).BlockTriangular b ↔ M.BlockTriangular (b ∘ e) := by refine ⟨fun h => ?_, fun h => ?_⟩ · convert h.submatrix simp only [reindex_apply, submatrix_submatrix, submatrix_id_id, Equiv.symm_comp_self] · convert h.submatrix simp only [comp_assoc b e e.symm, Equiv.self_comp_symm, comp_id] protected theorem BlockTriangular.transpose : M.BlockTriangular b → Mᵀ.BlockTriangular (toDual ∘ b) := swap @[simp] protected theorem blockTriangular_transpose_iff {b : m → αᵒᵈ} : Mᵀ.BlockTriangular b ↔ M.BlockTriangular (ofDual ∘ b) := forall_swap @[simp] theorem blockTriangular_zero : BlockTriangular (0 : Matrix m m R) b := fun _ _ _ => rfl end Zero protected theorem BlockTriangular.neg [NegZeroClass R] {M : Matrix m m R} (hM : BlockTriangular M b) : BlockTriangular (-M) b := fun _ _ h => by rw [neg_apply, hM h, neg_zero] theorem BlockTriangular.add [AddZeroClass R] (hM : BlockTriangular M b) (hN : BlockTriangular N b) : BlockTriangular (M + N) b := fun i j h => by simp_rw [Matrix.add_apply, hM h, hN h, zero_add] theorem BlockTriangular.sub [SubNegZeroMonoid R] (hM : BlockTriangular M b) (hN : BlockTriangular N b) : BlockTriangular (M - N) b := fun i j h => by simp_rw [Matrix.sub_apply, hM h, hN h, sub_zero] lemma BlockTriangular.add_iff_right [AddGroup R] (hM : BlockTriangular M b) : BlockTriangular (M + N) b ↔ BlockTriangular N b := ⟨(by simpa using hM.neg.add ·), hM.add⟩ lemma BlockTriangular.add_iff_left [AddGroup R] (hN : BlockTriangular N b) : BlockTriangular (M + N) b ↔ BlockTriangular M b := ⟨(by simpa using ·.sub hN), (·.add hN)⟩ lemma BlockTriangular.sub_iff_right [AddGroup R] (hM : BlockTriangular M b) : BlockTriangular (M - N) b ↔ BlockTriangular N b 
:= ⟨(by simpa using ·.neg.add hM), hM.sub⟩ lemma BlockTriangular.sub_iff_left [AddGroup R] (hN : BlockTriangular N b) : BlockTriangular (M - N) b ↔ BlockTriangular M b := ⟨(by simpa using ·.add hN), (·.sub hN)⟩ lemma BlockTriangular.map {S F} [FunLike F R S] [Zero R] [Zero S] [ZeroHomClass F R S] (f : F) (h : BlockTriangular M b) : BlockTriangular (M.map f) b := fun i j lt ↦ by simp [h lt] lemma BlockTriangular.comp [Zero R] {M : Matrix m m (Matrix n n R)} (h : BlockTriangular M b) : BlockTriangular (M.comp m m n n R) fun i ↦ b i.1 := fun i j lt ↦ by simp [h lt] end LT section Preorder variable [Preorder α] section Zero variable [Zero R] theorem blockTriangular_diagonal [DecidableEq m] (d : m → R) : BlockTriangular (diagonal d) b := fun _ _ h => diagonal_apply_ne' d fun h' => ne_of_lt h (congr_arg _ h') theorem blockTriangular_blockDiagonal' [DecidableEq α] (d : ∀ i : α, Matrix (m' i) (m' i) R) : BlockTriangular (blockDiagonal' d) Sigma.fst := by rintro ⟨i, i'⟩ ⟨j, j'⟩ h apply blockDiagonal'_apply_ne d i' j' fun h' => ne_of_lt h h'.symm theorem blockTriangular_blockDiagonal [DecidableEq α] (d : α → Matrix m m R) : BlockTriangular (blockDiagonal d) Prod.snd := by rintro ⟨i, i'⟩ ⟨j, j'⟩ h rw [blockDiagonal'_eq_blockDiagonal, blockTriangular_blockDiagonal'] exact h variable [DecidableEq m] theorem blockTriangular_one [One R] : BlockTriangular (1 : Matrix m m R) b := blockTriangular_diagonal _ theorem blockTriangular_single {i j : m} (hij : b i ≤ b j) (c : R) : BlockTriangular (single i j c) b := by intro r s hrs apply single_apply_of_ne rintro ⟨rfl, rfl⟩ exact (hij.trans_lt hrs).false @[deprecated (since := "2025-05-05")] alias blockTriangular_stdBasisMatrix := blockTriangular_single theorem blockTriangular_single' {i j : m} (hij : b j ≤ b i) (c : R) : BlockTriangular (single i j c) (toDual ∘ b) := blockTriangular_single (by exact toDual_le_toDual.mpr hij) _ @[deprecated (since := "2025-05-05")] alias blockTriangular_stdBasisMatrix' := blockTriangular_single' end Zero 
variable [CommRing R] [DecidableEq m] theorem blockTriangular_transvection {i j : m} (hij : b i ≤ b j) (c : R) : BlockTriangular (transvection i j c) b := blockTriangular_one.add (blockTriangular_single hij c) theorem blockTriangular_transvection' {i j : m} (hij : b j ≤ b i) (c : R) : BlockTriangular (transvection i j c) (OrderDual.toDual ∘ b) := blockTriangular_one.add (blockTriangular_single' hij c) end Preorder section LinearOrder variable [LinearOrder α] theorem BlockTriangular.mul [Fintype m] [NonUnitalNonAssocSemiring R] {M N : Matrix m m R} (hM : BlockTriangular M b) (hN : BlockTriangular N b) : BlockTriangular (M * N) b := by intro i j hij apply Finset.sum_eq_zero intro k _ by_cases! hki : b k < b i · simp_rw [hM hki, zero_mul] · simp_rw [hN (lt_of_lt_of_le hij hki), mul_zero] end LinearOrder theorem upper_two_blockTriangular [Zero R] [Preorder α] (A : Matrix m m R) (B : Matrix m n R) (D : Matrix n n R) {a b : α} (hab : a < b) : BlockTriangular (fromBlocks A B 0 D) (Sum.elim (fun _ => a) fun _ => b) := by rintro (c | c) (d | d) hcd <;> first | simp [hab.not_gt] at hcd ⊢ /-! 
### Determinant -/ variable [CommRing R] [DecidableEq m] [Fintype m] [DecidableEq n] [Fintype n] theorem equiv_block_det (M : Matrix m m R) {p q : m → Prop} [DecidablePred p] [DecidablePred q] (e : ∀ x, q x ↔ p x) : (toSquareBlockProp M p).det = (toSquareBlockProp M q).det := by convert Matrix.det_reindex_self (Equiv.subtypeEquivRight e) (toSquareBlockProp M q) -- Removed `@[simp]` attribute, -- as the LHS simplifies already to `M.toSquareBlock id i ⟨i, ⋯⟩ ⟨i, ⋯⟩` theorem det_toSquareBlock_id (M : Matrix m m R) (i : m) : (M.toSquareBlock id i).det = M i i := letI : Unique { a // id a = i } := ⟨⟨⟨i, rfl⟩⟩, fun j => Subtype.ext j.property⟩ (det_unique _).trans rfl theorem det_toBlock (M : Matrix m m R) (p : m → Prop) [DecidablePred p] : M.det = (fromBlocks (toBlock M p p) (toBlock M p fun j => ¬p j) (toBlock M (fun j => ¬p j) p) <| toBlock M (fun j => ¬p j) fun j => ¬p j).det := by rw [← Matrix.det_reindex_self (Equiv.sumCompl p).symm M] rw [det_apply', det_apply'] congr; ext σ; congr; ext x generalize hy : σ x = y cases x <;> cases y <;> simp only [Matrix.reindex_apply, toBlock_apply, Equiv.symm_symm, Equiv.sumCompl_apply_inr, Equiv.sumCompl_apply_inl, fromBlocks_apply₁₁, fromBlocks_apply₁₂, fromBlocks_apply₂₁, fromBlocks_apply₂₂, Matrix.submatrix_apply] theorem twoBlockTriangular_det (M : Matrix m m R) (p : m → Prop) [DecidablePred p] (h : ∀ i, ¬p i → ∀ j, p j → M i j = 0) : M.det = (toSquareBlockProp M p).det * (toSquareBlockProp M fun i => ¬p i).det := by rw [det_toBlock M p] convert det_fromBlocks_zero₂₁ (toBlock M p p) (toBlock M p fun j => ¬p j) (toBlock M (fun j => ¬p j) fun j => ¬p j) ext i j exact h (↑i) i.2 (↑j) j.2 theorem twoBlockTriangular_det' (M : Matrix m m R) (p : m → Prop) [DecidablePred p] (h : ∀ i, p i → ∀ j, ¬p j → M i j = 0) : M.det = (toSquareBlockProp M p).det * (toSquareBlockProp M fun i => ¬p i).det := by rw [M.twoBlockTriangular_det fun i => ¬p i, mul_comm] · congr 1 exact equiv_block_det _ fun _ => not_not.symm · simpa only 
[Classical.not_not] using h protected theorem BlockTriangular.det [DecidableEq α] [LinearOrder α] (hM : BlockTriangular M b) : M.det = ∏ a ∈ univ.image b, (M.toSquareBlock b a).det := by suffices ∀ hs : Finset α, univ.image b = hs → M.det = ∏ a ∈ hs, (M.toSquareBlock b a).det by exact this _ rfl intro s hs induction s using Finset.eraseInduction generalizing m with | H s ih => subst hs cases isEmpty_or_nonempty m · simp let k := (univ.image b).max' (univ_nonempty.image _) rw [twoBlockTriangular_det' M fun i => b i = k] · have : univ.image b = insert k ((univ.image b).erase k) := by rw [insert_erase] apply max'_mem rw [this, prod_insert (notMem_erase _ _)] refine congr_arg _ ?_ let b' := fun i : { a // b a ≠ k } => b ↑i have h' : BlockTriangular (M.toSquareBlockProp fun i => b i ≠ k) b' := hM.submatrix have hb' : image b' univ = (image b univ).erase k := by convert image_subtype_ne_univ_eq_image_erase k b rw [ih _ (max'_mem _ _) h' hb'] refine Finset.prod_congr rfl fun l hl => ?_ let he : { a // b' a = l } ≃ { a // b a = l } := haveI hc : ∀ i, b i = l → b i ≠ k := fun i hi => ne_of_eq_of_ne hi (ne_of_mem_erase hl) Equiv.subtypeSubtypeEquivSubtype @(hc) simp only [toSquareBlock_def] erw [← Matrix.det_reindex_self he.symm fun i j : { a // b a = l } => M ↑i ↑j] rfl · intro i hi j hj apply hM rw [hi] apply lt_of_le_of_ne _ hj exact Finset.le_max' (univ.image b) _ (mem_image_of_mem _ (mem_univ _)) theorem BlockTriangular.det_fintype [DecidableEq α] [Fintype α] [LinearOrder α] (h : BlockTriangular M b) : M.det = ∏ k : α, (M.toSquareBlock b k).det := by refine h.det.trans (prod_subset (subset_univ _) fun a _ ha => ?_) have : IsEmpty { i // b i = a } := ⟨fun i => ha <| mem_image.2 ⟨i, mem_univ _, i.2⟩⟩ exact det_isEmpty theorem det_of_upperTriangular [LinearOrder m] (h : M.BlockTriangular id) : M.det = ∏ i : m, M i i := by haveI : DecidableEq R := Classical.decEq _ simp_rw [h.det, image_id, det_toSquareBlock_id] theorem det_of_lowerTriangular [LinearOrder m] (M : Matrix m m 
R) (h : M.BlockTriangular toDual) : M.det = ∏ i : m, M i i := by rw [← det_transpose] exact det_of_upperTriangular h.transpose open Polynomial theorem matrixOfPolynomials_blockTriangular {R} [Semiring R] {n : ℕ} (p : Fin n → R[X]) (h_deg : ∀ i, (p i).natDegree ≤ i) : Matrix.BlockTriangular (Matrix.of (fun (i j : Fin n) => (p j).coeff i)) id := fun _ j h => by exact coeff_eq_zero_of_natDegree_lt <| Nat.lt_of_le_of_lt (h_deg j) h theorem det_matrixOfPolynomials {n : ℕ} (p : Fin n → R[X]) (h_deg : ∀ i, (p i).natDegree = i) (h_monic : ∀ i, Monic <| p i) : (Matrix.of (fun (i j : Fin n) => (p j).coeff i)).det = 1 := by rw [Matrix.det_of_upperTriangular (Matrix.matrixOfPolynomials_blockTriangular p (fun i ↦ Nat.le_of_eq (h_deg i)))] convert prod_const_one with x _ rw [Matrix.of_apply, ← h_deg, coeff_natDegree, (h_monic x).leadingCoeff] /-! ### Invertible -/ theorem BlockTriangular.toBlock_inverse_mul_toBlock_eq_one [LinearOrder α] [Invertible M] (hM : BlockTriangular M b) (k : α) : ((M⁻¹.toBlock (fun i => b i < k) fun i => b i < k) * M.toBlock (fun i => b i < k) fun i => b i < k) = 1 := by let p i := b i < k have h_sum : M⁻¹.toBlock p p * M.toBlock p p + (M⁻¹.toBlock p fun i => ¬p i) * M.toBlock (fun i => ¬p i) p = 1 := by rw [← toBlock_mul_eq_add, inv_mul_of_invertible M, toBlock_one_self] have h_zero : M.toBlock (fun i => ¬p i) p = 0 := by ext i j simpa using hM (lt_of_lt_of_le j.2 (le_of_not_gt i.2)) simpa [h_zero] using h_sum /-- The inverse of an upper-left subblock of a block-triangular matrix `M` is the upper-left subblock of `M⁻¹`. -/ theorem BlockTriangular.inv_toBlock [LinearOrder α] [Invertible M] (hM : BlockTriangular M b) (k : α) : (M.toBlock (fun i => b i < k) fun i => b i < k)⁻¹ = M⁻¹.toBlock (fun i => b i < k) fun i => b i < k := inv_eq_left_inv <| hM.toBlock_inverse_mul_toBlock_eq_one k /-- An upper-left subblock of an invertible block-triangular matrix is invertible. 
-/ def BlockTriangular.invertibleToBlock [LinearOrder α] [Invertible M] (hM : BlockTriangular M b) (k : α) : Invertible (M.toBlock (fun i => b i < k) fun i => b i < k) := invertibleOfLeftInverse _ ((⅟M).toBlock (fun i => b i < k) fun i => b i < k) <| by simpa only [invOf_eq_nonsing_inv] using hM.toBlock_inverse_mul_toBlock_eq_one k /-- A lower-left subblock of the inverse of a block-triangular matrix is zero. This is a first step towards `BlockTriangular.inv_toBlock` below. -/ theorem toBlock_inverse_eq_zero [LinearOrder α] [Invertible M] (hM : BlockTriangular M b) (k : α) : (M⁻¹.toBlock (fun i => k ≤ b i) fun i => b i < k) = 0 := by let p i := b i < k let q i := ¬b i < k have h_sum : M⁻¹.toBlock q p * M.toBlock p p + M⁻¹.toBlock q q * M.toBlock q p = 0 := by rw [← toBlock_mul_eq_add, inv_mul_of_invertible M, toBlock_one_disjoint] rw [disjoint_iff_inf_le] exact fun i h => h.1 h.2 have h_zero : M.toBlock q p = 0 := by ext i j simpa using hM (lt_of_lt_of_le j.2 <| le_of_not_gt i.2) have h_mul_eq_zero : M⁻¹.toBlock q p * M.toBlock p p = 0 := by simpa [h_zero] using h_sum haveI : Invertible (M.toBlock p p) := hM.invertibleToBlock k have : (fun i => k ≤ b i) = q := by ext exact not_lt.symm rw [this, ← Matrix.zero_mul (M.toBlock p p)⁻¹, ← h_mul_eq_zero, mul_inv_cancel_right_of_invertible] /-- The inverse of a block-triangular matrix is block-triangular. 
-/ theorem blockTriangular_inv_of_blockTriangular [LinearOrder α] [Invertible M] (hM : BlockTriangular M b) : BlockTriangular M⁻¹ b := by suffices ∀ hs : Finset α, univ.image b = hs → BlockTriangular M⁻¹ b by exact this _ rfl intro s hs induction s using Finset.strongInduction generalizing m with | H s ih => subst hs intro i j hij haveI : Inhabited m := ⟨i⟩ let k := (univ.image b).max' (univ_nonempty.image _) let b' := fun i : { a // b a < k } => b ↑i let A := M.toBlock (fun i => b i < k) fun j => b j < k obtain hbi | hi : b i = k ∨ _ := (le_max' _ (b i) <| mem_image_of_mem _ <| mem_univ _).eq_or_lt · have : M⁻¹.toBlock (fun i => k ≤ b i) (fun i => b i < k) ⟨i, hbi.ge⟩ ⟨j, hbi ▸ hij⟩ = 0 := by simp only [toBlock_inverse_eq_zero hM k, Matrix.zero_apply] simp [this.symm] haveI : Invertible A := hM.invertibleToBlock _ have hA : A.BlockTriangular b' := hM.submatrix have hb' : image b' univ ⊂ image b univ := by convert image_subtype_univ_ssubset_image_univ k b _ (fun a => a < k) (lt_irrefl _) convert max'_mem (α := α) _ _ have hij' : b' ⟨j, hij.trans hi⟩ < b' ⟨i, hi⟩ := by simp_rw [b', hij] simp [A, hM.inv_toBlock k, (ih (image b' univ) hb' hA rfl hij').symm] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/MvPolynomial.lean
import Mathlib.Algebra.MvPolynomial.Eval import Mathlib.Algebra.MvPolynomial.CommRing import Mathlib.LinearAlgebra.Matrix.Determinant.Basic /-! # Matrices of multivariate polynomials In this file, we prove results about matrices over an mv_polynomial ring. In particular, we provide `Matrix.mvPolynomialX` which associates every entry of a matrix with a unique variable. ## Tags matrix determinant, multivariate polynomial -/ variable {m n R S : Type*} namespace Matrix variable (m n R) /-- The matrix with variable `X (i,j)` at location `(i,j)`. -/ noncomputable def mvPolynomialX [CommSemiring R] : Matrix m n (MvPolynomial (m × n) R) := of fun i j => MvPolynomial.X (i, j) -- TODO: set as an equation lemma for `mv_polynomial_X`, see https://github.com/leanprover-community/mathlib4/pull/3024 @[simp] theorem mvPolynomialX_apply [CommSemiring R] (i j) : mvPolynomialX m n R i j = MvPolynomial.X (i, j) := rfl variable {m n R} /-- Any matrix `A` can be expressed as the evaluation of `Matrix.mvPolynomialX`. This is of particular use when `MvPolynomial (m × n) R` is an integral domain but `S` is not, as if the `MvPolynomial.eval₂` can be pulled to the outside of a goal, it can be solved in under cancellative assumptions. -/ theorem mvPolynomialX_map_eval₂ [CommSemiring R] [CommSemiring S] (f : R →+* S) (A : Matrix m n S) : (mvPolynomialX m n R).map (MvPolynomial.eval₂ f fun p : m × n => A p.1 p.2) = A := ext fun i j => MvPolynomial.eval₂_X _ (fun p : m × n => A p.1 p.2) (i, j) /-- A variant of `Matrix.mvPolynomialX_map_eval₂` with a bundled `RingHom` on the LHS. -/ theorem mvPolynomialX_mapMatrix_eval [Fintype m] [DecidableEq m] [CommSemiring R] (A : Matrix m m R) : (MvPolynomial.eval fun p : m × m => A p.1 p.2).mapMatrix (mvPolynomialX m m R) = A := mvPolynomialX_map_eval₂ _ A variable (R) /-- A variant of `Matrix.mvPolynomialX_map_eval₂` with a bundled `AlgHom` on the LHS. 
-/ theorem mvPolynomialX_mapMatrix_aeval [Fintype m] [DecidableEq m] [CommSemiring R] [CommSemiring S] [Algebra R S] (A : Matrix m m S) : (MvPolynomial.aeval fun p : m × m => A p.1 p.2).mapMatrix (mvPolynomialX m m R) = A := mvPolynomialX_map_eval₂ _ A variable (m) /-- In a nontrivial ring, `Matrix.mvPolynomialX m m R` has non-zero determinant. -/ theorem det_mvPolynomialX_ne_zero [DecidableEq m] [Fintype m] [CommRing R] [Nontrivial R] : det (mvPolynomialX m m R) ≠ 0 := by intro h_det have := congr_arg Matrix.det (mvPolynomialX_mapMatrix_eval (1 : Matrix m m R)) rw [det_one, ← RingHom.map_det, h_det, RingHom.map_zero] at this exact zero_ne_one this end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Vec.lean
import Mathlib.LinearAlgebra.Matrix.Hadamard import Mathlib.LinearAlgebra.Matrix.Kronecker import Mathlib.LinearAlgebra.Matrix.Trace /-! # Vectorization of matrices This file defines `Matrix.vec A`, the vectorization of a matrix `A`, formed by stacking the columns of A into a single large column vector. Since mathlib indices matrices by arbitrary types rather than `Fin n`, the result of `Matrix.vec` on `A : Matrix m n R` is indexed by `n × m`. The `Fin (n * m)` interpretation can be restored by composing with `finProdFinEquiv.symm`: ```lean -- ![1, 2, 3, 4] #eval vec !![1, 3; 2, 4] ∘ finProdFinEquiv.symm ``` While it may seem more natural to index by `m × n`, keeping the indices in the same order, this would amount to stacking the rows into one long row, and goes against the literature. If you want this function, you can write `Matrix.vec Aᵀ` instead. ### References * [Wikipedia](https://en.wikipedia.org/wiki/Vectorization_(mathematics)) -/ namespace Matrix variable {ι l m n p R S} /-- All the matrix entries, arranged into one column. 
-/ @[simp] def vec (A : Matrix m n R) : n × m → R := fun ij => A ij.2 ij.1 @[simp] theorem vec_of (f : m → n → R) : vec (of f) = Function.uncurry (flip f) := rfl theorem vec_transpose (A : Matrix m n R) : vec Aᵀ = vec A ∘ Prod.swap := rfl theorem vec_eq_uncurry (A : Matrix m n R) : vec A = Function.uncurry fun i j => A j i := rfl theorem vec_inj {A B : Matrix m n R} : A.vec = B.vec ↔ A = B := by simp_rw [← Matrix.ext_iff, funext_iff, Prod.forall, @forall_comm m n, vec] theorem vec_bijective : Function.Bijective (vec : Matrix m n R → _) := Equiv.curry _ _ _ |>.symm.bijective.comp Function.swap_bijective theorem vec_map (A : Matrix m n R) (f : R → S) : vec (A.map f) = f ∘ vec A := rfl @[simp] theorem vec_zero [Zero R] : vec (0 : Matrix m n R) = 0 := rfl @[simp] theorem vec_eq_zero_iff [Zero R] {A : Matrix m n R} : vec A = 0 ↔ A = 0 := vec_inj (B := 0) @[simp] theorem vec_add [Add R] (A B : Matrix m n R) : vec (A + B) = vec A + vec B := rfl theorem vec_neg [Neg R] (A : Matrix m n R) : vec (-A) = -vec A := rfl @[simp] theorem vec_sub [Sub R] (A B : Matrix m n R) : vec (A - B) = vec A - vec B := rfl @[simp] theorem vec_smul {α} [SMul α R] (r : α) (A : Matrix m n R) : vec (r • A) = r • vec A := rfl theorem vec_sum [AddCommMonoid R] (s : Finset ι) (A : ι → Matrix m n R) : vec (∑ i ∈ s, A i) = ∑ i ∈ s, vec (A i) := by ext simp_rw [vec, Finset.sum_apply, vec, Matrix.sum_apply] theorem vec_dotProduct_vec [AddCommMonoid R] [Mul R] [Fintype m] [Fintype n] (A B : Matrix m n R) : vec A ⬝ᵥ vec B = (Aᵀ * B).trace := by simp_rw [Matrix.trace, Matrix.diag, Matrix.mul_apply, dotProduct, vec, transpose_apply, ← Finset.univ_product_univ, Finset.sum_product] theorem star_vec [Star R] (x : Matrix m n R) : star x.vec = (x.map star).vec := rfl theorem star_vec_dotProduct_vec [AddCommMonoid R] [Mul R] [Star R] [Fintype m] [Fintype n] (A B : Matrix m n R) : star (vec A) ⬝ᵥ vec B = (Aᴴ * B).trace := by simp_rw [star_vec, vec_dotProduct_vec, ← conjTranspose_transpose, transpose_transpose] 
theorem vec_hadamard [Mul R] (A B : Matrix m n R) : vec (A ⊙ B) = vec A * vec B := rfl @[simp] theorem vec_single [DecidableEq m] [DecidableEq n] [Zero R] (i : m) (j : n) (r : R) : vec (Matrix.single i j r) = Pi.single (j, i) r := by rw [single_eq_of_single_single, vec_of, Function.uncurry_flip, Pi.uncurry_single_single] exact Pi.single_comp_equiv (Equiv.prodComm _ _) _ _ section Kronecker open scoped Kronecker section NonUnitalSemiring variable [NonUnitalSemiring R] [Fintype m] [Fintype n] /-- Technical lemma shared with `kronecker_mulVec_vec` and `vec_mul_eq_mulVec`. -/ theorem kronecker_mulVec_vec_of_commute (A : Matrix l m R) (X : Matrix m n R) (B : Matrix p n R) (hB : ∀ x i j, Commute x (B i j)) : (B ⊗ₖ A) *ᵥ vec X = vec (A * X * Bᵀ) := by ext ⟨k, l⟩ simp_rw [vec, mulVec, mul_apply, dotProduct, kroneckerMap_apply, Finset.sum_mul, transpose_apply, ← Finset.univ_product_univ, Finset.sum_product, (hB ..).right_comm, vec, (hB ..).eq] /-- Technical lemma shared with `vec_vecMul_kronecker` and `vec_mul_eq_vecMul`. 
-/ theorem vec_vecMul_kronecker_of_commute (A : Matrix m l R) (X : Matrix m n R) (B : Matrix n p R) (hA : ∀ x i j, Commute (A i j) x) : vec X ᵥ* (B ⊗ₖ A) = vec (Aᵀ * X * B) := by ext ⟨k, l⟩ simp_rw [vec, vecMul, mul_apply, dotProduct, kroneckerMap_apply, Finset.sum_mul, transpose_apply, ← Finset.univ_product_univ, Finset.sum_product, (hA ..).eq, (hA ..).right_comm, mul_assoc, vec] end NonUnitalSemiring section NonUnitalCommSemiring variable [NonUnitalCommSemiring R] [Fintype m] [Fintype n] theorem kronecker_mulVec_vec (A : Matrix l m R) (X : Matrix m n R) (B : Matrix p n R) : (B ⊗ₖ A) *ᵥ vec X = vec (A * X * Bᵀ) := kronecker_mulVec_vec_of_commute _ _ _ fun _ _ _ => Commute.all _ _ theorem vec_vecMul_kronecker (A : Matrix m l R) (X : Matrix m n R) (B : Matrix n p R) : vec X ᵥ* (B ⊗ₖ A) = vec (Aᵀ * X * B) := vec_vecMul_kronecker_of_commute _ _ _ fun _ _ _=> Commute.all _ _ end NonUnitalCommSemiring section Semiring variable [Semiring R] [Fintype m] [Fintype n] theorem vec_mul_eq_mulVec [DecidableEq n] (A : Matrix l m R) (B : Matrix m n R) : vec (A * B) = (1 ⊗ₖ A) *ᵥ vec B := by rw [kronecker_mulVec_vec_of_commute, transpose_one, Matrix.mul_one] intro x i j obtain rfl | hij := eq_or_ne i j <;> simp [*] theorem vec_mul_eq_vecMul [DecidableEq m] (A : Matrix m n R) (B : Matrix n p R) : vec (A * B) = A.vec ᵥ* (B ⊗ₖ 1) := by rw [vec_vecMul_kronecker_of_commute, transpose_one, Matrix.one_mul] intro x i j obtain rfl | hij := eq_or_ne i j <;> simp [*] end Semiring end Kronecker end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Transvection.lean
import Mathlib.Data.Matrix.Basis import Mathlib.Data.Matrix.DMatrix import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.LinearAlgebra.Matrix.Reindex import Mathlib.Tactic.Field /-! # Transvections Transvections are matrices of the form `1 + single i j c`, where `single i j c` is the basic matrix with a `c` at position `(i, j)`. Multiplying by such a transvection on the left (resp. on the right) amounts to adding `c` times the `j`-th row to the `i`-th row (resp `c` times the `i`-th column to the `j`-th column). Therefore, they are useful to present algorithms operating on rows and columns. Transvections are a special case of *elementary matrices* (according to most references, these also contain the matrices exchanging rows, and the matrices multiplying a row by a constant). We show that, over a field, any matrix can be written as `L * D * L'`, where `L` and `L'` are products of transvections and `D` is diagonal. In other words, one can reduce a matrix to diagonal form by operations on its rows and columns, a variant of Gauss' pivot algorithm. ## Main definitions and results * `transvection i j c` is the matrix equal to `1 + single i j c`. * `TransvectionStruct n R` is a structure containing the data of `i, j, c` and a proof that `i ≠ j`. These are often easier to manipulate than straight matrices, especially in inductive arguments. * `exists_list_transvec_mul_diagonal_mul_list_transvec` states that any matrix `M` over a field can be written in the form `t_1 * ... * t_k * D * t'_1 * ... * t'_l`, where `D` is diagonal and the `t_i`, `t'_j` are transvections. * `diagonal_transvection_induction` shows that a property which is true for diagonal matrices and transvections, and invariant under product, is true for all matrices. * `diagonal_transvection_induction_of_det_ne_zero` is the same statement over invertible matrices. 
## Implementation details The proof of the reduction results is done inductively on the size of the matrices, reducing an `(r + 1) × (r + 1)` matrix to a matrix whose last row and column are zeroes, except possibly for the last diagonal entry. This step is done as follows. If all the coefficients on the last row and column are zero, there is nothing to do. Otherwise, one can put a nonzero coefficient in the last diagonal entry by a row or column operation, and then subtract this last diagonal entry from the other entries in the last row and column to make them vanish. This step is done in the type `Fin r ⊕ Unit`, where `Fin r` is useful to choose arbitrarily some order in which we cancel the coefficients, and the sum structure is useful to use the formalism of block matrices. To proceed with the induction, we reindex our matrices to reduce to the above situation. -/ universe u₁ u₂ namespace Matrix variable (n p : Type*) (R : Type u₂) {𝕜 : Type*} [Field 𝕜] variable [DecidableEq n] [DecidableEq p] variable [CommRing R] section Transvection variable {R n} (i j : n) /-- The transvection matrix `transvection i j c` is equal to the identity plus `c` at position `(i, j)`. Multiplying by it on the left (as in `transvection i j c * M`) corresponds to adding `c` times the `j`-th row of `M` to its `i`-th row. Multiplying by it on the right corresponds to adding `c` times the `i`-th column to the `j`-th column. -/ def transvection (c : R) : Matrix n n R := 1 + Matrix.single i j c @[simp] theorem transvection_zero : transvection i j (0 : R) = 1 := by simp [transvection] section /-- A transvection matrix is obtained from the identity by adding `c` times the `j`-th row to the `i`-th row. 
-/ theorem updateRow_eq_transvection [Finite n] (c : R) : updateRow (1 : Matrix n n R) i ((1 : Matrix n n R) i + c • (1 : Matrix n n R) j) = transvection i j c := by cases nonempty_fintype n ext a b by_cases ha : i = a · by_cases hb : j = b · simp only [ha, updateRow_self, Pi.add_apply, one_apply, Pi.smul_apply, hb, ↓reduceIte, smul_eq_mul, mul_one, transvection, add_apply, single_apply_same] · simp only [ha, updateRow_self, Pi.add_apply, one_apply, Pi.smul_apply, hb, ↓reduceIte, smul_eq_mul, mul_zero, add_zero, transvection, add_apply, and_false, not_false_eq_true, single_apply_of_ne] · simp only [updateRow_ne, transvection, ha, Ne.symm ha, single_apply_of_ne, add_zero, Ne, not_false_iff, false_and, add_apply] variable [Fintype n] theorem transvection_mul_transvection_same (h : i ≠ j) (c d : R) : transvection i j c * transvection i j d = transvection i j (c + d) := by simp [transvection, Matrix.add_mul, Matrix.mul_add, h.symm, add_assoc, single_add] @[simp] theorem transvection_mul_apply_same (b : n) (c : R) (M : Matrix n n R) : (transvection i j c * M) i b = M i b + c * M j b := by simp [transvection, Matrix.add_mul] @[simp] theorem mul_transvection_apply_same (a : n) (c : R) (M : Matrix n n R) : (M * transvection i j c) a j = M a j + c * M a i := by simp [transvection, Matrix.mul_add, mul_comm] @[simp] theorem transvection_mul_apply_of_ne (a b : n) (ha : a ≠ i) (c : R) (M : Matrix n n R) : (transvection i j c * M) a b = M a b := by simp [transvection, Matrix.add_mul, ha] @[simp] theorem mul_transvection_apply_of_ne (a b : n) (hb : b ≠ j) (c : R) (M : Matrix n n R) : (M * transvection i j c) a b = M a b := by simp [transvection, Matrix.mul_add, hb] @[simp] theorem det_transvection_of_ne (h : i ≠ j) (c : R) : det (transvection i j c) = 1 := by rw [← updateRow_eq_transvection i j, det_updateRow_add_smul_self _ h, det_one] end variable (R n) /-- A structure containing all the information from which one can build a nontrivial transvection. 
This structure is easier to manipulate than transvections as one has a direct access to all the relevant fields. -/ structure TransvectionStruct where (i j : n) hij : i ≠ j c : R instance [Nontrivial n] : Nonempty (TransvectionStruct n R) := by choose x y hxy using exists_pair_ne n exact ⟨⟨x, y, hxy, 0⟩⟩ namespace TransvectionStruct variable {R n} /-- Associating to a `transvection_struct` the corresponding transvection matrix. -/ def toMatrix (t : TransvectionStruct n R) : Matrix n n R := transvection t.i t.j t.c @[simp] theorem toMatrix_mk (i j : n) (hij : i ≠ j) (c : R) : TransvectionStruct.toMatrix ⟨i, j, hij, c⟩ = transvection i j c := rfl @[simp] protected theorem det [Fintype n] (t : TransvectionStruct n R) : det t.toMatrix = 1 := det_transvection_of_ne _ _ t.hij _ @[simp] theorem det_toMatrix_prod [Fintype n] (L : List (TransvectionStruct n 𝕜)) : det (L.map toMatrix).prod = 1 := by induction L with | nil => simp | cons _ _ IH => simp [IH] /-- The inverse of a `TransvectionStruct`, designed so that `t.inv.toMatrix` is the inverse of `t.toMatrix`. 
-/ @[simps] protected def inv (t : TransvectionStruct n R) : TransvectionStruct n R where i := t.i j := t.j hij := t.hij c := -t.c section variable [Fintype n] theorem inv_mul (t : TransvectionStruct n R) : t.inv.toMatrix * t.toMatrix = 1 := by rcases t with ⟨_, _, t_hij⟩ simp [toMatrix, transvection_mul_transvection_same, t_hij] theorem mul_inv (t : TransvectionStruct n R) : t.toMatrix * t.inv.toMatrix = 1 := by rcases t with ⟨_, _, t_hij⟩ simp [toMatrix, transvection_mul_transvection_same, t_hij] theorem reverse_inv_prod_mul_prod (L : List (TransvectionStruct n R)) : (L.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod * (L.map toMatrix).prod = 1 := by induction L with | nil => simp | cons t L IH => suffices (L.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod * (t.inv.toMatrix * t.toMatrix) * (L.map toMatrix).prod = 1 by simpa [Matrix.mul_assoc] simpa [inv_mul] using IH theorem prod_mul_reverse_inv_prod (L : List (TransvectionStruct n R)) : (L.map toMatrix).prod * (L.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod = 1 := by induction L with | nil => simp | cons t L IH => suffices t.toMatrix * ((L.map toMatrix).prod * (L.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod) * t.inv.toMatrix = 1 by simpa [Matrix.mul_assoc] simp_rw [IH, Matrix.mul_one, t.mul_inv] /-- `M` is a scalar matrix if it commutes with every nontrivial transvection (elementary matrix). 
-/ theorem _root_.Matrix.mem_range_scalar_of_commute_transvectionStruct {M : Matrix n n R} (hM : ∀ t : TransvectionStruct n R, Commute t.toMatrix M) : M ∈ Set.range (Matrix.scalar n) := by refine mem_range_scalar_of_commute_single ?_ intro i j hij simpa [transvection, mul_add, add_mul] using (hM ⟨i, j, hij, 1⟩).eq theorem _root_.Matrix.mem_range_scalar_iff_commute_transvectionStruct {M : Matrix n n R} : M ∈ Set.range (Matrix.scalar n) ↔ ∀ t : TransvectionStruct n R, Commute t.toMatrix M := by refine ⟨fun h t => ?_, mem_range_scalar_of_commute_transvectionStruct⟩ rw [mem_range_scalar_iff_commute_single] at h refine (Commute.one_left M).add_left ?_ convert (h _ _ t.hij).smul_left t.c using 1 rw [smul_single, smul_eq_mul, mul_one] end open Sum /-- Given a `TransvectionStruct` on `n`, define the corresponding `TransvectionStruct` on `n ⊕ p` using the identity on `p`. -/ def sumInl (t : TransvectionStruct n R) : TransvectionStruct (n ⊕ p) R where i := inl t.i j := inl t.j hij := by simp [t.hij] c := t.c theorem toMatrix_sumInl (t : TransvectionStruct n R) : (t.sumInl p).toMatrix = fromBlocks t.toMatrix 0 0 1 := by cases t ext a b rcases a with a | a <;> rcases b with b | b · by_cases h : a = b <;> simp [TransvectionStruct.sumInl, transvection, h, single] · simp [TransvectionStruct.sumInl, transvection] · simp [TransvectionStruct.sumInl, transvection] · by_cases h : a = b <;> simp [TransvectionStruct.sumInl, transvection, h] @[simp] theorem sumInl_toMatrix_prod_mul [Fintype n] [Fintype p] (M : Matrix n n R) (L : List (TransvectionStruct n R)) (N : Matrix p p R) : (L.map (toMatrix ∘ sumInl p)).prod * fromBlocks M 0 0 N = fromBlocks ((L.map toMatrix).prod * M) 0 0 N := by induction L with | nil => simp | cons t L IH => simp [Matrix.mul_assoc, IH, toMatrix_sumInl, fromBlocks_multiply] @[simp] theorem mul_sumInl_toMatrix_prod [Fintype n] [Fintype p] (M : Matrix n n R) (L : List (TransvectionStruct n R)) (N : Matrix p p R) : fromBlocks M 0 0 N * (L.map (toMatrix ∘ sumInl 
p)).prod = fromBlocks (M * (L.map toMatrix).prod) 0 0 N := by induction L generalizing M N with | nil => simp | cons t L IH => simp [IH, toMatrix_sumInl, fromBlocks_multiply] variable {p} /-- Given a `TransvectionStruct` on `n` and an equivalence between `n` and `p`, define the corresponding `TransvectionStruct` on `p`. -/ def reindexEquiv (e : n ≃ p) (t : TransvectionStruct n R) : TransvectionStruct p R where i := e t.i j := e t.j hij := by simp [t.hij] c := t.c variable [Fintype n] [Fintype p] theorem toMatrix_reindexEquiv (e : n ≃ p) (t : TransvectionStruct n R) : (t.reindexEquiv e).toMatrix = reindexAlgEquiv R _ e t.toMatrix := by rcases t with ⟨t_i, t_j, _⟩ ext a b simp only [reindexEquiv, transvection, toMatrix_mk, submatrix_apply, reindex_apply, reindexAlgEquiv_apply] by_cases ha : e t_i = a <;> by_cases hb : e t_j = b <;> by_cases hab : a = b <;> simp [ha, hb, hab, ← e.apply_eq_iff_eq_symm_apply, single] theorem toMatrix_reindexEquiv_prod (e : n ≃ p) (L : List (TransvectionStruct n R)) : (L.map (toMatrix ∘ reindexEquiv e)).prod = reindexAlgEquiv R _ e (L.map toMatrix).prod := by induction L with | nil => simp | cons t L IH => simp only [toMatrix_reindexEquiv, IH, Function.comp_apply, List.prod_cons, reindexAlgEquiv_apply, List.map] exact (reindexAlgEquiv_mul R _ _ _ _).symm end TransvectionStruct end Transvection /-! ### Reducing matrices by left and right multiplication by transvections In this section, we show that any matrix can be reduced to diagonal form by left and right multiplication by transvections (or, equivalently, by elementary operations on lines and columns). The main step is to kill the last row and column of a matrix in `Fin r ⊕ Unit` with nonzero last coefficient, by subtracting this coefficient from the other ones. The list of these operations is recorded in `list_transvec_col M` and `list_transvec_row M`. 
We have to analyze inductively how these operations affect the coefficients in the last row and the last column to conclude that they have the desired effect. Once this is done, one concludes the reduction by induction on the size of the matrices, through a suitable reindexing to identify any fintype with `Fin r ⊕ Unit`. -/ namespace Pivot variable {R} {r : ℕ} (M : Matrix (Fin r ⊕ Unit) (Fin r ⊕ Unit) 𝕜) open Unit Sum Fin TransvectionStruct /-- A list of transvections such that multiplying on the left with these transvections will replace the last column with zeroes. -/ def listTransvecCol : List (Matrix (Fin r ⊕ Unit) (Fin r ⊕ Unit) 𝕜) := List.ofFn fun i : Fin r => transvection (inl i) (inr unit) <| -M (inl i) (inr unit) / M (inr unit) (inr unit) /-- A list of transvections such that multiplying on the right with these transvections will replace the last row with zeroes. -/ def listTransvecRow : List (Matrix (Fin r ⊕ Unit) (Fin r ⊕ Unit) 𝕜) := List.ofFn fun i : Fin r => transvection (inr unit) (inl i) <| -M (inr unit) (inl i) / M (inr unit) (inr unit) @[simp] theorem length_listTransvecCol : (listTransvecCol M).length = r := by simp [listTransvecCol] theorem listTransvecCol_getElem {i : ℕ} (h : i < (listTransvecCol M).length) : (listTransvecCol M)[i] = letI i' : Fin r := ⟨i, length_listTransvecCol M ▸ h⟩ transvection (inl i') (inr unit) <| -M (inl i') (inr unit) / M (inr unit) (inr unit) := by simp [listTransvecCol] @[simp] theorem length_listTransvecRow : (listTransvecRow M).length = r := by simp [listTransvecRow] theorem listTransvecRow_getElem {i : ℕ} (h : i < (listTransvecRow M).length) : (listTransvecRow M)[i] = letI i' : Fin r := ⟨i, length_listTransvecRow M ▸ h⟩ transvection (inr unit) (inl i') <| -M (inr unit) (inl i') / M (inr unit) (inr unit) := by simp [listTransvecRow] /-- Multiplying by some of the matrices in `listTransvecCol M` does not change the last row. 
-/ theorem listTransvecCol_mul_last_row_drop (i : Fin r ⊕ Unit) {k : ℕ} (hk : k ≤ r) : (((listTransvecCol M).drop k).prod * M) (inr unit) i = M (inr unit) i := by induction hk using Nat.decreasingInduction with | of_succ n hn IH => have hn' : n < (listTransvecCol M).length := by simpa [listTransvecCol] using hn rw [List.drop_eq_getElem_cons hn'] simpa [listTransvecCol, Matrix.mul_assoc] | self => simp only [length_listTransvecCol, le_refl, List.drop_eq_nil_of_le, List.prod_nil, Matrix.one_mul] /-- Multiplying by all the matrices in `listTransvecCol M` does not change the last row. -/ theorem listTransvecCol_mul_last_row (i : Fin r ⊕ Unit) : ((listTransvecCol M).prod * M) (inr unit) i = M (inr unit) i := by simpa using listTransvecCol_mul_last_row_drop M i (zero_le _) /-- Multiplying by all the matrices in `listTransvecCol M` kills all the coefficients in the last column but the last one. -/ theorem listTransvecCol_mul_last_col (hM : M (inr unit) (inr unit) ≠ 0) (i : Fin r) : ((listTransvecCol M).prod * M) (inl i) (inr unit) = 0 := by suffices H : ∀ k : ℕ, k ≤ r → (((listTransvecCol M).drop k).prod * M) (inl i) (inr unit) = if k ≤ i then 0 else M (inl i) (inr unit) by simpa only [List.drop, _root_.zero_le, ite_true] using H 0 (zero_le _) intro k hk induction hk using Nat.decreasingInduction with | of_succ n hn IH => have hn' : n < (listTransvecCol M).length := by simpa [listTransvecCol] using hn let n' : Fin r := ⟨n, hn⟩ rw [List.drop_eq_getElem_cons hn'] have A : (listTransvecCol M)[n] = transvection (inl n') (inr unit) (-M (inl n') (inr unit) / M (inr unit) (inr unit)) := by simp [n', listTransvecCol] simp only [Matrix.mul_assoc, A, List.prod_cons] by_cases h : n' = i · have hni : n = i := by cases i simp only [n', Fin.mk_eq_mk] at h simp [h] simp only [h, transvection_mul_apply_same, IH, ← hni, add_le_iff_nonpos_right, listTransvecCol_mul_last_row_drop _ _ hn] simp [field] · have hni : n ≠ i := by rintro rfl cases i simp [n'] at h simp only [ne_eq, inl.injEq, 
Ne.symm h, not_false_eq_true, transvection_mul_apply_of_ne] rw [IH] rcases le_or_gt (n + 1) i with (hi | hi) · simp only [hi, n.le_succ.trans hi, if_true] · rw [if_neg, if_neg] · simpa only [hni.symm, not_le, or_false] using Nat.lt_succ_iff_lt_or_eq.1 hi · simpa only [not_le] using hi | self => simp only [length_listTransvecCol, le_refl, List.drop_eq_nil_of_le, List.prod_nil, Matrix.one_mul] rw [if_neg] simpa only [not_le] using i.2 /-- Multiplying by some of the matrices in `listTransvecRow M` does not change the last column. -/ theorem mul_listTransvecRow_last_col_take (i : Fin r ⊕ Unit) {k : ℕ} (hk : k ≤ r) : (M * ((listTransvecRow M).take k).prod) i (inr unit) = M i (inr unit) := by induction k with | zero => simp only [Matrix.mul_one, List.prod_nil, List.take, Matrix.mul_one] | succ k IH => have hkr : k < r := hk let k' : Fin r := ⟨k, hkr⟩ have : (listTransvecRow M)[k]? = ↑(transvection (inr Unit.unit) (inl k') (-M (inr Unit.unit) (inl k') / M (inr Unit.unit) (inr Unit.unit))) := by simp only [k', listTransvecRow, hkr, dif_pos, List.getElem?_ofFn] simp only [List.take_succ, ← Matrix.mul_assoc, this, List.prod_append, Matrix.mul_one, List.prod_cons, List.prod_nil, Option.toList_some] rw [mul_transvection_apply_of_ne, IH hkr.le] simp only [Ne, not_false_iff, reduceCtorEq] /-- Multiplying by all the matrices in `listTransvecRow M` does not change the last column. -/ theorem mul_listTransvecRow_last_col (i : Fin r ⊕ Unit) : (M * (listTransvecRow M).prod) i (inr unit) = M i (inr unit) := by have A : (listTransvecRow M).length = r := by simp [listTransvecRow] rw [← List.take_length (l := listTransvecRow M), A] simpa using mul_listTransvecRow_last_col_take M i le_rfl /-- Multiplying by all the matrices in `listTransvecRow M` kills all the coefficients in the last row but the last one. 
-/ theorem mul_listTransvecRow_last_row (hM : M (inr unit) (inr unit) ≠ 0) (i : Fin r) : (M * (listTransvecRow M).prod) (inr unit) (inl i) = 0 := by suffices H : ∀ k : ℕ, k ≤ r → (M * ((listTransvecRow M).take k).prod) (inr unit) (inl i) = if k ≤ i then M (inr unit) (inl i) else 0 by have A : (listTransvecRow M).length = r := by simp [listTransvecRow] rw [← List.take_length (l := listTransvecRow M), A] have : ¬r ≤ i := by simp simpa only [this, ite_eq_right_iff] using H r le_rfl intro k hk induction k with | zero => simp only [if_true, Matrix.mul_one, List.take_zero, zero_le', List.prod_nil] | succ n IH => have hnr : n < r := hk let n' : Fin r := ⟨n, hnr⟩ have A : (listTransvecRow M)[n]? = ↑(transvection (inr unit) (inl n') (-M (inr unit) (inl n') / M (inr unit) (inr unit))) := by simp only [n', listTransvecRow, hnr, dif_pos, List.getElem?_ofFn] simp only [List.take_succ, A, ← Matrix.mul_assoc, List.prod_append, Matrix.mul_one, List.prod_cons, List.prod_nil, Option.toList_some] by_cases h : n' = i · have hni : n = i := by cases i simp only [n', Fin.mk_eq_mk] at h simp only [h] have : ¬n.succ ≤ i := by simp only [← hni, n.lt_succ_self, not_le] simp only [h, mul_transvection_apply_same, if_false, mul_listTransvecRow_last_col_take _ _ hnr.le, hni.le, this, if_true, IH hnr.le] field · have hni : n ≠ i := by rintro rfl cases i tauto simp only [IH hnr.le, Ne, mul_transvection_apply_of_ne, Ne.symm h, inl.injEq, not_false_eq_true] rcases le_or_gt (n + 1) i with (hi | hi) · simp [hi, n.le_succ.trans hi] · rw [if_neg, if_neg] · simpa only [not_le] using hi · simpa only [hni.symm, not_le, or_false] using Nat.lt_succ_iff_lt_or_eq.1 hi /-- Multiplying by all the matrices either in `listTransvecCol M` and `listTransvecRow M` kills all the coefficients in the last row but the last one. 
-/ theorem listTransvecCol_mul_mul_listTransvecRow_last_col (hM : M (inr unit) (inr unit) ≠ 0) (i : Fin r) : ((listTransvecCol M).prod * M * (listTransvecRow M).prod) (inr unit) (inl i) = 0 := by have : listTransvecRow M = listTransvecRow ((listTransvecCol M).prod * M) := by simp [listTransvecRow, listTransvecCol_mul_last_row] rw [this] apply mul_listTransvecRow_last_row simpa [listTransvecCol_mul_last_row] using hM /-- Multiplying by all the matrices either in `listTransvecCol M` and `listTransvecRow M` kills all the coefficients in the last column but the last one. -/ theorem listTransvecCol_mul_mul_listTransvecRow_last_row (hM : M (inr unit) (inr unit) ≠ 0) (i : Fin r) : ((listTransvecCol M).prod * M * (listTransvecRow M).prod) (inl i) (inr unit) = 0 := by have : listTransvecCol M = listTransvecCol (M * (listTransvecRow M).prod) := by simp [listTransvecCol, mul_listTransvecRow_last_col] rw [this, Matrix.mul_assoc] apply listTransvecCol_mul_last_col simpa [mul_listTransvecRow_last_col] using hM /-- Multiplying by all the matrices either in `listTransvecCol M` and `listTransvecRow M` turns the matrix in block-diagonal form. -/ theorem isTwoBlockDiagonal_listTransvecCol_mul_mul_listTransvecRow (hM : M (inr unit) (inr unit) ≠ 0) : IsTwoBlockDiagonal ((listTransvecCol M).prod * M * (listTransvecRow M).prod) := by constructor · ext i j have : j = unit := by simp only simp [toBlocks₁₂, this, listTransvecCol_mul_mul_listTransvecRow_last_row M hM] · ext i j have : i = unit := by simp only simp [toBlocks₂₁, this, listTransvecCol_mul_mul_listTransvecRow_last_col M hM] /-- There exist two lists of `TransvectionStruct` such that multiplying by them on the left and on the right makes a matrix block-diagonal, when the last coefficient is nonzero. 
-/ theorem exists_isTwoBlockDiagonal_of_ne_zero (hM : M (inr unit) (inr unit) ≠ 0) : ∃ L L' : List (TransvectionStruct (Fin r ⊕ Unit) 𝕜), IsTwoBlockDiagonal ((L.map toMatrix).prod * M * (L'.map toMatrix).prod) := by let L : List (TransvectionStruct (Fin r ⊕ Unit) 𝕜) := List.ofFn fun i : Fin r => ⟨inl i, inr unit, by simp, -M (inl i) (inr unit) / M (inr unit) (inr unit)⟩ let L' : List (TransvectionStruct (Fin r ⊕ Unit) 𝕜) := List.ofFn fun i : Fin r => ⟨inr unit, inl i, by simp, -M (inr unit) (inl i) / M (inr unit) (inr unit)⟩ refine ⟨L, L', ?_⟩ have A : L.map toMatrix = listTransvecCol M := by simp [L, listTransvecCol, Function.comp_def] have B : L'.map toMatrix = listTransvecRow M := by simp [L', listTransvecRow, Function.comp_def] rw [A, B] exact isTwoBlockDiagonal_listTransvecCol_mul_mul_listTransvecRow M hM /-- There exist two lists of `TransvectionStruct` such that multiplying by them on the left and on the right makes a matrix block-diagonal. -/ theorem exists_isTwoBlockDiagonal_list_transvec_mul_mul_list_transvec (M : Matrix (Fin r ⊕ Unit) (Fin r ⊕ Unit) 𝕜) : ∃ L L' : List (TransvectionStruct (Fin r ⊕ Unit) 𝕜), IsTwoBlockDiagonal ((L.map toMatrix).prod * M * (L'.map toMatrix).prod) := by by_cases H : IsTwoBlockDiagonal M · refine ⟨List.nil, List.nil, by simpa using H⟩ -- we have already proved this when the last coefficient is nonzero by_cases hM : M (inr unit) (inr unit) = 0; swap · exact exists_isTwoBlockDiagonal_of_ne_zero M hM -- when the last coefficient is zero but there is a nonzero coefficient on the last row or the -- last column, we will first put this nonzero coefficient in last position, and then argue as -- above. simp only [not_and_or, IsTwoBlockDiagonal, toBlocks₁₂, toBlocks₂₁, ← Matrix.ext_iff] at H have : ∃ i : Fin r, M (inl i) (inr unit) ≠ 0 ∨ M (inr unit) (inl i) ≠ 0 := by rcases H with H | H · contrapose! H rintro i ⟨⟩ exact (H i).1 · contrapose! 
H rintro ⟨⟩ j exact (H j).2 rcases this with ⟨i, h | h⟩ · let M' := transvection (inr Unit.unit) (inl i) 1 * M have hM' : M' (inr unit) (inr unit) ≠ 0 := by simpa [M', hM] rcases exists_isTwoBlockDiagonal_of_ne_zero M' hM' with ⟨L, L', hLL'⟩ rw [Matrix.mul_assoc] at hLL' refine ⟨L ++ [⟨inr unit, inl i, by simp, 1⟩], L', ?_⟩ simp only [List.map_append, List.prod_append, Matrix.mul_one, toMatrix_mk, List.prod_cons, List.prod_nil, List.map, Matrix.mul_assoc (L.map toMatrix).prod] exact hLL' · let M' := M * transvection (inl i) (inr unit) 1 have hM' : M' (inr unit) (inr unit) ≠ 0 := by simpa [M', hM] rcases exists_isTwoBlockDiagonal_of_ne_zero M' hM' with ⟨L, L', hLL'⟩ refine ⟨L, ⟨inl i, inr unit, by simp, 1⟩::L', ?_⟩ simp only [← Matrix.mul_assoc, toMatrix_mk, List.prod_cons, List.map] rw [Matrix.mul_assoc (L.map toMatrix).prod] exact hLL' /-- Inductive step for the reduction: if one knows that any size `r` matrix can be reduced to diagonal form by elementary operations, then one deduces it for matrices over `Fin r ⊕ Unit`. 
-/ theorem exists_list_transvec_mul_mul_list_transvec_eq_diagonal_induction (IH : ∀ M : Matrix (Fin r) (Fin r) 𝕜, ∃ (L₀ L₀' : List (TransvectionStruct (Fin r) 𝕜)) (D₀ : Fin r → 𝕜), (L₀.map toMatrix).prod * M * (L₀'.map toMatrix).prod = diagonal D₀) (M : Matrix (Fin r ⊕ Unit) (Fin r ⊕ Unit) 𝕜) : ∃ (L L' : List (TransvectionStruct (Fin r ⊕ Unit) 𝕜)) (D : Fin r ⊕ Unit → 𝕜), (L.map toMatrix).prod * M * (L'.map toMatrix).prod = diagonal D := by rcases exists_isTwoBlockDiagonal_list_transvec_mul_mul_list_transvec M with ⟨L₁, L₁', hM⟩ let M' := (L₁.map toMatrix).prod * M * (L₁'.map toMatrix).prod let M'' := toBlocks₁₁ M' rcases IH M'' with ⟨L₀, L₀', D₀, h₀⟩ set c := M' (inr unit) (inr unit) refine ⟨L₀.map (sumInl Unit) ++ L₁, L₁' ++ L₀'.map (sumInl Unit), Sum.elim D₀ fun _ => M' (inr unit) (inr unit), ?_⟩ suffices (L₀.map (toMatrix ∘ sumInl Unit)).prod * M' * (L₀'.map (toMatrix ∘ sumInl Unit)).prod = diagonal (Sum.elim D₀ fun _ => c) by simpa [M', c, Matrix.mul_assoc] have : M' = fromBlocks M'' 0 0 (diagonal fun _ => c) := by rw [← fromBlocks_toBlocks M', hM.1, hM.2] rfl rw [this] simp [h₀] variable {n p} [Fintype n] [Fintype p] /-- Reduction to diagonal form by elementary operations is invariant under reindexing. 
-/ theorem reindex_exists_list_transvec_mul_mul_list_transvec_eq_diagonal (M : Matrix p p 𝕜) (e : p ≃ n) (H : ∃ (L L' : List (TransvectionStruct n 𝕜)) (D : n → 𝕜), (L.map toMatrix).prod * Matrix.reindexAlgEquiv 𝕜 _ e M * (L'.map toMatrix).prod = diagonal D) : ∃ (L L' : List (TransvectionStruct p 𝕜)) (D : p → 𝕜), (L.map toMatrix).prod * M * (L'.map toMatrix).prod = diagonal D := by rcases H with ⟨L₀, L₀', D₀, h₀⟩ refine ⟨L₀.map (reindexEquiv e.symm), L₀'.map (reindexEquiv e.symm), D₀ ∘ e, ?_⟩ have : M = reindexAlgEquiv 𝕜 _ e.symm (reindexAlgEquiv 𝕜 _ e M) := by simp only [Equiv.symm_symm, submatrix_submatrix, reindex_apply, submatrix_id_id, Equiv.symm_comp_self, reindexAlgEquiv_apply] rw [this] simp only [toMatrix_reindexEquiv_prod, List.map_map, reindexAlgEquiv_apply] simp only [← reindexAlgEquiv_apply 𝕜, ← reindexAlgEquiv_mul, h₀] simp only [Equiv.symm_symm, reindex_apply, submatrix_diagonal_equiv, reindexAlgEquiv_apply] /-- Any matrix can be reduced to diagonal form by elementary operations. Formulated here on `Type 0` because we will make an induction using `Fin r`. See `exists_list_transvec_mul_mul_list_transvec_eq_diagonal` for the general version (which follows from this one and reindexing). 
-/ theorem exists_list_transvec_mul_mul_list_transvec_eq_diagonal_aux (n : Type) [Fintype n] [DecidableEq n] (M : Matrix n n 𝕜) : ∃ (L L' : List (TransvectionStruct n 𝕜)) (D : n → 𝕜), (L.map toMatrix).prod * M * (L'.map toMatrix).prod = diagonal D := by suffices ∀ cn, Fintype.card n = cn → ∃ (L L' : List (TransvectionStruct n 𝕜)) (D : n → 𝕜), (L.map toMatrix).prod * M * (L'.map toMatrix).prod = diagonal D by exact this _ rfl intro cn hn induction cn generalizing n M with | zero => refine ⟨List.nil, List.nil, fun _ => 1, ?_⟩ ext i j rw [Fintype.card_eq_zero_iff] at hn exact hn.elim' i | succ r IH => have e : n ≃ Fin r ⊕ Unit := by refine Fintype.equivOfCardEq ?_ rw [hn] rw [@Fintype.card_sum (Fin r) Unit _ _] simp apply reindex_exists_list_transvec_mul_mul_list_transvec_eq_diagonal M e apply exists_list_transvec_mul_mul_list_transvec_eq_diagonal_induction fun N => IH (Fin r) N (by simp) /-- Any matrix can be reduced to diagonal form by elementary operations. -/ theorem exists_list_transvec_mul_mul_list_transvec_eq_diagonal (M : Matrix n n 𝕜) : ∃ (L L' : List (TransvectionStruct n 𝕜)) (D : n → 𝕜), (L.map toMatrix).prod * M * (L'.map toMatrix).prod = diagonal D := by have e : n ≃ Fin (Fintype.card n) := Fintype.equivOfCardEq (by simp) apply reindex_exists_list_transvec_mul_mul_list_transvec_eq_diagonal M e apply exists_list_transvec_mul_mul_list_transvec_eq_diagonal_aux /-- Any matrix can be written as the product of transvections, a diagonal matrix, and transvections. 
-/ theorem exists_list_transvec_mul_diagonal_mul_list_transvec (M : Matrix n n 𝕜) : ∃ (L L' : List (TransvectionStruct n 𝕜)) (D : n → 𝕜), M = (L.map toMatrix).prod * diagonal D * (L'.map toMatrix).prod := by rcases exists_list_transvec_mul_mul_list_transvec_eq_diagonal M with ⟨L, L', D, h⟩ refine ⟨L.reverse.map TransvectionStruct.inv, L'.reverse.map TransvectionStruct.inv, D, ?_⟩ suffices M = (L.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod * (L.map toMatrix).prod * M * ((L'.map toMatrix).prod * (L'.reverse.map (toMatrix ∘ TransvectionStruct.inv)).prod) by simpa [← h, Matrix.mul_assoc] rw [reverse_inv_prod_mul_prod, prod_mul_reverse_inv_prod, Matrix.one_mul, Matrix.mul_one] end Pivot open Pivot TransvectionStruct variable {n} [Fintype n] /-- Induction principle for matrices based on transvections: if a property is true for all diagonal matrices, all transvections, and is stable under product, then it is true for all matrices. This is the useful way to say that matrices are generated by diagonal matrices and transvections. We state a slightly more general version: to prove a property for a matrix `M`, it suffices to assume that the diagonal matrices we consider have the same determinant as `M`. This is useful to obtain similar principles for `SLₙ` or `GLₙ`. 
-/ theorem diagonal_transvection_induction (P : Matrix n n 𝕜 → Prop) (M : Matrix n n 𝕜) (hdiag : ∀ D : n → 𝕜, det (diagonal D) = det M → P (diagonal D)) (htransvec : ∀ t : TransvectionStruct n 𝕜, P t.toMatrix) (hmul : ∀ A B, P A → P B → P (A * B)) : P M := by rcases exists_list_transvec_mul_diagonal_mul_list_transvec M with ⟨L, L', D, h⟩ have PD : P (diagonal D) := hdiag D (by simp [h]) suffices H : ∀ (L₁ L₂ : List (TransvectionStruct n 𝕜)) (E : Matrix n n 𝕜), P E → P ((L₁.map toMatrix).prod * E * (L₂.map toMatrix).prod) by rw [h] apply H L L' exact PD intro L₁ L₂ E PE induction L₁ with | nil => simp only [Matrix.one_mul, List.prod_nil, List.map] induction L₂ generalizing E with | nil => simpa | cons t L₂ IH => simp only [← Matrix.mul_assoc, List.prod_cons, List.map] apply IH exact hmul _ _ PE (htransvec _) | cons t L₁ IH => simp only [Matrix.mul_assoc, List.prod_cons, List.map] at IH ⊢ exact hmul _ _ (htransvec _) IH /-- Induction principle for invertible matrices based on transvections: if a property is true for all invertible diagonal matrices, all transvections, and is stable under product of invertible matrices, then it is true for all invertible matrices. This is the useful way to say that invertible matrices are generated by invertible diagonal matrices and transvections. -/ theorem diagonal_transvection_induction_of_det_ne_zero (P : Matrix n n 𝕜 → Prop) (M : Matrix n n 𝕜) (hMdet : det M ≠ 0) (hdiag : ∀ D : n → 𝕜, det (diagonal D) ≠ 0 → P (diagonal D)) (htransvec : ∀ t : TransvectionStruct n 𝕜, P t.toMatrix) (hmul : ∀ A B, det A ≠ 0 → det B ≠ 0 → P A → P B → P (A * B)) : P M := by let Q : Matrix n n 𝕜 → Prop := fun N => det N ≠ 0 ∧ P N have : Q M := by apply diagonal_transvection_induction Q M · grind · intro t exact ⟨by simp, htransvec t⟩ · intro A B QA QB exact ⟨by simp [QA.1, QB.1], hmul A B QA.1 QB.1 QA.2 QB.2⟩ exact this.2 end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/FixedDetMatrices.lean
import Mathlib.LinearAlgebra.Matrix.SpecialLinearGroup import Mathlib.Data.Int.Interval /-! # Matrices with fixed determinant This file defines the type of matrices with fixed determinant `m` and proves some basic results about them. We also prove that the subgroup of `SL(2,ℤ)` generated by `S` and `T` is the whole group. Note: Some of this was done originally in Lean 3 in the kbb (https://github.com/kim-em/kbb/tree/master) repository, so credit to those authors. -/ variable (n : Type*) [DecidableEq n] [Fintype n] (R : Type*) [CommRing R] /-- The subtype of matrices with fixed determinant `m` -/ def FixedDetMatrix (m : R) := { A : Matrix n n R // A.det = m } namespace FixedDetMatrices open Matrix hiding mul_smul open ModularGroup SpecialLinearGroup MatrixGroups /-- Extensionality theorem for `FixedDetMatrix` with respect to the underlying matrix, not entrywise. -/ lemma ext' {m : R} {A B : FixedDetMatrix n R m} (h : A.1 = B.1) : A = B := by cases A; cases B congr @[ext] lemma ext {m : R} {A B : FixedDetMatrix n R m} (h : ∀ i j, A.1 i j = B.1 i j) : A = B := by apply ext' ext i j apply h instance (m : R) : SMul (SpecialLinearGroup n R) (FixedDetMatrix n R m) where smul g A := ⟨g * A.1, by simp only [det_mul, SpecialLinearGroup.det_coe, A.2, one_mul]⟩ lemma smul_def (m : R) (g : SpecialLinearGroup n R) (A : (FixedDetMatrix n R m)) : g • A = ⟨g * A.1, by simp only [det_mul, SpecialLinearGroup.det_coe, A.2, one_mul]⟩ := rfl instance (m : R) : MulAction (SpecialLinearGroup n R) (FixedDetMatrix n R m) where one_smul b := by rw [smul_def]; simp only [coe_one, one_mul, Subtype.coe_eta] mul_smul x y b := by simp_rw [smul_def, ← mul_assoc, coe_mul] lemma smul_coe (m : R) (g : SpecialLinearGroup n R) (A : FixedDetMatrix n R m) : (g • A).1 = g * A.1 := by rw [smul_def] section IntegralFixedDetMatrices local notation:1024 "Δ" m:1024 => (FixedDetMatrix (Fin 2) ℤ m) variable {m : ℤ} /-- Set of representatives for the orbits under `S` and `T` -/ def reps (m : ℤ) : Set (Δ m) := {A 
: Δ m | (A.1 1 0) = 0 ∧ 0 < A.1 0 0 ∧ 0 ≤ A.1 0 1 ∧ |(A.1 0 1)| < |(A.1 1 1)|} /-- Reduction step for matrices in `Δ m` which moves the matrices towards `reps` -/ def reduceStep (A : Δ m) : Δ m := S • (T ^ (-(A.1 0 0 / A.1 1 0))) • A private lemma reduce_aux {A : Δ m} (h : (A.1 1 0) ≠ 0) : |((reduceStep A).1 1 0)| < |(A.1 1 0)| := by suffices ((reduceStep A).1 1 0) = A.1 0 0 % A.1 1 0 by rw [this, abs_eq_self.mpr (Int.emod_nonneg (A.1 0 0) h)] exact Int.emod_lt_abs (A.1 0 0) h simp_rw [Int.emod_def, sub_eq_add_neg, reduceStep, smul_coe, coe_T_zpow, S] norm_num [vecMul, vecHead, vecTail, mul_comm] /-- Reduction lemma for integral FixedDetMatrices. -/ @[elab_as_elim] def reduce_rec {C : Δ m → Sort*} (base : ∀ A : Δ m, (A.1 1 0) = 0 → C A) (step : ∀ A : Δ m, (A.1 1 0) ≠ 0 → C (reduceStep A) → C A) : ∀ A, C A := fun A => by by_cases h : (A.1 1 0) = 0 · exact base _ h · exact step A h (reduce_rec base step (reduceStep A)) termination_by A => Int.natAbs (A.1 1 0) decreasing_by zify exact reduce_aux h /-- Map from `Δ m → Δ m` which reduces a `FixedDetMatrix` towards a representative element in reps -/ def reduce : Δ m → Δ m := fun A ↦ if (A.1 1 0) = 0 then if 0 < A.1 0 0 then (T ^ (-(A.1 0 1 / A.1 1 1))) • A else (T ^ (-(-A.1 0 1 / -A.1 1 1))) • (S • (S • A)) --the -/- don't cancel with ℤ divs. 
else reduce (reduceStep A) termination_by b => Int.natAbs (b.1 1 0) decreasing_by next a h => zify exact reduce_aux h lemma reduce_of_pos {A : Δ m} (hc : (A.1 1 0) = 0) (ha : 0 < A.1 0 0) : reduce A = (T ^ (-(A.1 0 1 / A.1 1 1))) • A := by rw [reduce] simp only [zpow_neg, Int.ediv_neg, neg_neg] at * simp_rw [if_pos hc, if_pos ha] lemma reduce_of_not_pos {A : Δ m} (hc : (A.1 1 0) = 0) (ha : ¬ 0 < A.1 0 0) : reduce A = (T ^ (-(-A.1 0 1 / -A.1 1 1))) • (S • (S • A)) := by rw [reduce] simp only [zpow_neg, Int.ediv_neg, neg_neg] at * simp_rw [if_pos hc, if_neg ha] @[simp] lemma reduce_reduceStep {A : Δ m} (hc : (A.1 1 0) ≠ 0) : reduce (reduceStep A) = reduce A := by symm rw [reduce, if_neg hc] private lemma A_c_eq_zero {A : Δ m} (ha : A.1 1 0 = 0) : A.1 0 0 * A.1 1 1 = m := by simpa only [det_fin_two, ha, mul_zero, sub_zero] using A.2 private lemma A_d_ne_zero {A : Δ m} (ha : A.1 1 0 = 0) (hm : m ≠ 0) : A.1 1 1 ≠ 0 := right_ne_zero_of_mul (A_c_eq_zero (ha) ▸ hm) private lemma A_a_ne_zero {A : Δ m} (ha : A.1 1 0 = 0) (hm : m ≠ 0) : A.1 0 0 ≠ 0 := left_ne_zero_of_mul (A_c_eq_zero ha ▸ hm) /-- An auxiliary result bounding the size of the entries of the representatives in `reps` -/ lemma reps_entries_le_m' {A : Δ m} (h : A ∈ reps m) (i j : Fin 2) : A.1 i j ∈ Finset.Icc (-|m|) |m| := by suffices |A.1 i j| ≤ |m| from Finset.mem_Icc.mpr <| abs_le.mp this obtain ⟨h10, h00, h01, h11⟩ := h have h1 : 0 < |A.1 1 1| := (abs_nonneg _).trans_lt h11 have h2 : 0 < |A.1 0 0| := abs_pos.mpr h00.ne' fin_cases i <;> fin_cases j · simpa only [← abs_mul, A_c_eq_zero h10] using (le_mul_iff_one_le_right h2).mpr h1 · simpa only [← abs_mul, A_c_eq_zero h10] using h11.le.trans (le_mul_of_one_le_left h1.le h2) · simp_all · simpa only [← abs_mul, A_c_eq_zero h10] using (le_mul_iff_one_le_left h1).mpr h2 @[simp] lemma reps_zero_empty : reps 0 = ∅ := by rw [reps, Set.eq_empty_iff_forall_notMem] rintro A ⟨h₁, h₂, -, h₄⟩ suffices |A.1 0 1| < 0 by linarith [abs_nonneg (A.1 0 1)] have := A_c_eq_zero h₁ 
simp_all [h₂.ne'] noncomputable instance repsFintype (k : ℤ) : Fintype (reps k) := by let H := Finset.Icc (-|k|) |k| let H4 := Fin 2 → Fin 2 → H apply Fintype.ofInjective (β := H4) (f := fun M i j ↦ ⟨M.1.1 i j, reps_entries_le_m' M.2 i j⟩) intro M N h ext i j simpa only [Subtype.mk.injEq] using congrFun₂ h i j @[simp] lemma S_smul_four (A : Δ m) : S • S • S • S • A = A := by simp only [smul_def, ← mul_assoc, S_mul_S_eq, neg_mul, one_mul, mul_neg, neg_neg, Subtype.coe_eta] @[simp] lemma T_S_rel_smul (A : Δ m) : S • S • S • T • S • T • S • A = T⁻¹ • A := by simp_rw [← T_S_rel, ← smul_assoc] lemma reduce_mem_reps {m : ℤ} (hm : m ≠ 0) (A : Δ m) : reduce A ∈ reps m := by induction A using reduce_rec with | step A h1 h2 => simpa only [reduce_reduceStep h1] using h2 | base A h => have hd := A_d_ne_zero h hm by_cases h1 : 0 < A.1 0 0 · simp only [reduce_of_pos h h1] have h2 := Int.emod_def (A.1 0 1) (A.1 1 1) have h4 := Int.ediv_mul_le (A.1 0 1) hd set n : ℤ := A.1 0 1 / A.1 1 1 have h3 := Int.emod_lt_abs (A.1 0 1) hd rw [← abs_eq_self.mpr <| Int.emod_nonneg _ hd] at h3 simp only [smul_def, coe_T_zpow] suffices A.1 1 0 = 0 ∧ n * A.1 1 0 < A.1 0 0 ∧ n * A.1 1 1 ≤ A.1 0 1 ∧ |A.1 0 1 + -(n * A.1 1 1)| < |A.1 1 1| by simpa only [reps, Fin.isValue, cons_mul, Nat.succ_eq_add_one, Nat.reduceAdd, empty_mul, Equiv.symm_apply_apply, Set.mem_setOf_eq, of_apply, cons_val', vecMul, cons_dotProduct, vecHead, one_mul, vecTail, Function.comp_apply, Fin.succ_zero_eq_one, neg_mul, dotProduct_of_isEmpty, add_zero, zero_mul, zero_add, empty_val', cons_val_fin_one, cons_val_one, cons_val_zero, lt_add_neg_iff_add_lt, le_add_neg_iff_add_le] simp_all only [mul_comm n, zero_mul, ← sub_eq_add_neg, ← h2, Fin.isValue, and_true] · simp only [reps, Fin.isValue, reduce_of_not_pos h h1, Int.ediv_neg, neg_neg, smul_def, ← mul_assoc, S_mul_S_eq, neg_mul, one_mul, coe_T_zpow, mul_neg, cons_mul, Nat.succ_eq_add_one, Nat.reduceAdd, empty_mul, Equiv.symm_apply_apply, neg_of, neg_cons, neg_empty, 
Set.mem_setOf_eq, of_apply, cons_val', Pi.neg_apply, vecMul, cons_dotProduct, vecHead, vecTail, Function.comp_apply, Fin.succ_zero_eq_one, h, mul_zero, dotProduct_of_isEmpty, add_zero, zero_mul, neg_zero, empty_val', cons_val_fin_one, cons_val_one, cons_val_zero, lt_neg, neg_add_rev, zero_add, le_add_neg_iff_add_le, ← le_neg, abs_neg, true_and] refine ⟨?_, Int.ediv_mul_le _ hd, ?_⟩ · simp only [Int.lt_iff_le_and_ne] exact ⟨not_lt.mp h1, A_a_ne_zero h hm⟩ · rw [mul_comm, add_comm, ← Int.sub_eq_add_neg, ← Int.emod_def, abs_eq_self.mpr <| Int.emod_nonneg _ hd] exact Int.emod_lt_abs _ hd variable {C : Δ m → Prop} private lemma prop_red_S (hS : ∀ B, C B → C (S • B)) (B) : C (S • B) ↔ C B := by refine ⟨?_, hS _⟩ intro ih rw [← (S_smul_four B)] solve_by_elim private lemma prop_red_T (hS : ∀ B, C B → C (S • B)) (hT : ∀ B, C B → C (T • B)) (B) : C (T • B) ↔ C B := by refine ⟨?_, hT _⟩ intro ih rw [show B = T⁻¹ • T • B by simp, ← T_S_rel_smul] solve_by_elim (maxDepth := 10) private lemma prop_red_T_pow (hS : ∀ B, C B → C (S • B)) (hT : ∀ B, C B → C (T • B)) : ∀ B (n : ℤ), C (T ^ n • B) ↔ C B := by intro B n induction n with | zero => simp only [zpow_zero, one_smul] | succ n hn => simpa only [add_comm (n:ℤ), zpow_add _ 1, ← smul_eq_mul, zpow_one, smul_assoc, prop_red_T hS hT] | pred m hm => rwa [sub_eq_neg_add, zpow_add, zpow_neg_one, ← prop_red_T hS hT, mul_smul, smul_inv_smul] @[elab_as_elim] theorem induction_on {C : Δ m → Prop} {A : Δ m} (hm : m ≠ 0) (h0 : ∀ A : Δ m, A.1 1 0 = 0 → 0 < A.1 0 0 → 0 ≤ A.1 0 1 → |(A.1 0 1)| < |(A.1 1 1)| → C A) (hS : ∀ B, C B → C (S • B)) (hT : ∀ B, C B → C (T • B)) : C A := by have h_reduce : C (reduce A) := by rcases reduce_mem_reps hm A with ⟨H1, H2, H3, H4⟩ exact h0 _ H1 H2 H3 H4 suffices ∀ A : Δ m, C (reduce A) → C A from this _ h_reduce apply reduce_rec · intro A h by_cases h1 : 0 < A.1 0 0 · simp only [reduce_of_pos h h1, prop_red_T_pow hS hT, imp_self] · simp only [reduce_of_not_pos h h1, prop_red_T_pow hS hT, prop_red_S hS, imp_self] 
intro A hc ih hA rw [← reduce_reduceStep hc] at hA simpa only [reduceStep, prop_red_S hS, prop_red_T_pow hS hT] using ih hA lemma reps_one_id (A : FixedDetMatrix (Fin 2) ℤ 1) (a1 : A.1 1 0 = 0) (a4 : 0 < A.1 0 0) (a6 : |A.1 0 1| < |(A.1 1 1)|) : A = (1 : SL(2, ℤ)) := by have := Int.mul_eq_one_iff_eq_one_or_neg_one.mp (A_c_eq_zero a1) ext i j fin_cases i <;> fin_cases j <;> aesop end IntegralFixedDetMatrices end FixedDetMatrices open MatrixGroups FixedDetMatrices section SL2Z_generators open ModularGroup Subgroup /-- `SL(2, ℤ)` is generated by `S` and `T`. -/ lemma SpecialLinearGroup.SL2Z_generators : closure {S, T} = ⊤ := by rw [eq_top_iff'] intro A induction A using (induction_on one_ne_zero) with | h0 A a1 a4 _ a6 => rw [reps_one_id A a1 a4 a6] exact one_mem _ | hS B hb => exact mul_mem (subset_closure (Set.mem_insert S {T})) hb | hT B hb => exact mul_mem (subset_closure (Set.mem_insert_of_mem S rfl)) hb end SL2Z_generators
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/DotProduct.lean
import Mathlib.Algebra.Order.Star.Basic import Mathlib.Algebra.Star.Pi import Mathlib.LinearAlgebra.Matrix.RowCol /-! # Dot product of two vectors This file contains some results on the map `dotProduct`, which maps two vectors `v w : n → R` to the sum of the entrywise products `v i * w i`. ## Main results * `dotProduct_stdBasis_one`: the dot product of `v` with the `i`th standard basis vector is `v i` * `dotProduct_eq_zero_iff`: if `v`'s dot product with all `w` is zero, then `v` is zero ## Tags matrix -/ variable {m n p R : Type*} section Semiring variable [Semiring R] [Fintype n] theorem dotProduct_eq (v w : n → R) (h : ∀ u, v ⬝ᵥ u = w ⬝ᵥ u) : v = w := by funext x classical rw [← dotProduct_single_one v x, ← dotProduct_single_one w x, h] theorem dotProduct_eq_iff {v w : n → R} : (∀ u, v ⬝ᵥ u = w ⬝ᵥ u) ↔ v = w := ⟨fun h => dotProduct_eq v w h, fun h _ => h ▸ rfl⟩ theorem dotProduct_eq_zero (v : n → R) (h : ∀ w, v ⬝ᵥ w = 0) : v = 0 := dotProduct_eq _ _ fun u => (h u).symm ▸ (zero_dotProduct u).symm theorem dotProduct_eq_zero_iff {v : n → R} : (∀ w, v ⬝ᵥ w = 0) ↔ v = 0 := ⟨fun h => dotProduct_eq_zero v h, fun h w => h.symm ▸ zero_dotProduct w⟩ end Semiring section OrderedSemiring variable [Semiring R] [PartialOrder R] [IsOrderedRing R] [Fintype n] lemma dotProduct_nonneg_of_nonneg {v w : n → R} (hv : 0 ≤ v) (hw : 0 ≤ w) : 0 ≤ v ⬝ᵥ w := Finset.sum_nonneg (fun i _ => mul_nonneg (hv i) (hw i)) lemma dotProduct_le_dotProduct_of_nonneg_right {u v w : n → R} (huv : u ≤ v) (hw : 0 ≤ w) : u ⬝ᵥ w ≤ v ⬝ᵥ w := by unfold dotProduct; gcongr <;> apply_rules lemma dotProduct_le_dotProduct_of_nonneg_left {u v w : n → R} (huv : u ≤ v) (hw : 0 ≤ w) : w ⬝ᵥ u ≤ w ⬝ᵥ v := by unfold dotProduct; gcongr <;> apply_rules end OrderedSemiring section Self variable [Fintype m] [Fintype n] [Fintype p] @[simp] theorem dotProduct_self_eq_zero [Ring R] [LinearOrder R] [IsStrictOrderedRing R] {v : n → R} : v ⬝ᵥ v = 0 ↔ v = 0 := (Finset.sum_eq_zero_iff_of_nonneg fun i _ => mul_self_nonneg (v 
i)).trans <| by simp [funext_iff] section StarOrderedRing variable [PartialOrder R] [NonUnitalRing R] [StarRing R] [StarOrderedRing R] /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. -/ @[simp] theorem dotProduct_star_self_nonneg (v : n → R) : 0 ≤ star v ⬝ᵥ v := Fintype.sum_nonneg fun _ => star_mul_self_nonneg _ /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. -/ @[simp] theorem dotProduct_self_star_nonneg (v : n → R) : 0 ≤ v ⬝ᵥ star v := Fintype.sum_nonneg fun _ => mul_star_self_nonneg _ variable [NoZeroDivisors R] /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. -/ @[simp] theorem dotProduct_star_self_eq_zero {v : n → R} : star v ⬝ᵥ v = 0 ↔ v = 0 := (Fintype.sum_eq_zero_iff_of_nonneg fun _ => star_mul_self_nonneg _).trans <| by simp [funext_iff, mul_eq_zero] /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. -/ @[simp] theorem dotProduct_self_star_eq_zero {v : n → R} : v ⬝ᵥ star v = 0 ↔ v = 0 := (Fintype.sum_eq_zero_iff_of_nonneg fun _ => mul_star_self_nonneg _).trans <| by simp [funext_iff, mul_eq_zero] namespace Matrix @[simp] lemma conjTranspose_mul_self_eq_zero {n} {A : Matrix m n R} : Aᴴ * A = 0 ↔ A = 0 := ⟨fun h => Matrix.ext fun i j => (congr_fun <| dotProduct_star_self_eq_zero.1 <| Matrix.ext_iff.2 h j j) i, fun h => h ▸ Matrix.mul_zero _⟩ @[simp] lemma self_mul_conjTranspose_eq_zero {m} {A : Matrix m n R} : A * Aᴴ = 0 ↔ A = 0 := ⟨fun h => Matrix.ext fun i j => (congr_fun <| dotProduct_self_star_eq_zero.1 <| Matrix.ext_iff.2 h i i) j, fun h => h ▸ Matrix.zero_mul _⟩ lemma conjTranspose_mul_self_mul_eq_zero {p} (A : Matrix m n R) (B : Matrix n p R) : (Aᴴ * A) * B = 0 ↔ A * B = 0 := by refine ⟨fun h => ?_, fun h => by simp only [Matrix.mul_assoc, h, Matrix.mul_zero]⟩ apply_fun (Bᴴ * ·) at h rwa [Matrix.mul_zero, Matrix.mul_assoc, ← Matrix.mul_assoc, ← conjTranspose_mul, conjTranspose_mul_self_eq_zero] at h lemma self_mul_conjTranspose_mul_eq_zero {p} (A : Matrix m n R) (B : Matrix m p R) : 
(A * Aᴴ) * B = 0 ↔ Aᴴ * B = 0 := by simpa only [conjTranspose_conjTranspose] using conjTranspose_mul_self_mul_eq_zero Aᴴ _ lemma mul_self_mul_conjTranspose_eq_zero {p} (A : Matrix m n R) (B : Matrix p m R) : B * (A * Aᴴ) = 0 ↔ B * A = 0 := by rw [← conjTranspose_eq_zero, conjTranspose_mul, conjTranspose_mul, conjTranspose_conjTranspose, self_mul_conjTranspose_mul_eq_zero, ← conjTranspose_mul, conjTranspose_eq_zero] lemma mul_conjTranspose_mul_self_eq_zero {p} (A : Matrix m n R) (B : Matrix p n R) : B * (Aᴴ * A) = 0 ↔ B * Aᴴ = 0 := by simpa only [conjTranspose_conjTranspose] using mul_self_mul_conjTranspose_eq_zero Aᴴ _ lemma conjTranspose_mul_self_mulVec_eq_zero (A : Matrix m n R) (v : n → R) : (Aᴴ * A) *ᵥ v = 0 ↔ A *ᵥ v = 0 := by simpa only [← Matrix.replicateCol_mulVec, replicateCol_eq_zero] using conjTranspose_mul_self_mul_eq_zero A (replicateCol (Fin 1) v) lemma self_mul_conjTranspose_mulVec_eq_zero (A : Matrix m n R) (v : m → R) : (A * Aᴴ) *ᵥ v = 0 ↔ Aᴴ *ᵥ v = 0 := by simpa only [conjTranspose_conjTranspose] using conjTranspose_mul_self_mulVec_eq_zero Aᴴ _ lemma vecMul_conjTranspose_mul_self_eq_zero (A : Matrix m n R) (v : n → R) : v ᵥ* (Aᴴ * A) = 0 ↔ v ᵥ* Aᴴ = 0 := by simpa only [← Matrix.replicateRow_vecMul, replicateRow_eq_zero] using mul_conjTranspose_mul_self_eq_zero A (replicateRow (Fin 1) v) lemma vecMul_self_mul_conjTranspose_eq_zero (A : Matrix m n R) (v : m → R) : v ᵥ* (A * Aᴴ) = 0 ↔ v ᵥ* A = 0 := by simpa only [conjTranspose_conjTranspose] using vecMul_conjTranspose_mul_self_eq_zero Aᴴ _ /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. 
-/ @[simp] theorem dotProduct_star_self_pos_iff {v : n → R} : 0 < star v ⬝ᵥ v ↔ v ≠ 0 := by nontriviality R refine (Fintype.sum_pos_iff_of_nonneg fun i => star_mul_self_nonneg _).trans ?_ simp_rw [Pi.lt_def, Function.ne_iff, Pi.zero_apply] refine (and_iff_right fun i => star_mul_self_nonneg (v i)).trans <| exists_congr fun i => ?_ constructor · rintro h hv simp [hv] at h · exact (star_mul_self_pos <| isRegular_of_ne_zero ·) /-- Note that this applies to `ℂ` via `RCLike.toStarOrderedRing`. -/ @[simp] theorem dotProduct_self_star_pos_iff {v : n → R} : 0 < dotProduct v (star v) ↔ v ≠ 0 := by simpa using dotProduct_star_self_pos_iff (v := star v) end Matrix end StarOrderedRing end Self
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Orthogonal.lean
import Mathlib.Data.Matrix.Mul /-! # Orthogonal This file contains definitions and properties concerning orthogonality of rows and columns. ## Main results - `matrix.HasOrthogonalRows`: `A.HasOrthogonalRows` means `A` has orthogonal (with respect to `dotProduct`) rows. - `matrix.HasOrthogonalCols`: `A.HasOrthogonalCols` means `A` has orthogonal (with respect to `dotProduct`) columns. ## Tags orthogonal -/ assert_not_exists Field namespace Matrix variable {α n m : Type*} variable [Mul α] [AddCommMonoid α] variable (A : Matrix m n α) open Matrix /-- `A.HasOrthogonalRows` means matrix `A` has orthogonal rows (with respect to `dotProduct`). -/ def HasOrthogonalRows [Fintype n] : Prop := ∀ ⦃i₁ i₂⦄, i₁ ≠ i₂ → A i₁ ⬝ᵥ A i₂ = 0 /-- `A.HasOrthogonalCols` means matrix `A` has orthogonal columns (with respect to `dotProduct`). -/ def HasOrthogonalCols [Fintype m] : Prop := HasOrthogonalRows Aᵀ /-- `Aᵀ` has orthogonal rows iff `A` has orthogonal columns. -/ @[simp] theorem transpose_hasOrthogonalRows_iff_hasOrthogonalCols [Fintype m] : Aᵀ.HasOrthogonalRows ↔ A.HasOrthogonalCols := Iff.rfl /-- `Aᵀ` has orthogonal columns iff `A` has orthogonal rows. -/ @[simp] theorem transpose_hasOrthogonalCols_iff_hasOrthogonalRows [Fintype n] : Aᵀ.HasOrthogonalCols ↔ A.HasOrthogonalRows := Iff.rfl variable {A} theorem HasOrthogonalRows.hasOrthogonalCols [Fintype m] (h : Aᵀ.HasOrthogonalRows) : A.HasOrthogonalCols := h theorem HasOrthogonalCols.transpose_hasOrthogonalRows [Fintype m] (h : A.HasOrthogonalCols) : Aᵀ.HasOrthogonalRows := h theorem HasOrthogonalCols.hasOrthogonalRows [Fintype n] (h : Aᵀ.HasOrthogonalCols) : A.HasOrthogonalRows := h theorem HasOrthogonalRows.transpose_hasOrthogonalCols [Fintype n] (h : A.HasOrthogonalRows) : Aᵀ.HasOrthogonalCols := h end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/ProjectiveSpecialLinearGroup.lean
import Mathlib.LinearAlgebra.Matrix.SpecialLinearGroup /-! # Projective Special Linear Group ## Notation In the `MatrixGroups` locale: * `PSL(n, R)` is a shorthand for `Matrix.ProjectiveSpecialLinearGroup (Fin n) R` -/ namespace Matrix universe u v open Matrix LinearMap open scoped MatrixGroups variable (n : Type u) [DecidableEq n] [Fintype n] (R : Type v) [CommRing R] /-- A projective special linear group is the quotient of a special linear group by its center. -/ abbrev ProjectiveSpecialLinearGroup : Type _ := SpecialLinearGroup n R ⧸ Subgroup.center (SpecialLinearGroup n R) /-- `PSL(n, R)` is the projective special linear group `SL(n, R)/Z(SL(n, R))`. -/ scoped[MatrixGroups] notation "PSL(" n ", " R ")" => Matrix.ProjectiveSpecialLinearGroup (Fin n) R end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Adjugate.lean
import Mathlib.Algebra.Regular.Basic import Mathlib.LinearAlgebra.Matrix.MvPolynomial import Mathlib.LinearAlgebra.Matrix.Polynomial /-! # Cramer's rule and adjugate matrices The adjugate matrix is the transpose of the cofactor matrix. It is calculated with Cramer's rule, which we introduce first. The vectors returned by Cramer's rule are given by the linear map `cramer`, which sends a matrix `A` and vector `b` to the vector consisting of the determinant of replacing the `i`th column of `A` with `b` at index `i` (written as `(A.update_column i b).det`). Using Cramer's rule, we can compute for each matrix `A` the matrix `adjugate A`. The entries of the adjugate are the minors of `A`. Instead of defining a minor by deleting row `i` and column `j` of `A`, we replace the `i`th row of `A` with the `j`th basis vector; the resulting matrix has the same determinant but more importantly equals Cramer's rule applied to `A` and the `j`th basis vector, simplifying the subsequent proofs. We prove the adjugate behaves like `det A • A⁻¹`. ## Main definitions * `Matrix.cramer A b`: the vector output by Cramer's rule on `A` and `b`. * `Matrix.adjugate A`: the adjugate (or classical adjoint) of the matrix `A`. ## References * https://en.wikipedia.org/wiki/Cramer's_rule#Finding_inverse_matrix ## Tags cramer, cramer's rule, adjugate -/ namespace Matrix universe u v w variable {m : Type u} {n : Type v} {α : Type w} variable [DecidableEq n] [Fintype n] [DecidableEq m] [Fintype m] [CommRing α] open Matrix Polynomial Equiv Equiv.Perm Finset section Cramer /-! ### `cramer` section Introduce the linear map `cramer` with values defined by `cramerMap`. After defining `cramerMap` and showing it is linear, we will restrict our proofs to using `cramer`. -/ variable (A : Matrix n n α) (b : n → α) /-- `cramerMap A b i` is the determinant of the matrix `A` with column `i` replaced with `b`, and thus `cramerMap A b` is the vector output by Cramer's rule on `A` and `b`. 
If `A * x = b` has a unique solution in `x`, `cramerMap A` sends the vector `b` to `A.det • x`. Otherwise, the outcome of `cramerMap` is well-defined but not necessarily useful. -/ def cramerMap (i : n) : α := (A.updateCol i b).det theorem cramerMap_is_linear (i : n) : IsLinearMap α fun b => cramerMap A b i := { map_add := det_updateCol_add _ _ map_smul := det_updateCol_smul _ _ } theorem cramer_is_linear : IsLinearMap α (cramerMap A) := by constructor <;> intros <;> ext i · apply (cramerMap_is_linear A i).1 · apply (cramerMap_is_linear A i).2 /-- `cramer A b i` is the determinant of the matrix `A` with column `i` replaced with `b`, and thus `cramer A b` is the vector output by Cramer's rule on `A` and `b`. If `A * x = b` has a unique solution in `x`, `cramer A` sends the vector `b` to `A.det • x`. Otherwise, the outcome of `cramer` is well-defined but not necessarily useful. -/ def cramer (A : Matrix n n α) : (n → α) →ₗ[α] (n → α) := IsLinearMap.mk' (cramerMap A) (cramer_is_linear A) theorem cramer_apply (i : n) : cramer A b i = (A.updateCol i b).det := rfl theorem cramer_transpose_apply (i : n) : cramer Aᵀ b i = (A.updateRow i b).det := by rw [cramer_apply, updateCol_transpose, det_transpose] theorem cramer_transpose_row_self (i : n) : Aᵀ.cramer (A i) = Pi.single i A.det := by ext j rw [cramer_apply, Pi.single_apply] split_ifs with h · -- i = j: this entry should be `A.det` subst h simp only [updateCol_transpose, det_transpose, updateRow_eq_self] · -- i ≠ j: this entry should be 0 rw [updateCol_transpose, det_transpose] apply det_zero_of_row_eq h rw [updateRow_self, updateRow_ne (Ne.symm h)] theorem cramer_row_self (i : n) (h : ∀ j, b j = A j i) : A.cramer b = Pi.single i A.det := by rw [← transpose_transpose A, det_transpose] convert cramer_transpose_row_self Aᵀ i exact funext h @[simp] theorem cramer_one : cramer (1 : Matrix n n α) = 1 := by ext i j convert congr_fun (cramer_row_self (1 : Matrix n n α) (Pi.single i 1) i _) j · simp · intro j rw 
[Matrix.one_eq_pi_single, Pi.single_comm] theorem cramer_smul (r : α) (A : Matrix n n α) : cramer (r • A) = r ^ (Fintype.card n - 1) • cramer A := LinearMap.ext fun _ => funext fun _ => det_updateCol_smul_left _ _ _ _ @[simp] theorem cramer_subsingleton_apply [Subsingleton n] (A : Matrix n n α) (b : n → α) (i : n) : cramer A b i = b i := by rw [cramer_apply, det_eq_elem_of_subsingleton _ i, updateCol_self] theorem cramer_zero [Nontrivial n] : cramer (0 : Matrix n n α) = 0 := by ext i j obtain ⟨j', hj'⟩ : ∃ j', j' ≠ j := exists_ne j apply det_eq_zero_of_column_eq_zero j' intro j'' simp [updateCol_ne hj'] /-- Use linearity of `cramer` to take it out of a summation. -/ theorem sum_cramer {β} (s : Finset β) (f : β → n → α) : (∑ x ∈ s, cramer A (f x)) = cramer A (∑ x ∈ s, f x) := (map_sum (cramer A) ..).symm /-- Use linearity of `cramer` and vector evaluation to take `cramer A _ i` out of a summation. -/ theorem sum_cramer_apply {β} (s : Finset β) (f : n → β → α) (i : n) : (∑ x ∈ s, cramer A (fun j => f j x) i) = cramer A (fun j : n => ∑ x ∈ s, f j x) i := calc (∑ x ∈ s, cramer A (fun j => f j x) i) = (∑ x ∈ s, cramer A fun j => f j x) i := (Finset.sum_apply i s _).symm _ = cramer A (fun j : n => ∑ x ∈ s, f j x) i := by rw [sum_cramer, cramer_apply, cramer_apply] simp only [updateCol] congr with j congr apply Finset.sum_apply theorem cramer_submatrix_equiv (A : Matrix m m α) (e : n ≃ m) (b : n → α) : cramer (A.submatrix e e) b = cramer A (b ∘ e.symm) ∘ e := by ext i simp_rw [Function.comp_apply, cramer_apply, updateCol_submatrix_equiv, det_submatrix_equiv_self e, Function.comp_def] theorem cramer_reindex (e : m ≃ n) (A : Matrix m m α) (b : n → α) : cramer (reindex e e A) b = cramer A (b ∘ e) ∘ e.symm := cramer_submatrix_equiv _ _ _ end Cramer section Adjugate /-! ### `adjugate` section Define the `adjugate` matrix and a few equations. These will hold for any matrix over a commutative ring. -/ /-- The adjugate matrix is the transpose of the cofactor matrix. 
Typically, the cofactor matrix is defined by taking minors, i.e. the determinant of the matrix with a row and column removed. However, the proof of `mul_adjugate` becomes a lot easier if we use the matrix replacing a column with a basis vector, since it allows us to use facts about the `cramer` map. -/ def adjugate (A : Matrix n n α) : Matrix n n α := of fun i => cramer Aᵀ (Pi.single i 1) theorem adjugate_def (A : Matrix n n α) : adjugate A = of fun i => cramer Aᵀ (Pi.single i 1) := rfl theorem adjugate_apply (A : Matrix n n α) (i j : n) : adjugate A i j = (A.updateRow j (Pi.single i 1)).det := by rw [adjugate_def, of_apply, cramer_apply, updateCol_transpose, det_transpose] theorem adjugate_transpose (A : Matrix n n α) : (adjugate A)ᵀ = adjugate Aᵀ := by ext i j rw [transpose_apply, adjugate_apply, adjugate_apply, updateRow_transpose, det_transpose] rw [det_apply', det_apply'] apply Finset.sum_congr rfl intro σ _ congr 1 by_cases h : i = σ j · -- Everything except `(i, j)` (= `(σ j, j)`) is given by A, and the rest is a single `1`. congr ext j' subst h have : σ j' = σ j ↔ j' = j := σ.injective.eq_iff rw [updateRow_apply, updateCol_apply] simp_rw [this] rw [← dite_eq_ite, ← dite_eq_ite] congr 1 with rfl rw [Pi.single_eq_same, Pi.single_eq_same] · -- Otherwise, we need to show that there is a `0` somewhere in the product. 
have : (∏ j' : n, updateCol A j (Pi.single i 1) (σ j') j') = 0 := by apply prod_eq_zero (mem_univ j) rw [updateCol_self, Pi.single_eq_of_ne' h] rw [this] apply prod_eq_zero (mem_univ (σ⁻¹ i)) erw [apply_symm_apply σ i, updateRow_self] apply Pi.single_eq_of_ne intro h' exact h ((symm_apply_eq σ).mp h') @[simp] theorem adjugate_submatrix_equiv_self (e : n ≃ m) (A : Matrix m m α) : adjugate (A.submatrix e e) = (adjugate A).submatrix e e := by ext i j have : (fun j ↦ Pi.single i 1 <| e.symm j) = Pi.single (e i) 1 := Function.update_comp_equiv (0 : n → α) e.symm i 1 rw [adjugate_apply, submatrix_apply, adjugate_apply, ← det_submatrix_equiv_self e, updateRow_submatrix_equiv, this] theorem adjugate_reindex (e : m ≃ n) (A : Matrix m m α) : adjugate (reindex e e A) = reindex e e (adjugate A) := adjugate_submatrix_equiv_self _ _ /-- Since the map `b ↦ cramer A b` is linear in `b`, it must be multiplication by some matrix. This matrix is `A.adjugate`. -/ theorem cramer_eq_adjugate_mulVec (A : Matrix n n α) (b : n → α) : cramer A b = A.adjugate *ᵥ b := by nth_rw 2 [← A.transpose_transpose] rw [← adjugate_transpose, adjugate_def] have : b = ∑ i, b i • (Pi.single i 1 : n → α) := by refine (pi_eq_sum_univ b).trans ?_ congr with j simp [Pi.single_apply, eq_comm] conv_lhs => rw [this] ext k simp [mulVec, dotProduct, mul_comm] theorem mul_adjugate_apply (A : Matrix n n α) (i j k) : A i k * adjugate A k j = cramer Aᵀ (Pi.single k (A i k)) j := by rw [← smul_eq_mul, adjugate, of_apply, ← Pi.smul_apply, ← LinearMap.map_smul, ← Pi.single_smul', smul_eq_mul, mul_one] theorem mul_adjugate (A : Matrix n n α) : A * adjugate A = A.det • (1 : Matrix n n α) := by ext i j rw [mul_apply, Pi.smul_apply, Pi.smul_apply, one_apply, smul_eq_mul, mul_boole] simp [mul_adjugate_apply, sum_cramer_apply, cramer_transpose_row_self, Pi.single_apply, eq_comm] theorem adjugate_mul (A : Matrix n n α) : adjugate A * A = A.det • (1 : Matrix n n α) := calc adjugate A * A = (Aᵀ * adjugate Aᵀ)ᵀ := by rw [← 
adjugate_transpose, ← transpose_mul, transpose_transpose] _ = _ := by rw [mul_adjugate Aᵀ, det_transpose, transpose_smul, transpose_one] theorem adjugate_smul (r : α) (A : Matrix n n α) : adjugate (r • A) = r ^ (Fintype.card n - 1) • adjugate A := by rw [adjugate, adjugate, transpose_smul, cramer_smul] rfl /-- A stronger form of **Cramer's rule** that allows us to solve some instances of `A * x = b` even if the determinant is not a unit. A sufficient (but still not necessary) condition is that `A.det` divides `b`. -/ @[simp] theorem mulVec_cramer (A : Matrix n n α) (b : n → α) : A *ᵥ cramer A b = A.det • b := by rw [cramer_eq_adjugate_mulVec, mulVec_mulVec, mul_adjugate, smul_mulVec, one_mulVec] theorem adjugate_subsingleton [Subsingleton n] (A : Matrix n n α) : adjugate A = 1 := by ext i j simp [Subsingleton.elim i j, adjugate_apply, det_eq_elem_of_subsingleton _ i, one_apply] theorem adjugate_eq_one_of_card_eq_one {A : Matrix n n α} (h : Fintype.card n = 1) : adjugate A = 1 := haveI : Subsingleton n := Fintype.card_le_one_iff_subsingleton.mp h.le adjugate_subsingleton _ @[simp] theorem adjugate_zero [Nontrivial n] : adjugate (0 : Matrix n n α) = 0 := by ext i j obtain ⟨j', hj'⟩ : ∃ j', j' ≠ j := exists_ne j apply det_eq_zero_of_column_eq_zero j' intro j'' simp [updateCol_ne hj'] @[simp] theorem adjugate_one : adjugate (1 : Matrix n n α) = 1 := by ext simp [adjugate_def, Matrix.one_apply, Pi.single_apply, eq_comm] @[simp] theorem adjugate_diagonal (v : n → α) : adjugate (diagonal v) = diagonal fun i => ∏ j ∈ Finset.univ.erase i, v j := by ext i j simp only [adjugate_def, cramer_apply, diagonal_transpose, of_apply] obtain rfl | hij := eq_or_ne i j · rw [diagonal_apply_eq, diagonal_updateCol_single, det_diagonal, prod_update_of_mem (Finset.mem_univ _), sdiff_singleton_eq_erase, one_mul] · rw [diagonal_apply_ne _ hij] refine det_eq_zero_of_row_eq_zero j fun k => ?_ obtain rfl | hjk := eq_or_ne k j · rw [updateCol_self, Pi.single_eq_of_ne' hij] · rw [updateCol_ne hjk, 
diagonal_apply_ne' _ hjk] theorem _root_.RingHom.map_adjugate {R S : Type*} [CommRing R] [CommRing S] (f : R →+* S) (M : Matrix n n R) : f.mapMatrix M.adjugate = Matrix.adjugate (f.mapMatrix M) := by ext i k have : Pi.single i (1 : S) = f ∘ Pi.single i 1 := by rw [← f.map_one] exact Pi.single_op (fun _ => f) (fun _ => f.map_zero) i (1 : R) rw [adjugate_apply, RingHom.mapMatrix_apply, map_apply, RingHom.mapMatrix_apply, this, ← map_updateRow, ← RingHom.mapMatrix_apply, ← RingHom.map_det, ← adjugate_apply] theorem _root_.AlgHom.map_adjugate {R A B : Type*} [CommSemiring R] [CommRing A] [CommRing B] [Algebra R A] [Algebra R B] (f : A →ₐ[R] B) (M : Matrix n n A) : f.mapMatrix M.adjugate = Matrix.adjugate (f.mapMatrix M) := f.toRingHom.map_adjugate _ theorem det_adjugate (A : Matrix n n α) : (adjugate A).det = A.det ^ (Fintype.card n - 1) := by -- get rid of the `- 1` rcases (Fintype.card n).eq_zero_or_pos with h_card | h_card · haveI : IsEmpty n := Fintype.card_eq_zero_iff.mp h_card rw [h_card, Nat.zero_sub, pow_zero, adjugate_subsingleton, det_one] replace h_card := tsub_add_cancel_of_le h_card.nat_succ_le -- express `A` as an evaluation of a polynomial in n^2 variables, and solve in the polynomial ring -- where `A'.det` is non-zero. 
let A' := mvPolynomialX n n ℤ suffices A'.adjugate.det = A'.det ^ (Fintype.card n - 1) by rw [← mvPolynomialX_mapMatrix_aeval ℤ A, ← AlgHom.map_adjugate, ← AlgHom.map_det, ← AlgHom.map_det, ← map_pow, this] apply mul_left_cancel₀ (show A'.det ≠ 0 from det_mvPolynomialX_ne_zero n ℤ) calc A'.det * A'.adjugate.det = (A' * adjugate A').det := (det_mul _ _).symm _ = A'.det ^ Fintype.card n := by rw [mul_adjugate, det_smul, det_one, mul_one] _ = A'.det * A'.det ^ (Fintype.card n - 1) := by rw [← pow_succ', h_card] @[simp] theorem adjugate_fin_zero (A : Matrix (Fin 0) (Fin 0) α) : adjugate A = 0 := Subsingleton.elim _ _ @[simp] theorem adjugate_fin_one (A : Matrix (Fin 1) (Fin 1) α) : adjugate A = 1 := adjugate_subsingleton A theorem adjugate_fin_succ_eq_det_submatrix {n : ℕ} (A : Matrix (Fin n.succ) (Fin n.succ) α) (i j) : adjugate A i j = (-1) ^ (j + i : ℕ) * det (A.submatrix j.succAbove i.succAbove) := by simp_rw [adjugate_apply, det_succ_row _ j, updateRow_self, submatrix_updateRow_succAbove] rw [Fintype.sum_eq_single i fun h hjk => ?_, Pi.single_eq_same, mul_one] rw [Pi.single_eq_of_ne hjk, mul_zero, zero_mul] theorem adjugate_fin_two (A : Matrix (Fin 2) (Fin 2) α) : adjugate A = !![A 1 1, -A 0 1; -A 1 0, A 0 0] := by ext i j rw [adjugate_fin_succ_eq_det_submatrix] fin_cases i <;> fin_cases j <;> simp @[simp] theorem adjugate_fin_two_of (a b c d : α) : adjugate !![a, b; c, d] = !![d, -b; -c, a] := adjugate_fin_two _ theorem adjugate_fin_three (A : Matrix (Fin 3) (Fin 3) α) : adjugate A = !![A 1 1 * A 2 2 - A 1 2 * A 2 1, -(A 0 1 * A 2 2) + A 0 2 * A 2 1, A 0 1 * A 1 2 - A 0 2 * A 1 1; -(A 1 0 * A 2 2) + A 1 2 * A 2 0, A 0 0 * A 2 2 - A 0 2 * A 2 0, -(A 0 0 * A 1 2) + A 0 2 * A 1 0; A 1 0 * A 2 1 - A 1 1 * A 2 0, -(A 0 0 * A 2 1) + A 0 1 * A 2 0, A 0 0 * A 1 1 - A 0 1 * A 1 0] := by ext i j rw [adjugate_fin_succ_eq_det_submatrix, det_fin_two] fin_cases i <;> fin_cases j <;> simp [Fin.succAbove, Fin.lt_def] <;> ring set_option linter.style.commandStart false in -- Use 
spaces to format a matrix. @[simp] theorem adjugate_fin_three_of (a b c d e f g h i : α) : adjugate !![a, b, c; d, e, f; g, h, i] = !![ e * i - f * h, -(b * i) + c * h, b * f - c * e; -(d * i) + f * g, a * i - c * g, -(a * f) + c * d; d * h - e * g, -(a * h) + b * g, a * e - b * d] := adjugate_fin_three _ theorem det_eq_sum_mul_adjugate_row (A : Matrix n n α) (i : n) : det A = ∑ j : n, A i j * adjugate A j i := by haveI : Nonempty n := ⟨i⟩ obtain ⟨n', hn'⟩ := Nat.exists_eq_succ_of_ne_zero (Fintype.card_ne_zero : Fintype.card n ≠ 0) obtain ⟨e⟩ := Fintype.truncEquivFinOfCardEq hn' let A' := reindex e e A suffices det A' = ∑ j : Fin n'.succ, A' (e i) j * adjugate A' j (e i) by simp_rw [A', det_reindex_self, adjugate_reindex, reindex_apply, submatrix_apply, ← e.sum_comp, Equiv.symm_apply_apply] at this exact this rw [det_succ_row A' (e i)] simp_rw [mul_assoc, mul_left_comm _ (A' _ _), ← adjugate_fin_succ_eq_det_submatrix] theorem det_eq_sum_mul_adjugate_col (A : Matrix n n α) (j : n) : det A = ∑ i : n, A i j * adjugate A j i := by simpa only [det_transpose, ← adjugate_transpose] using det_eq_sum_mul_adjugate_row Aᵀ j theorem adjugate_conjTranspose [StarRing α] (A : Matrix n n α) : A.adjugateᴴ = adjugate Aᴴ := by dsimp only [conjTranspose] have : Aᵀ.adjugate.map star = adjugate (Aᵀ.map star) := (starRingEnd α).map_adjugate Aᵀ rw [A.adjugate_transpose, this] theorem isRegular_of_isLeftRegular_det {A : Matrix n n α} (hA : IsLeftRegular A.det) : IsRegular A := by constructor · intro B C h refine hA.matrix ?_ simp only at h ⊢ rw [← Matrix.one_mul B, ← Matrix.one_mul C, ← Matrix.smul_mul, ← Matrix.smul_mul, ← adjugate_mul, Matrix.mul_assoc, Matrix.mul_assoc, h] · intro B C (h : B * A = C * A) refine hA.matrix ?_ simp only rw [← Matrix.mul_one B, ← Matrix.mul_one C, ← Matrix.mul_smul, ← Matrix.mul_smul, ← mul_adjugate, ← Matrix.mul_assoc, ← Matrix.mul_assoc, h] theorem adjugate_mul_distrib_aux (A B : Matrix n n α) (hA : IsLeftRegular A.det) (hB : IsLeftRegular B.det) : 
adjugate (A * B) = adjugate B * adjugate A := by have hAB : IsLeftRegular (A * B).det := by rw [det_mul] exact hA.mul hB refine (isRegular_of_isLeftRegular_det hAB).left ?_ simp only rw [mul_adjugate, Matrix.mul_assoc, ← Matrix.mul_assoc B, mul_adjugate, smul_mul, Matrix.one_mul, mul_smul, mul_adjugate, smul_smul, mul_comm, ← det_mul] /-- Proof follows from "The trace Cayley-Hamilton theorem" by Darij Grinberg, Section 5.3 -/ theorem adjugate_mul_distrib (A B : Matrix n n α) : adjugate (A * B) = adjugate B * adjugate A := by let g : Matrix n n α → Matrix n n α[X] := fun M => M.map Polynomial.C + (Polynomial.X : α[X]) • (1 : Matrix n n α[X]) let f' : Matrix n n α[X] →+* Matrix n n α := (Polynomial.evalRingHom 0).mapMatrix have f'_inv : ∀ M, f' (g M) = M := by intro ext simp [f', g] have f'_adj : ∀ M : Matrix n n α, f' (adjugate (g M)) = adjugate M := by intro rw [RingHom.map_adjugate, f'_inv] have f'_g_mul : ∀ M N : Matrix n n α, f' (g M * g N) = M * N := by intro M N rw [RingHom.map_mul, f'_inv, f'_inv] have hu : ∀ M : Matrix n n α, IsRegular (g M).det := by intro M refine Polynomial.Monic.isRegular ?_ simp only [g, Polynomial.Monic.def, ← Polynomial.leadingCoeff_det_X_one_add_C M, add_comm] rw [← f'_adj, ← f'_adj, ← f'_adj, ← f'.map_mul, ← adjugate_mul_distrib_aux _ _ (hu A).left (hu B).left, RingHom.map_adjugate, RingHom.map_adjugate, f'_inv, f'_g_mul] @[simp] theorem adjugate_pow (A : Matrix n n α) (k : ℕ) : adjugate (A ^ k) = adjugate A ^ k := by induction k with | zero => simp | succ k IH => rw [pow_succ', adjugate_mul_distrib, IH, pow_succ] theorem det_smul_adjugate_adjugate (A : Matrix n n α) : det A • adjugate (adjugate A) = det A ^ (Fintype.card n - 1) • A := by have : A * (A.adjugate * A.adjugate.adjugate) = A * (A.det ^ (Fintype.card n - 1) • (1 : Matrix n n α)) := by rw [← adjugate_mul_distrib, adjugate_mul, adjugate_smul, adjugate_one] rwa [← Matrix.mul_assoc, mul_adjugate, Matrix.mul_smul, Matrix.mul_one, Matrix.smul_mul, Matrix.one_mul] at this /-- 
Note that this is not true for `Fintype.card n = 1` since `1 - 2 = 0` and not `-1`. -/ theorem adjugate_adjugate (A : Matrix n n α) (h : Fintype.card n ≠ 1) : adjugate (adjugate A) = det A ^ (Fintype.card n - 2) • A := by -- get rid of the `- 2` rcases h_card : Fintype.card n with _ | n' · subsingleton [Fintype.card_eq_zero_iff.mp h_card] cases n' · exact (h h_card).elim rw [← h_card] -- express `A` as an evaluation of a polynomial in n^2 variables, and solve in the polynomial ring -- where `A'.det` is non-zero. let A' := mvPolynomialX n n ℤ suffices adjugate (adjugate A') = det A' ^ (Fintype.card n - 2) • A' by rw [← mvPolynomialX_mapMatrix_aeval ℤ A, ← AlgHom.map_adjugate, ← AlgHom.map_adjugate, this, ← AlgHom.map_det, ← map_pow (MvPolynomial.aeval fun p : n × n ↦ A p.1 p.2), AlgHom.mapMatrix_apply, AlgHom.mapMatrix_apply, Matrix.map_smul' _ _ _ (map_mul _)] have h_card' : Fintype.card n - 2 + 1 = Fintype.card n - 1 := by simp [h_card] have is_reg : IsSMulRegular (MvPolynomial (n × n) ℤ) (det A') := fun x y => mul_left_cancel₀ (det_mvPolynomialX_ne_zero n ℤ) apply is_reg.matrix simp only rw [smul_smul, ← pow_succ', h_card', det_smul_adjugate_adjugate] /-- A weaker version of `Matrix.adjugate_adjugate` that uses `Nontrivial`. -/ theorem adjugate_adjugate' (A : Matrix n n α) [Nontrivial n] : adjugate (adjugate A) = det A ^ (Fintype.card n - 2) • A := adjugate_adjugate _ <| Fintype.one_lt_card.ne' end Adjugate end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Permanent.lean
import Mathlib.Data.Fintype.Perm import Mathlib.LinearAlgebra.Matrix.RowCol /-! # Permanent of a matrix This file defines the permanent of a matrix, `Matrix.permanent`, and some of its properties. ## Main definitions * `Matrix.permanent`: the permanent of a square matrix, as a sum over permutations -/ open Equiv Fintype Finset namespace Matrix variable {n : Type*} [DecidableEq n] [Fintype n] variable {R : Type*} [CommSemiring R] /-- The permanent of a square matrix defined as a sum over all permutations. This is analogous to the determinant but without alternating signs. -/ def permanent (M : Matrix n n R) : R := ∑ σ : Perm n, ∏ i, M (σ i) i @[simp] theorem permanent_diagonal {d : n → R} : permanent (diagonal d) = ∏ i, d i := by refine (sum_eq_single 1 (fun σ _ hσ ↦ ?_) (fun h ↦ (h <| mem_univ _).elim)).trans ?_ · match not_forall.mp (mt Equiv.ext hσ) with | ⟨x, hx⟩ => exact Finset.prod_eq_zero (mem_univ x) (if_neg hx) · simp only [Perm.one_apply, diagonal_apply_eq] @[simp] theorem permanent_zero [Nonempty n] : permanent (0 : Matrix n n R) = 0 := by simp [permanent] @[simp] theorem permanent_one : permanent (1 : Matrix n n R) = 1 := by rw [← diagonal_one]; simp [-diagonal_one] theorem permanent_isEmpty [IsEmpty n] {A : Matrix n n R} : permanent A = 1 := by simp [permanent] theorem permanent_eq_one_of_card_eq_zero {A : Matrix n n R} (h : card n = 0) : permanent A = 1 := haveI : IsEmpty n := card_eq_zero_iff.mp h permanent_isEmpty /-- If `n` has only one element, the permanent of an `n` by `n` matrix is just that element. Although `Unique` implies `DecidableEq` and `Fintype`, the instances might not be syntactically equal. Thus, we need to fill in the args explicitly. 
-/ @[simp] theorem permanent_unique {n : Type*} [Unique n] [DecidableEq n] [Fintype n] (A : Matrix n n R) : permanent A = A default default := by simp [permanent, univ_unique] theorem permanent_eq_elem_of_subsingleton [Subsingleton n] (A : Matrix n n R) (k : n) : permanent A = A k k := by have := uniqueOfSubsingleton k convert permanent_unique A theorem permanent_eq_elem_of_card_eq_one {A : Matrix n n R} (h : card n = 1) (k : n) : permanent A = A k k := haveI : Subsingleton n := card_le_one_iff_subsingleton.mp h.le permanent_eq_elem_of_subsingleton _ _ /-- Transposing a matrix preserves the permanent. -/ @[simp] theorem permanent_transpose (M : Matrix n n R) : Mᵀ.permanent = M.permanent := by refine sum_bijective _ inv_involutive.bijective _ _ ?_ intro σ apply Fintype.prod_equiv σ simp /-- Permuting the columns does not change the permanent. -/ theorem permanent_permute_cols (σ : Perm n) (M : Matrix n n R) : (M.submatrix σ id).permanent = M.permanent := (Group.mulLeft_bijective σ).sum_comp fun τ ↦ ∏ i : n, M (τ i) i /-- Permuting the rows does not change the permanent. 
-/ theorem permanent_permute_rows (σ : Perm n) (M : Matrix n n R) : (M.submatrix id σ).permanent = M.permanent := by rw [← permanent_transpose, transpose_submatrix, permanent_permute_cols, permanent_transpose] @[simp] theorem permanent_smul (M : Matrix n n R) (c : R) : permanent (c • M) = c ^ Fintype.card n * permanent M := by simp only [permanent, smul_apply, smul_eq_mul, Finset.mul_sum] congr ext rw [mul_comm] conv in ∏ _, c * _ => simp [mul_comm c]; exact prod_mul_pow_card.symm @[simp] theorem permanent_updateCol_smul (M : Matrix n n R) (j : n) (c : R) (u : n → R) : permanent (updateCol M j <| c • u) = c * permanent (updateCol M j u) := by simp only [permanent, ← mul_prod_erase _ _ (mem_univ j), updateCol_self, Pi.smul_apply, smul_eq_mul, mul_sum, ← mul_assoc] congr 1 with p rw [Finset.prod_congr rfl (fun i hi ↦ ?_)] simp only [ne_eq, ne_of_mem_erase hi, not_false_eq_true, updateCol_ne] @[simp] theorem permanent_updateRow_smul (M : Matrix n n R) (j : n) (c : R) (u : n → R) : permanent (updateRow M j <| c • u) = c * permanent (updateRow M j u) := by rw [← permanent_transpose, ← updateCol_transpose, permanent_updateCol_smul, updateCol_transpose, permanent_transpose] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/StdBasis.lean
import Mathlib.Data.Matrix.Basis import Mathlib.LinearAlgebra.StdBasis /-! # Standard basis on matrices ## Main results * `Basis.matrix`: extend a basis on `M` to the standard basis on `Matrix n m M` -/ open Module namespace Module.Basis variable {ι R M : Type*} (m n : Type*) variable [Fintype m] [Fintype n] [Semiring R] [AddCommMonoid M] [Module R M] /-- The standard basis of `Matrix m n M` given a basis on `M`. -/ protected noncomputable def matrix (b : Basis ι R M) : Basis (m × n × ι) R (Matrix m n M) := Basis.reindex (Pi.basis fun _ : m => Pi.basis fun _ : n => b) ((Equiv.sigmaEquivProd _ _).trans <| .prodCongr (.refl _) (Equiv.sigmaEquivProd _ _)) |>.map (Matrix.ofLinearEquiv R) variable {n m} @[simp] theorem matrix_apply (b : Basis ι R M) (i : m) (j : n) (k : ι) [DecidableEq m] [DecidableEq n] : b.matrix m n (i, j, k) = Matrix.single i j (b k) := by simp [Basis.matrix, Matrix.single_eq_of_single_single] end Module.Basis namespace Matrix variable (R : Type*) (m n : Type*) [Fintype m] [Finite n] [Semiring R] /-- The standard basis of `Matrix m n R`. -/ noncomputable def stdBasis : Basis (m × n) R (Matrix m n R) := Basis.reindex (Pi.basis fun _ : m => Pi.basisFun R n) (Equiv.sigmaEquivProd _ _) |>.map (ofLinearEquiv R) variable {n m} theorem stdBasis_eq_single (i : m) (j : n) [DecidableEq m] [DecidableEq n] : stdBasis R m n (i, j) = single i j (1 : R) := by simp [stdBasis, single_eq_of_single_single] @[deprecated (since := "2025-05-05")] alias stdBasis_eq_stdBasisMatrix := stdBasis_eq_single end Matrix namespace Module.Free variable (R M : Type*) [Semiring R] [AddCommMonoid M] [Module R M] [Module.Free R M] /-- The module of finite matrices is free. -/ instance matrix {m n : Type*} [Finite m] [Finite n] : Module.Free R (Matrix m n M) := Module.Free.pi R _ end Module.Free
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/InvariantBasisNumber.lean
import Mathlib.Algebra.Module.Projective import Mathlib.LinearAlgebra.Matrix.ToLin import Mathlib.LinearAlgebra.Matrix.SemiringInverse import Mathlib.LinearAlgebra.InvariantBasisNumber /-! # Invertible matrices over a ring with invariant basis number are square. -/ variable {n m : Type*} [Fintype n] [DecidableEq n] [Fintype m] [DecidableEq m] variable {R : Type*} [Semiring R] [InvariantBasisNumber R] theorem Matrix.square_of_invertible (M : Matrix n m R) (N : Matrix m n R) (h : M * N = 1) (h' : N * M = 1) : Fintype.card n = Fintype.card m := card_eq_of_linearEquiv R (Matrix.toLinearEquivRight'OfInv h' h) open Function in /-- Nontrivial commutative semirings `R` satisfy the rank condition. If `R` is moreover a ring, then it satisfies the strong rank condition, see `commRing_strongRankCondition`. It is unclear whether this generalizes to semirings. -/ instance (priority := 100) rankCondition_of_nontrivial_of_commSemiring {R : Type*} [CommSemiring R] [Nontrivial R] : RankCondition R where le_of_fin_surjective {n m} f hf := by by_contra! lt let p : (Fin m → R) →ₗ[R] Fin n → R := .funLeft R R (Fin.castLE lt.le) have hp : Surjective p := LinearMap.funLeft_surjective_of_injective _ _ _ (Fin.castLE_injective lt.le) have ⟨g, eq⟩ := (p ∘ₗ f).exists_rightInverse_of_surjective (LinearMap.range_eq_top.mpr <| hp.comp hf) let e := Matrix.toLinAlgEquiv' (R := R) (n := Fin n).symm apply_fun e at eq rw [← Module.End.mul_eq_comp, ← Module.End.one_eq_id, map_mul, map_one, Matrix.mul_eq_one_comm_of_equiv (Equiv.refl _), ← map_mul, ← map_one e, e.injective.eq_iff] at eq have : Injective p := (p.coe_comp f ▸ LinearMap.injective_of_comp_eq_id _ _ eq).of_comp_right hf have ⟨⟨i, lt⟩, eq⟩ := injective_comp_right_iff_surjective.mp this ⟨n, lt⟩ exact lt.ne congr($eq)
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Dual.lean
import Mathlib.LinearAlgebra.DFinsupp import Mathlib.LinearAlgebra.Dual.Basis import Mathlib.LinearAlgebra.Matrix.ToLin /-! # Dual space, linear maps and matrices. This file contains some results about matrices and dual spaces. ## Tags matrix, linear_map, transpose, dual -/ open Matrix Module section Transpose variable {K V₁ V₂ ι₁ ι₂ : Type*} [CommSemiring K] [AddCommGroup V₁] [Module K V₁] [AddCommGroup V₂] [Module K V₂] [Fintype ι₁] [Fintype ι₂] [DecidableEq ι₁] [DecidableEq ι₂] {B₁ : Basis ι₁ K V₁} {B₂ : Basis ι₂ K V₂} @[simp] theorem LinearMap.toMatrix_transpose (u : V₁ →ₗ[K] V₂) : LinearMap.toMatrix B₂.dualBasis B₁.dualBasis (Module.Dual.transpose (R := K) u) = (LinearMap.toMatrix B₁ B₂ u)ᵀ := by ext i j simp only [LinearMap.toMatrix_apply, Module.Dual.transpose_apply, B₁.dualBasis_repr, B₂.dualBasis_apply, Matrix.transpose_apply, LinearMap.comp_apply] @[simp] theorem Matrix.toLin_transpose (M : Matrix ι₁ ι₂ K) : Matrix.toLin B₁.dualBasis B₂.dualBasis Mᵀ = Module.Dual.transpose (R := K) (Matrix.toLin B₂ B₁ M) := by apply (LinearMap.toMatrix B₁.dualBasis B₂.dualBasis).injective rw [LinearMap.toMatrix_toLin, LinearMap.toMatrix_transpose, LinearMap.toMatrix_toLin] end Transpose /-- The dot product as a linear equivalence to the dual. -/ @[simps] def dotProductEquiv (R n : Type*) [CommSemiring R] [Fintype n] [DecidableEq n] : (n → R) ≃ₗ[R] Module.Dual R (n → R) where toFun v := ⟨⟨dotProduct v, dotProduct_add v⟩, fun t ↦ dotProduct_smul t v⟩ map_add' v w := by ext; simp map_smul' t v := by ext; simp invFun f i := f (LinearMap.single R _ i 1) left_inv v := by simp right_inv f := by ext; simp
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Polynomial.lean
import Mathlib.Algebra.Polynomial.BigOperators import Mathlib.Algebra.Polynomial.Degree.Lemmas import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.Tactic.ComputeDegree /-! # Matrices of polynomials and polynomials of matrices In this file, we prove results about matrices over a polynomial ring. In particular, we give results about the polynomial given by `det (t * I + A)`. ## References * "The trace Cayley-Hamilton theorem" by Darij Grinberg, Section 5.3 ## Tags matrix determinant, polynomial -/ open Matrix Polynomial variable {n α : Type*} [DecidableEq n] [Fintype n] [CommRing α] open Polynomial Matrix Equiv.Perm namespace Polynomial theorem natDegree_det_X_add_C_le (A B : Matrix n n α) : natDegree (det ((X : α[X]) • A.map C + B.map C : Matrix n n α[X])) ≤ Fintype.card n := by rw [det_apply] refine (natDegree_sum_le _ _).trans ?_ refine Multiset.max_le_of_forall_le _ _ ?_ simp only [forall_apply_eq_imp_iff, true_and, Function.comp_apply, Multiset.mem_map, exists_imp, Finset.mem_univ_val] intro g calc natDegree (sign g • ∏ i : n, (X • A.map C + B.map C : Matrix n n α[X]) (g i) i) ≤ natDegree (∏ i : n, (X • A.map C + B.map C : Matrix n n α[X]) (g i) i) := by rcases Int.units_eq_one_or (sign g) with sg | sg · rw [sg, one_smul] · rw [sg, Units.neg_smul, one_smul, natDegree_neg] _ ≤ ∑ i : n, natDegree (((X : α[X]) • A.map C + B.map C : Matrix n n α[X]) (g i) i) := (natDegree_prod_le (Finset.univ : Finset n) fun i : n => (X • A.map C + B.map C : Matrix n n α[X]) (g i) i) _ ≤ Finset.univ.card • 1 := (Finset.sum_le_card_nsmul _ _ 1 fun (i : n) _ => ?_) _ ≤ Fintype.card n := by simp [mul_one, Finset.card_univ] dsimp only [add_apply, smul_apply, map_apply, smul_eq_mul] compute_degree theorem coeff_det_X_add_C_zero (A B : Matrix n n α) : coeff (det ((X : α[X]) • A.map C + B.map C)) 0 = det B := by rw [det_apply, finset_sum_coeff, det_apply] refine Finset.sum_congr rfl ?_ rintro g - convert coeff_smul (R := α) (sign g) _ 0 rw [coeff_zero_prod] refine 
Finset.prod_congr rfl ?_ simp theorem coeff_det_X_add_C_card (A B : Matrix n n α) : coeff (det ((X : α[X]) • A.map C + B.map C)) (Fintype.card n) = det A := by rw [det_apply, det_apply, finset_sum_coeff] refine Finset.sum_congr rfl ?_ simp only [Finset.mem_univ, forall_true_left] intro g convert coeff_smul (R := α) (sign g) _ _ rw [← mul_one (Fintype.card n)] convert (coeff_prod_of_natDegree_le (R := α) _ _ _ _).symm · simp [coeff_C] · rintro p - dsimp only [add_apply, smul_apply, map_apply, smul_eq_mul] compute_degree theorem leadingCoeff_det_X_one_add_C (A : Matrix n n α) : leadingCoeff (det ((X : α[X]) • (1 : Matrix n n α[X]) + A.map C)) = 1 := by cases subsingleton_or_nontrivial α · simp [eq_iff_true_of_subsingleton] rw [← @det_one n, ← coeff_det_X_add_C_card _ A, leadingCoeff] simp only [Matrix.map_one, C_eq_zero, RingHom.map_one] rcases (natDegree_det_X_add_C_le 1 A).eq_or_lt with h | h · simp only [RingHom.map_one, Matrix.map_one, C_eq_zero] at h rw [h] · -- contradiction. we have a hypothesis that the degree is less than |n| -- but we know that coeff _ n = 1 have H := coeff_eq_zero_of_natDegree_lt h rw [coeff_det_X_add_C_card] at H simp at H end Polynomial
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/HermitianFunctionalCalculus.lean
import Mathlib.Analysis.Matrix.HermitianFunctionalCalculus deprecated_module (since := "2025-11-13")
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/SpecialLinearGroup.lean
import Mathlib.LinearAlgebra.Matrix.Adjugate import Mathlib.LinearAlgebra.Matrix.ToLin import Mathlib.LinearAlgebra.Matrix.Transvection import Mathlib.RingTheory.RootsOfUnity.Basic /-! # The Special Linear group $SL(n, R)$ This file defines the elements of the Special Linear group `SpecialLinearGroup n R`, consisting of all square `R`-matrices with determinant `1` on the fintype `n` by `n`. In addition, we define the group structure on `SpecialLinearGroup n R` and the embedding into the general linear group `GeneralLinearGroup R (n → R)`. ## Main definitions * `Matrix.SpecialLinearGroup` is the type of matrices with determinant 1 * `Matrix.SpecialLinearGroup.group` gives the group structure (under multiplication) * `Matrix.SpecialLinearGroup.toGL` is the embedding `SLₙ(R) → GLₙ(R)` ## Notation For `m : ℕ`, we introduce the notation `SL(m,R)` for the special linear group on the fintype `n = Fin m`, in the scope `MatrixGroups`. ## Implementation notes The inverse operation in the `SpecialLinearGroup` is defined to be the adjugate matrix, so that `SpecialLinearGroup n R` has a group structure for all `CommRing R`. We define the elements of `SpecialLinearGroup` to be matrices, since we need to compute their determinant. This is in contrast with `GeneralLinearGroup R M`, which consists of invertible `R`-linear maps on `M`. We provide `Matrix.SpecialLinearGroup.hasCoeToFun` for convenience, but do not state any lemmas about it, and use `Matrix.SpecialLinearGroup.coeFn_eq_coe` to eliminate it `⇑` in favor of a regular `↑` coercion. ## References * https://en.wikipedia.org/wiki/Special_linear_group ## Tags matrix group, group, matrix inverse -/ namespace Matrix universe u v open LinearMap section variable (n : Type u) [DecidableEq n] [Fintype n] (R : Type v) [CommRing R] /-- `SpecialLinearGroup n R` is the group of `n` by `n` `R`-matrices with determinant equal to 1. 
-/ def SpecialLinearGroup := { A : Matrix n n R // A.det = 1 } end @[inherit_doc] scoped[MatrixGroups] notation "SL(" n ", " R ")" => Matrix.SpecialLinearGroup (Fin n) R namespace SpecialLinearGroup variable {n : Type u} [DecidableEq n] [Fintype n] {R : Type v} [CommRing R] instance hasCoeToMatrix : Coe (SpecialLinearGroup n R) (Matrix n n R) := ⟨fun A => A.val⟩ /-- In this file, Lean often has a hard time working out the values of `n` and `R` for an expression like `det ↑A`. Rather than writing `(A : Matrix n n R)` everywhere in this file which is annoyingly verbose, or `A.val` which is not the simp-normal form for subtypes, we create a local notation `↑ₘA`. This notation references the local `n` and `R` variables, so is not valid as a global notation. -/ local notation:1024 "↑ₘ" A:1024 => ((A : SpecialLinearGroup n R) : Matrix n n R) section CoeFnInstance /-- This instance is here for convenience, but is literally the same as the coercion from `hasCoeToMatrix`. -/ instance instCoeFun : CoeFun (SpecialLinearGroup n R) fun _ => n → n → R where coe A := ↑ₘA end CoeFnInstance theorem ext_iff (A B : SpecialLinearGroup n R) : A = B ↔ ∀ i j, A i j = B i j := Subtype.ext_iff.trans Matrix.ext_iff.symm @[ext] theorem ext (A B : SpecialLinearGroup n R) : (∀ i j, A i j = B i j) → A = B := (SpecialLinearGroup.ext_iff A B).mpr instance subsingleton_of_subsingleton [Subsingleton n] : Subsingleton (SpecialLinearGroup n R) := by refine ⟨fun ⟨A, hA⟩ ⟨B, hB⟩ ↦ ?_⟩ ext i j rcases isEmpty_or_nonempty n with hn | hn; · exfalso; exact IsEmpty.false i rw [det_eq_elem_of_subsingleton _ i] at hA hB simp only [Subsingleton.elim j i, hA, hB] instance hasInv : Inv (SpecialLinearGroup n R) := ⟨fun A => ⟨adjugate A, by rw [det_adjugate, A.prop, one_pow]⟩⟩ instance hasMul : Mul (SpecialLinearGroup n R) := ⟨fun A B => ⟨A * B, by rw [det_mul, A.prop, B.prop, one_mul]⟩⟩ instance hasOne : One (SpecialLinearGroup n R) := ⟨⟨1, det_one⟩⟩ instance : Pow (SpecialLinearGroup n R) ℕ where pow x n := ⟨x ^ 
n, (det_pow _ _).trans <| x.prop.symm ▸ one_pow _⟩ instance : Inhabited (SpecialLinearGroup n R) := ⟨1⟩ instance [Fintype R] [DecidableEq R] : Fintype (SpecialLinearGroup n R) := Subtype.fintype _ instance [Finite R] : Finite (SpecialLinearGroup n R) := Subtype.finite /-- The transpose of a matrix in `SL(n, R)` -/ def transpose (A : SpecialLinearGroup n R) : SpecialLinearGroup n R := ⟨A.1.transpose, A.1.det_transpose ▸ A.2⟩ @[inherit_doc] scoped postfix:1024 "ᵀ" => SpecialLinearGroup.transpose section CoeLemmas variable (A B : SpecialLinearGroup n R) theorem coe_mk (A : Matrix n n R) (h : det A = 1) : ↑(⟨A, h⟩ : SpecialLinearGroup n R) = A := rfl @[simp] theorem coe_inv : ↑ₘ(A⁻¹) = adjugate A := rfl @[simp] theorem coe_mul : ↑ₘ(A * B) = ↑ₘA * ↑ₘB := rfl @[simp] theorem coe_one : (1 : SpecialLinearGroup n R) = (1 : Matrix n n R) := rfl @[simp] theorem det_coe : det ↑ₘA = 1 := A.2 @[simp] theorem coe_pow (m : ℕ) : ↑ₘ(A ^ m) = ↑ₘA ^ m := rfl @[simp] lemma coe_transpose (A : SpecialLinearGroup n R) : ↑ₘAᵀ = (↑ₘA)ᵀ := rfl theorem det_ne_zero [Nontrivial R] (g : SpecialLinearGroup n R) : det ↑ₘg ≠ 0 := by rw [g.det_coe] norm_num theorem row_ne_zero [Nontrivial R] (g : SpecialLinearGroup n R) (i : n) : g i ≠ 0 := fun h => g.det_ne_zero <| det_eq_zero_of_row_eq_zero i <| by simp [h] end CoeLemmas instance monoid : Monoid (SpecialLinearGroup n R) := Function.Injective.monoid _ Subtype.coe_injective coe_one coe_mul coe_pow instance : Group (SpecialLinearGroup n R) := { SpecialLinearGroup.monoid, SpecialLinearGroup.hasInv with inv_mul_cancel := fun A => by ext1 simp [adjugate_mul] } /-- A version of `Matrix.toLin' A` that produces linear equivalences. 
-/ def toLin' : SpecialLinearGroup n R →* (n → R) ≃ₗ[R] n → R where toFun A := LinearEquiv.ofLinear (Matrix.toLin' ↑ₘA) (Matrix.toLin' ↑ₘA⁻¹) (by rw [← toLin'_mul, ← coe_mul, mul_inv_cancel, coe_one, toLin'_one]) (by rw [← toLin'_mul, ← coe_mul, inv_mul_cancel, coe_one, toLin'_one]) map_one' := LinearEquiv.toLinearMap_injective Matrix.toLin'_one map_mul' A B := LinearEquiv.toLinearMap_injective <| Matrix.toLin'_mul ↑ₘA ↑ₘB theorem toLin'_apply (A : SpecialLinearGroup n R) (v : n → R) : SpecialLinearGroup.toLin' A v = Matrix.toLin' (↑ₘA) v := rfl theorem toLin'_to_linearMap (A : SpecialLinearGroup n R) : ↑(SpecialLinearGroup.toLin' A) = Matrix.toLin' ↑ₘA := rfl theorem toLin'_symm_apply (A : SpecialLinearGroup n R) (v : n → R) : A.toLin'.symm v = Matrix.toLin' (↑ₘA⁻¹) v := rfl theorem toLin'_symm_to_linearMap (A : SpecialLinearGroup n R) : ↑A.toLin'.symm = Matrix.toLin' ↑ₘA⁻¹ := rfl theorem toLin'_injective : Function.Injective ↑(toLin' : SpecialLinearGroup n R →* (n → R) ≃ₗ[R] n → R) := fun _ _ h => Subtype.coe_injective <| Matrix.toLin'.injective <| LinearEquiv.toLinearMap_injective.eq_iff.mpr h variable {S : Type*} [CommRing S] /-- A ring homomorphism from `R` to `S` induces a group homomorphism from `SpecialLinearGroup n R` to `SpecialLinearGroup n S`. 
-/ @[simps] def map (f : R →+* S) : SpecialLinearGroup n R →* SpecialLinearGroup n S where toFun g := ⟨f.mapMatrix ↑ₘg, by rw [← f.map_det] simp [g.prop]⟩ map_one' := Subtype.ext <| f.mapMatrix.map_one map_mul' x y := Subtype.ext <| f.mapMatrix.map_mul ↑ₘx ↑ₘy section center open Subgroup @[simp] theorem center_eq_bot_of_subsingleton [Subsingleton n] : center (SpecialLinearGroup n R) = ⊥ := eq_bot_iff.mpr fun x _ ↦ by rw [mem_bot, Subsingleton.elim x 1] theorem scalar_eq_self_of_mem_center {A : SpecialLinearGroup n R} (hA : A ∈ center (SpecialLinearGroup n R)) (i : n) : scalar n (A i i) = A := by obtain ⟨r : R, hr : scalar n r = A⟩ := mem_range_scalar_of_commute_transvectionStruct fun t ↦ Subtype.ext_iff.mp <| Subgroup.mem_center_iff.mp hA ⟨t.toMatrix, by simp⟩ simp [← congr_fun₂ hr i i, ← hr] theorem scalar_eq_coe_self_center (A : center (SpecialLinearGroup n R)) (i : n) : scalar n ((A : Matrix n n R) i i) = A := scalar_eq_self_of_mem_center A.property i /-- The center of a special linear group of degree `n` is the subgroup of scalar matrices, for which the scalars are the `n`-th roots of unity. -/ theorem mem_center_iff {A : SpecialLinearGroup n R} : A ∈ center (SpecialLinearGroup n R) ↔ ∃ (r : R), r ^ (Fintype.card n) = 1 ∧ scalar n r = A := by rcases isEmpty_or_nonempty n with hn | ⟨⟨i⟩⟩; · exact ⟨by aesop, by simp [Subsingleton.elim A 1]⟩ refine ⟨fun h ↦ ⟨A i i, ?_, ?_⟩, fun ⟨r, _, hr⟩ ↦ Subgroup.mem_center_iff.mpr fun B ↦ ?_⟩ · have : det ((scalar n) (A i i)) = 1 := (scalar_eq_self_of_mem_center h i).symm ▸ A.property simpa using this · exact scalar_eq_self_of_mem_center h i · suffices ↑ₘ(B * A) = ↑ₘ(A * B) from Subtype.val_injective this simpa only [coe_mul, ← hr] using (scalar_commute (n := n) r (Commute.all r) B).symm /-- An equivalence of groups, from the center of the special linear group to the roots of unity. 
-/ @[simps] def center_equiv_rootsOfUnity' (i : n) : center (SpecialLinearGroup n R) ≃* rootsOfUnity (Fintype.card n) R where toFun A := haveI : Nonempty n := ⟨i⟩ rootsOfUnity.mkOfPowEq (↑ₘA i i) <| by obtain ⟨r, hr, hr'⟩ := mem_center_iff.mp A.property replace hr' : A.val i i = r := by simp only [← hr', scalar_apply, diagonal_apply_eq] simp only [hr', hr] invFun a := ⟨⟨a • (1 : Matrix n n R), by aesop⟩, Subgroup.mem_center_iff.mpr fun B ↦ Subtype.val_injective <| by simp [coe_mul]⟩ left_inv A := by refine SetCoe.ext <| SetCoe.ext ?_ obtain ⟨r, _, hr⟩ := mem_center_iff.mp A.property simpa [← hr, Submonoid.smul_def, Units.smul_def] using smul_one_eq_diagonal r right_inv a := by obtain ⟨⟨a, _⟩, ha⟩ := a exact SetCoe.ext <| Units.ext <| by simp map_mul' A B := by dsimp ext simp only [rootsOfUnity.val_mkOfPowEq_coe, Subgroup.coe_mul, Units.val_mul] rw [← scalar_eq_coe_self_center A i, ← scalar_eq_coe_self_center B i] simp open scoped Classical in /-- An equivalence of groups, from the center of the special linear group to the roots of unity. See also `center_equiv_rootsOfUnity'`. -/ noncomputable def center_equiv_rootsOfUnity : center (SpecialLinearGroup n R) ≃* rootsOfUnity (max (Fintype.card n) 1) R := (isEmpty_or_nonempty n).by_cases (fun hn ↦ by rw [center_eq_bot_of_subsingleton, Fintype.card_eq_zero, max_eq_right_of_lt zero_lt_one, rootsOfUnity_one] exact MulEquiv.ofUnique) (fun _ ↦ (max_eq_left (NeZero.one_le : 1 ≤ Fintype.card n)).symm ▸ center_equiv_rootsOfUnity' (Classical.arbitrary n)) end center section cast /-- Coercion of SL `n` `ℤ` to SL `n` `R` for a commutative ring `R`. 
-/ instance : Coe (SpecialLinearGroup n ℤ) (SpecialLinearGroup n R) := ⟨fun x => map (Int.castRingHom R) x⟩ @[simp] theorem coe_matrix_coe (g : SpecialLinearGroup n ℤ) : ↑(g : SpecialLinearGroup n R) = (↑g : Matrix n n ℤ).map (Int.castRingHom R) := map_apply_coe (Int.castRingHom R) g lemma map_intCast_injective [CharZero R] : Function.Injective ((↑) : SpecialLinearGroup n ℤ → SpecialLinearGroup n R) := fun g h ↦ by simp_rw [ext_iff, map_apply_coe, RingHom.mapMatrix_apply, Int.coe_castRingHom, Matrix.map_apply, Int.cast_inj] tauto @[simp] lemma map_intCast_inj [CharZero R] {x y : SpecialLinearGroup n ℤ} : (x : SpecialLinearGroup n R) = y ↔ x = y := map_intCast_injective.eq_iff end cast section Neg variable [Fact (Even (Fintype.card n))] /-- Formal operation of negation on special linear group on even cardinality `n` given by negating each element. -/ instance instNeg : Neg (SpecialLinearGroup n R) := ⟨fun g => ⟨-g, by simpa [(@Fact.out <| Even <| Fintype.card n).neg_one_pow, g.det_coe] using det_smul (↑ₘg) (-1)⟩⟩ @[simp] theorem coe_neg (g : SpecialLinearGroup n R) : ↑(-g) = -(g : Matrix n n R) := rfl instance : HasDistribNeg (SpecialLinearGroup n R) := Function.Injective.hasDistribNeg _ Subtype.coe_injective coe_neg coe_mul @[simp] theorem coe_int_neg (g : SpecialLinearGroup n ℤ) : ↑(-g) = (-↑g : SpecialLinearGroup n R) := Subtype.ext <| (@RingHom.mapMatrix n _ _ _ _ _ _ (Int.castRingHom R)).map_neg ↑g end Neg section SpecialCases open scoped MatrixGroups theorem SL2_inv_expl_det (A : SL(2, R)) : det ![![A.1 1 1, -A.1 0 1], ![-A.1 1 0, A.1 0 0]] = 1 := by simpa [-det_coe, Matrix.det_fin_two, mul_comm] using A.2 theorem SL2_inv_expl (A : SL(2, R)) : A⁻¹ = ⟨![![A.1 1 1, -A.1 0 1], ![-A.1 1 0, A.1 0 0]], SL2_inv_expl_det A⟩ := by ext have := Matrix.adjugate_fin_two A.1 rw [coe_inv, this] simp theorem fin_two_induction (P : SL(2, R) → Prop) (h : ∀ (a b c d : R) (hdet : a * d - b * c = 1), P ⟨!![a, b; c, d], by rwa [det_fin_two_of]⟩) (g : SL(2, R)) : P g := by obtain 
⟨m, hm⟩ := g convert h (m 0 0) (m 0 1) (m 1 0) (m 1 1) (by rwa [det_fin_two] at hm) ext i j; fin_cases i <;> fin_cases j <;> rfl theorem fin_two_exists_eq_mk_of_apply_zero_one_eq_zero {R : Type*} [Field R] (g : SL(2, R)) (hg : g 1 0 = 0) : ∃ (a b : R) (h : a ≠ 0), g = (⟨!![a, b; 0, a⁻¹], by simp [h]⟩ : SL(2, R)) := by induction g using Matrix.SpecialLinearGroup.fin_two_induction with | h a b c d h_det => replace hg : c = 0 := by simpa using hg have had : a * d = 1 := by rwa [hg, mul_zero, sub_zero] at h_det refine ⟨a, b, left_ne_zero_of_mul_eq_one had, ?_⟩ simp_rw [eq_inv_of_mul_eq_one_right had, hg] lemma isCoprime_row (A : SL(2, R)) (i : Fin 2) : IsCoprime (A i 0) (A i 1) := by refine match i with | 0 => ⟨A 1 1, -(A 1 0), ?_⟩ | 1 => ⟨-(A 0 1), A 0 0, ?_⟩ <;> · simp_rw [det_coe A ▸ det_fin_two A.1] ring lemma isCoprime_col (A : SL(2, R)) (j : Fin 2) : IsCoprime (A 0 j) (A 1 j) := by refine match j with | 0 => ⟨A 1 1, -(A 0 1), ?_⟩ | 1 => ⟨-(A 1 0), A 0 0, ?_⟩ <;> · simp_rw [det_coe A ▸ det_fin_two A.1] ring end SpecialCases end SpecialLinearGroup end Matrix namespace IsCoprime open Matrix MatrixGroups SpecialLinearGroup variable {R : Type*} [CommRing R] /-- Given any pair of coprime elements of `R`, there exists a matrix in `SL(2, R)` having those entries as its left or right column. -/ lemma exists_SL2_col {a b : R} (hab : IsCoprime a b) (j : Fin 2) : ∃ g : SL(2, R), g 0 j = a ∧ g 1 j = b := by obtain ⟨u, v, h⟩ := hab refine match j with | 0 => ⟨⟨!![a, -v; b, u], ?_⟩, rfl, rfl⟩ | 1 => ⟨⟨!![v, a; -u, b], ?_⟩, rfl, rfl⟩ <;> · rw [Matrix.det_fin_two_of, ← h] ring /-- Given any pair of coprime elements of `R`, there exists a matrix in `SL(2, R)` having those entries as its top or bottom row. 
-/ lemma exists_SL2_row {a b : R} (hab : IsCoprime a b) (i : Fin 2) : ∃ g : SL(2, R), g i 0 = a ∧ g i 1 = b := by obtain ⟨u, v, h⟩ := hab refine match i with | 0 => ⟨⟨!![a, b; -v, u], ?_⟩, rfl, rfl⟩ | 1 => ⟨⟨!![v, -u; a, b], ?_⟩, rfl, rfl⟩ <;> · rw [Matrix.det_fin_two_of, ← h] ring /-- A vector with coprime entries, right-multiplied by a matrix in `SL(2, R)`, has coprime entries. -/ lemma vecMulSL {v : Fin 2 → R} (hab : IsCoprime (v 0) (v 1)) (A : SL(2, R)) : IsCoprime ((v ᵥ* A.1) 0) ((v ᵥ* A.1) 1) := by obtain ⟨g, hg⟩ := hab.exists_SL2_row 0 have : v = g 0 := funext fun t ↦ by { fin_cases t <;> tauto } simpa only [this] using isCoprime_row (g * A) 0 /-- A vector with coprime entries, left-multiplied by a matrix in `SL(2, R)`, has coprime entries. -/ lemma mulVecSL {v : Fin 2 → R} (hab : IsCoprime (v 0) (v 1)) (A : SL(2, R)) : IsCoprime ((A.1 *ᵥ v) 0) ((A.1 *ᵥ v) 1) := by simpa only [← vecMul_transpose] using hab.vecMulSL A.transpose end IsCoprime namespace ModularGroup open MatrixGroups open Matrix Matrix.SpecialLinearGroup /-- The matrix `S = [[0, -1], [1, 0]]` as an element of `SL(2, ℤ)`. This element acts naturally on the Euclidean plane as a rotation about the origin by `π / 2`. This element also acts naturally on the hyperbolic plane as rotation about `i` by `π`. It represents the Mobiüs transformation `z ↦ -1/z` and is an involutive elliptic isometry. -/ def S : SL(2, ℤ) := ⟨!![0, -1; 1, 0], by simp [Matrix.det_fin_two_of]⟩ /-- The matrix `T = [[1, 1], [0, 1]]` as an element of `SL(2, ℤ)`. 
-/ def T : SL(2, ℤ) := ⟨!![1, 1; 0, 1], by simp [Matrix.det_fin_two_of]⟩ theorem coe_S : ↑S = !![0, -1; 1, 0] := rfl theorem coe_T : ↑T = (!![1, 1; 0, 1] : Matrix _ _ ℤ) := rfl theorem coe_T_inv : ↑(T⁻¹) = !![1, -1; 0, 1] := by simp [coe_inv, coe_T, adjugate_fin_two] theorem coe_T_zpow (n : ℤ) : (T ^ n).1 = !![1, n; 0, 1] := by induction n with | zero => rw [zpow_zero, coe_one, Matrix.one_fin_two] | succ n h => simp_rw [zpow_add, zpow_one, coe_mul, h, coe_T, Matrix.mul_fin_two] congrm !![_, ?_; _, _] rw [mul_one, mul_one, add_comm] | pred n h => simp_rw [zpow_sub, zpow_one, coe_mul, h, coe_T_inv, Matrix.mul_fin_two] congrm !![?_, ?_; _, _] <;> ring @[simp] theorem T_pow_mul_apply_one (n : ℤ) (g : SL(2, ℤ)) : (T ^ n * g) 1 = g 1 := by ext j simp [coe_T_zpow, Matrix.vecMul, dotProduct, Fin.sum_univ_succ] @[simp] theorem T_mul_apply_one (g : SL(2, ℤ)) : (T * g) 1 = g 1 := by simpa using T_pow_mul_apply_one 1 g @[simp] theorem T_inv_mul_apply_one (g : SL(2, ℤ)) : (T⁻¹ * g) 1 = g 1 := by simpa using T_pow_mul_apply_one (-1) g lemma S_mul_S_eq : (S : Matrix (Fin 2) (Fin 2) ℤ) * S = -1 := by simp only [S, Int.reduceNeg, cons_mul, Nat.succ_eq_add_one, Nat.reduceAdd, vecMul_cons, head_cons, zero_smul, tail_cons, neg_smul, one_smul, neg_cons, neg_zero, neg_empty, empty_vecMul, add_zero, zero_add, empty_mul, Equiv.symm_apply_apply] exact Eq.symm (eta_fin_two (-1)) lemma T_S_rel : S • S • S • T • S • T • S = T⁻¹ := by ext i j fin_cases i <;> fin_cases j <;> rfl end ModularGroup
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Symmetric.lean
import Mathlib.Data.Matrix.Basic import Mathlib.Data.Matrix.Block /-! # Symmetric matrices This file contains the definition and basic results about symmetric matrices. ## Main definition * `Matrix.isSymm`: a matrix `A : Matrix n n α` is "symmetric" if `Aᵀ = A`. ## Tags symm, symmetric, matrix -/ variable {α β n m R : Type*} namespace Matrix /-- A matrix `A : Matrix n n α` is "symmetric" if `Aᵀ = A`. -/ def IsSymm (A : Matrix n n α) : Prop := Aᵀ = A instance (A : Matrix n n α) [Decidable (Aᵀ = A)] : Decidable (IsSymm A) := inferInstanceAs <| Decidable (_ = _) theorem IsSymm.eq {A : Matrix n n α} (h : A.IsSymm) : Aᵀ = A := h /-- A version of `Matrix.ext_iff` that unfolds the `Matrix.transpose`. -/ theorem IsSymm.ext_iff {A : Matrix n n α} : A.IsSymm ↔ ∀ i j, A j i = A i j := Matrix.ext_iff.symm /-- A version of `Matrix.ext` that unfolds the `Matrix.transpose`. -/ theorem IsSymm.ext {A : Matrix n n α} : (∀ i j, A j i = A i j) → A.IsSymm := Matrix.ext theorem IsSymm.apply {A : Matrix n n α} (h : A.IsSymm) (i j : n) : A j i = A i j := IsSymm.ext_iff.1 h i j theorem isSymm_mul_transpose_self [Fintype n] [NonUnitalCommSemiring α] (A : Matrix n n α) : (A * Aᵀ).IsSymm := transpose_mul _ _ theorem isSymm_transpose_mul_self [Fintype n] [NonUnitalCommSemiring α] (A : Matrix n n α) : (Aᵀ * A).IsSymm := transpose_mul _ _ theorem isSymm_add_transpose_self [AddCommSemigroup α] (A : Matrix n n α) : (A + Aᵀ).IsSymm := add_comm _ _ theorem isSymm_transpose_add_self [AddCommSemigroup α] (A : Matrix n n α) : (Aᵀ + A).IsSymm := add_comm _ _ @[simp] theorem isSymm_zero [Zero α] : (0 : Matrix n n α).IsSymm := transpose_zero @[simp] theorem isSymm_one [DecidableEq n] [Zero α] [One α] : (1 : Matrix n n α).IsSymm := transpose_one theorem IsSymm.pow [CommSemiring α] [Fintype n] [DecidableEq n] {A : Matrix n n α} (h : A.IsSymm) (k : ℕ) : (A ^ k).IsSymm := by rw [IsSymm, transpose_pow, h] @[simp] theorem IsSymm.map {A : Matrix n n α} (h : A.IsSymm) (f : α → β) : (A.map f).IsSymm := 
transpose_map.symm.trans (h.symm ▸ rfl) @[simp] theorem IsSymm.transpose {A : Matrix n n α} (h : A.IsSymm) : Aᵀ.IsSymm := congr_arg _ h @[simp] theorem IsSymm.conjTranspose [Star α] {A : Matrix n n α} (h : A.IsSymm) : Aᴴ.IsSymm := h.transpose.map _ @[simp] theorem IsSymm.neg [Neg α] {A : Matrix n n α} (h : A.IsSymm) : (-A).IsSymm := (transpose_neg _).trans (congr_arg _ h) @[simp] theorem IsSymm.add {A B : Matrix n n α} [Add α] (hA : A.IsSymm) (hB : B.IsSymm) : (A + B).IsSymm := (transpose_add _ _).trans (hA.symm ▸ hB.symm ▸ rfl) @[simp] theorem IsSymm.sub {A B : Matrix n n α} [Sub α] (hA : A.IsSymm) (hB : B.IsSymm) : (A - B).IsSymm := (transpose_sub _ _).trans (hA.symm ▸ hB.symm ▸ rfl) @[simp] theorem IsSymm.smul [SMul R α] {A : Matrix n n α} (h : A.IsSymm) (k : R) : (k • A).IsSymm := (transpose_smul _ _).trans (congr_arg _ h) @[simp] theorem IsSymm.submatrix {A : Matrix n n α} (h : A.IsSymm) (f : m → n) : (A.submatrix f f).IsSymm := (transpose_submatrix _ _ _).trans (h.symm ▸ rfl) /-- The diagonal matrix `diagonal v` is symmetric. -/ @[simp] theorem isSymm_diagonal [DecidableEq n] [Zero α] (v : n → α) : (diagonal v).IsSymm := diagonal_transpose _ /-- A block matrix `A.fromBlocks B C D` is symmetric, if `A` and `D` are symmetric and `Bᵀ = C`. -/ theorem IsSymm.fromBlocks {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} (hA : A.IsSymm) (hBC : Bᵀ = C) (hD : D.IsSymm) : (A.fromBlocks B C D).IsSymm := by have hCB : Cᵀ = B := by rw [← hBC] simp unfold Matrix.IsSymm rw [fromBlocks_transpose, hA, hCB, hBC, hD] /-- This is the `iff` version of `Matrix.isSymm.fromBlocks`. -/ theorem isSymm_fromBlocks_iff {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} : (A.fromBlocks B C D).IsSymm ↔ A.IsSymm ∧ Bᵀ = C ∧ Cᵀ = B ∧ D.IsSymm := ⟨fun h => ⟨(congr_arg toBlocks₁₁ h :), (congr_arg toBlocks₂₁ h :), (congr_arg toBlocks₁₂ h :), (congr_arg toBlocks₂₂ h :)⟩, fun ⟨hA, hBC, _, hD⟩ => IsSymm.fromBlocks hA hBC hD⟩ end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Basis.lean
import Mathlib.LinearAlgebra.Basis.Submodule import Mathlib.LinearAlgebra.Matrix.Reindex import Mathlib.LinearAlgebra.Matrix.ToLin /-! # Bases and matrices This file defines the map `Basis.toMatrix` that sends a family of vectors to the matrix of their coordinates with respect to some basis. ## Main definitions * `Basis.toMatrix e v` is the matrix whose `i, j`th entry is `e.repr (v j) i` * `basis.toMatrixEquiv` is `Basis.toMatrix` bundled as a linear equiv ## Main results * `LinearMap.toMatrix_id_eq_basis_toMatrix`: `LinearMap.toMatrix b c id` is equal to `Basis.toMatrix b c` * `Basis.toMatrix_mul_toMatrix`: multiplying `Basis.toMatrix` with another `Basis.toMatrix` gives a `Basis.toMatrix` ## Tags matrix, basis -/ noncomputable section open Function LinearMap Matrix Module Set Submodule variable {ι ι' κ κ' : Type*} variable {R M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] variable {R₂ M₂ : Type*} [CommRing R₂] [AddCommGroup M₂] [Module R₂ M₂] namespace Module.Basis /-- From a basis `e : ι → M` and a family of vectors `v : ι' → M`, make the matrix whose columns are the vectors `v i` written in the basis `e`. -/ def toMatrix (e : Basis ι R M) (v : ι' → M) : Matrix ι ι' R := fun i j ↦ e.repr (v j) i variable (e : Basis ι R M) (v : ι' → M) (i : ι) (j : ι') theorem toMatrix_apply : e.toMatrix v i j = e.repr (v j) i := rfl theorem toMatrix_transpose_apply : (e.toMatrix v)ᵀ j = e.repr (v j) := funext fun _ => rfl theorem toMatrix_eq_toMatrix_constr [Fintype ι] [DecidableEq ι] (v : ι → M) : e.toMatrix v = LinearMap.toMatrix e e (e.constr ℕ v) := by ext rw [Basis.toMatrix_apply, LinearMap.toMatrix_apply, Basis.constr_basis] -- TODO (maybe) Adjust the definition of `Basis.toMatrix` to eliminate the transpose. 
theorem coePiBasisFun.toMatrix_eq_transpose [Finite ι] : ((Pi.basisFun R ι).toMatrix : Matrix ι ι R → Matrix ι ι R) = Matrix.transpose := by ext M i j rfl @[simp] theorem toMatrix_self [DecidableEq ι] : e.toMatrix e = 1 := by unfold Basis.toMatrix ext i j simp [Matrix.one_apply, Finsupp.single_apply, eq_comm] theorem toMatrix_update [DecidableEq ι'] (x : M) : e.toMatrix (Function.update v j x) = Matrix.updateCol (e.toMatrix v) j (e.repr x) := by ext i' k rw [Basis.toMatrix, Matrix.updateCol_apply, e.toMatrix_apply] split_ifs with h · rw [h, update_self j x v] · rw [update_of_ne h] /-- The basis constructed by `unitsSMul` has vectors given by a diagonal matrix. -/ @[simp] theorem toMatrix_unitsSMul [DecidableEq ι] (e : Basis ι R₂ M₂) (w : ι → R₂ˣ) : e.toMatrix (e.unitsSMul w) = diagonal ((↑) ∘ w) := by ext i j by_cases h : i = j <;> simp [h, toMatrix_apply, unitsSMul_apply, Units.smul_def] /-- The basis constructed by `isUnitSMul` has vectors given by a diagonal matrix. -/ @[simp] theorem toMatrix_isUnitSMul [DecidableEq ι] (e : Basis ι R₂ M₂) {w : ι → R₂} (hw : ∀ i, IsUnit (w i)) : e.toMatrix (e.isUnitSMul hw) = diagonal w := e.toMatrix_unitsSMul _ theorem toMatrix_smul_left {G} [Group G] [DistribMulAction G M] [SMulCommClass G R M] (g : G) : (g • e).toMatrix v = e.toMatrix (g⁻¹ • v) := rfl @[simp] theorem sum_toMatrix_smul_self [Fintype ι] : ∑ i : ι, e.toMatrix v i j • e i = v j := by simp_rw [e.toMatrix_apply, e.sum_repr] theorem toMatrix_smul {R₁ S : Type*} [CommSemiring R₁] [Semiring S] [Algebra R₁ S] [Fintype ι] [DecidableEq ι] (x : S) (b : Basis ι R₁ S) (w : ι → S) : (b.toMatrix (x • w)) = (Algebra.leftMulMatrix b x) * (b.toMatrix w) := by ext rw [Basis.toMatrix_apply, Pi.smul_apply, smul_eq_mul, ← Algebra.leftMulMatrix_mulVec_repr] rfl theorem toMatrix_map_vecMul {S : Type*} [Semiring S] [Algebra R S] [Fintype ι] (b : Basis ι R S) (v : ι' → S) : b ᵥ* ((b.toMatrix v).map <| algebraMap R S) = v := by ext i simp_rw [vecMul, dotProduct, Matrix.map_apply, ← 
Algebra.commutes, ← Algebra.smul_def, sum_toMatrix_smul_self] @[simp] theorem toLin_toMatrix [Finite ι] [Fintype ι'] [DecidableEq ι'] (v : Basis ι' R M) : Matrix.toLin v e (e.toMatrix v) = LinearMap.id := v.ext fun i => by cases nonempty_fintype ι; rw [toLin_self, id_apply, e.sum_toMatrix_smul_self] /-- From a basis `e : ι → M`, build a linear equivalence between families of vectors `v : ι → M`, and matrices, making the matrix whose columns are the vectors `v i` written in the basis `e`. -/ def toMatrixEquiv [Fintype ι] (e : Basis ι R M) : (ι → M) ≃ₗ[R] Matrix ι ι R where toFun := e.toMatrix map_add' v w := by ext i j rw [Matrix.add_apply, e.toMatrix_apply, Pi.add_apply, LinearEquiv.map_add] rfl map_smul' := by intro c v ext i j dsimp only [] rw [e.toMatrix_apply, Pi.smul_apply, LinearEquiv.map_smul] rfl invFun m j := ∑ i, m i j • e i left_inv := by intro v ext j exact e.sum_toMatrix_smul_self v j right_inv := by intro m ext k l simp only [e.toMatrix_apply, ← e.equivFun_apply, ← e.equivFun_symm_apply, LinearEquiv.apply_symm_apply] variable (R₂) in theorem restrictScalars_toMatrix [Fintype ι] [DecidableEq ι] {S : Type*} [CommRing S] [Nontrivial S] [Algebra R₂ S] [Module S M₂] [IsScalarTower R₂ S M₂] [NoZeroSMulDivisors R₂ S] (b : Basis ι S M₂) (v : ι → span R₂ (Set.range b)) : (algebraMap R₂ S).mapMatrix ((b.restrictScalars R₂).toMatrix v) = b.toMatrix (fun i ↦ (v i : M₂)) := by ext rw [RingHom.mapMatrix_apply, Matrix.map_apply, Basis.toMatrix_apply, Basis.restrictScalars_repr_apply, Basis.toMatrix_apply] end Module.Basis section MulLinearMapToMatrix variable {N : Type*} [AddCommMonoid N] [Module R N] variable (b : Basis ι R M) (b' : Basis ι' R M) (c : Basis κ R N) (c' : Basis κ' R N) variable (f : M →ₗ[R] N) open LinearMap section Fintype /-- A generalization of `LinearMap.toMatrix_id`. 
-/ @[simp] theorem LinearMap.toMatrix_id_eq_basis_toMatrix [Fintype ι] [DecidableEq ι] [Finite ι'] : LinearMap.toMatrix b b' id = b'.toMatrix b := by ext i apply LinearMap.toMatrix_apply variable [Fintype ι'] @[simp] theorem basis_toMatrix_mul_linearMap_toMatrix [Finite κ] [Fintype κ'] [DecidableEq ι'] : c.toMatrix c' * LinearMap.toMatrix b' c' f = LinearMap.toMatrix b' c f := (Matrix.toLin b' c).injective <| by haveI := Classical.decEq κ' rw [toLin_toMatrix, toLin_mul b' c' c, toLin_toMatrix, c.toLin_toMatrix, LinearMap.id_comp] theorem basis_toMatrix_mul [Fintype κ] [Finite ι] [DecidableEq κ] (b₁ : Basis ι R M) (b₂ : Basis ι' R M) (b₃ : Basis κ R N) (A : Matrix ι' κ R) : b₁.toMatrix b₂ * A = LinearMap.toMatrix b₃ b₁ (toLin b₃ b₂ A) := by have := basis_toMatrix_mul_linearMap_toMatrix b₃ b₁ b₂ (Matrix.toLin b₃ b₂ A) rwa [LinearMap.toMatrix_toLin] at this variable [Finite κ] [Fintype ι] @[simp] theorem linearMap_toMatrix_mul_basis_toMatrix [Finite κ'] [DecidableEq ι] [DecidableEq ι'] : LinearMap.toMatrix b' c' f * b'.toMatrix b = LinearMap.toMatrix b c' f := (Matrix.toLin b c').injective <| by rw [toLin_toMatrix, toLin_mul b b' c', toLin_toMatrix, b'.toLin_toMatrix, LinearMap.comp_id] theorem basis_toMatrix_mul_linearMap_toMatrix_mul_basis_toMatrix [Fintype κ'] [DecidableEq ι] [DecidableEq ι'] : c.toMatrix c' * LinearMap.toMatrix b' c' f * b'.toMatrix b = LinearMap.toMatrix b c f := by cases nonempty_fintype κ rw [basis_toMatrix_mul_linearMap_toMatrix, linearMap_toMatrix_mul_basis_toMatrix] theorem mul_basis_toMatrix [DecidableEq ι] [DecidableEq ι'] (b₁ : Basis ι R M) (b₂ : Basis ι' R M) (b₃ : Basis κ R N) (A : Matrix κ ι R) : A * b₁.toMatrix b₂ = LinearMap.toMatrix b₂ b₃ (toLin b₁ b₃ A) := by cases nonempty_fintype κ have := linearMap_toMatrix_mul_basis_toMatrix b₂ b₁ b₃ (Matrix.toLin b₁ b₃ A) rwa [LinearMap.toMatrix_toLin] at this theorem basis_toMatrix_basisFun_mul (b : Basis ι R (ι → R)) (A : Matrix ι ι R) : b.toMatrix (Pi.basisFun R ι) * A = of fun i j => 
b.repr (A.col j) i := by classical simp only [basis_toMatrix_mul _ _ (Pi.basisFun R ι), Matrix.toLin_eq_toLin'] ext i j rw [LinearMap.toMatrix_apply, Matrix.toLin'_apply, Pi.basisFun_apply, Matrix.mulVec_single_one, Matrix.of_apply] namespace Module.Basis /-- See also `Basis.toMatrix_reindex` which gives the `simp` normal form of this result. -/ theorem toMatrix_reindex' [DecidableEq ι] [DecidableEq ι'] (b : Basis ι R M) (v : ι' → M) (e : ι ≃ ι') : (b.reindex e).toMatrix v = Matrix.reindexAlgEquiv R R e (b.toMatrix (v ∘ e)) := by ext simp only [Basis.toMatrix_apply, Basis.repr_reindex, Matrix.reindexAlgEquiv_apply, Matrix.reindex_apply, Matrix.submatrix_apply, Function.comp_apply, e.apply_symm_apply, Finsupp.mapDomain_equiv_apply] omit [Fintype ι'] in @[simp] lemma toMatrix_mulVec_repr [Finite ι'] (m : M) : b'.toMatrix b *ᵥ b.repr m = b'.repr m := by classical cases nonempty_fintype ι' simp [← LinearMap.toMatrix_id_eq_basis_toMatrix, LinearMap.toMatrix_mulVec_repr] end Module.Basis end Fintype namespace Module.Basis /-- A generalization of `Basis.toMatrix_self`, in the opposite direction. -/ @[simp] theorem toMatrix_mul_toMatrix {ι'' : Type*} [Fintype ι'] (b'' : ι'' → M) : b.toMatrix b' * b'.toMatrix b'' = b.toMatrix b'' := by haveI := Classical.decEq ι haveI := Classical.decEq ι' haveI := Classical.decEq ι'' ext i j simp only [Matrix.mul_apply, toMatrix_apply, sum_repr_mul_repr] /-- `b.toMatrix b'` and `b'.toMatrix b` are inverses. -/ theorem toMatrix_mul_toMatrix_flip [DecidableEq ι] [Fintype ι'] : b.toMatrix b' * b'.toMatrix b = 1 := by rw [toMatrix_mul_toMatrix, toMatrix_self] /-- A matrix whose columns form a basis `b'`, expressed w.r.t. a basis `b`, is invertible. 
-/ def invertibleToMatrix [DecidableEq ι] [Fintype ι] (b b' : Basis ι R₂ M₂) : Invertible (b.toMatrix b') := ⟨b'.toMatrix b, toMatrix_mul_toMatrix_flip _ _, toMatrix_mul_toMatrix_flip _ _⟩ @[simp] theorem toMatrix_reindex (b : Basis ι R M) (v : ι' → M) (e : ι ≃ ι') : (b.reindex e).toMatrix v = (b.toMatrix v).submatrix e.symm _root_.id := by ext simp only [toMatrix_apply, repr_reindex, Matrix.submatrix_apply, _root_.id, Finsupp.mapDomain_equiv_apply] @[simp] theorem toMatrix_map (b : Basis ι R M) (f : M ≃ₗ[R] N) (v : ι → N) : (b.map f).toMatrix v = b.toMatrix (f.symm ∘ v) := by ext simp only [toMatrix_apply, Basis.map, LinearEquiv.trans_apply, (· ∘ ·)] end Module.Basis end MulLinearMapToMatrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/BaseChange.lean
import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.Algebra.Field.Subfield.Defs /-! # Matrices and base change This file is a home for results about base change for matrices. ## Main results: * `Matrix.mem_subfield_of_mul_eq_one_of_mem_subfield_right`: if an invertible matrix over `L` takes values in subfield `K ⊆ L`, then so does its (right) inverse. * `Matrix.mem_subfield_of_mul_eq_one_of_mem_subfield_left`: if an invertible matrix over `L` takes values in subfield `K ⊆ L`, then so does its (left) inverse. -/ namespace Matrix variable {m n L : Type*} [Finite m] [Fintype n] [DecidableEq m] [Field L] (e : m ≃ n) (K : Subfield L) {A : Matrix m n L} {B : Matrix n m L} (hAB : A * B = 1) include e hAB lemma mem_subfield_of_mul_eq_one_of_mem_subfield_right (h_mem : ∀ i j, A i j ∈ K) (i : n) (j : m) : B i j ∈ K := by cases nonempty_fintype m let A' : Matrix m m K := of fun i j ↦ ⟨A.submatrix id e i j, h_mem i (e j)⟩ have hA' : A'.map K.subtype = A.submatrix id e := rfl have hA : IsUnit A' := by have h_unit : IsUnit (A.submatrix id e) := isUnit_of_right_inverse (B := B.submatrix e id) (by simpa) have h_det : (A.submatrix id e).det = K.subtype A'.det := by simp [A', K.subtype.map_det, map, submatrix] simpa [isUnit_iff_isUnit_det, h_det] using h_unit obtain ⟨B', hB⟩ := exists_right_inverse_iff_isUnit.mpr hA suffices (B'.submatrix e.symm id).map K.subtype = B by simp [← this] replace hB : A * (B'.submatrix e.symm id).map K.subtype = 1 := by replace hB := congr_arg (fun C ↦ C.map K.subtype) hB simp_rw [Matrix.map_mul] at hB rw [hA', ← e.symm_symm, ← submatrix_id_mul_left] at hB simpa using hB classical simpa [← Matrix.mul_assoc, (mul_eq_one_comm_of_equiv e).mp hAB] using congr_arg (B * ·) hB lemma mem_subfield_of_mul_eq_one_of_mem_subfield_left (h_mem : ∀ i j, B i j ∈ K) (i : m) (j : n) : A i j ∈ K := by replace hAB : Bᵀ * Aᵀ = 1 := by simpa using congr_arg transpose hAB rw [← A.transpose_apply] simp_rw [← B.transpose_apply] at h_mem exact 
mem_subfield_of_mul_eq_one_of_mem_subfield_right e K hAB (fun i j ↦ h_mem j i) j i end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Hermitian.lean
import Mathlib.Analysis.InnerProductSpace.PiL2 import Mathlib.LinearAlgebra.Matrix.ConjTranspose import Mathlib.LinearAlgebra.Matrix.ZPow /-! # Hermitian matrices This file defines Hermitian matrices and some basic results about them. See also `IsSelfAdjoint`, which generalizes this definition to other star rings. ## Main definition * `Matrix.IsHermitian` : a matrix `A : Matrix n n α` is Hermitian if `Aᴴ = A`. ## Tags self-adjoint matrix, hermitian matrix -/ namespace Matrix variable {α β : Type*} {m n : Type*} {A : Matrix n n α} open scoped Matrix local notation "⟪" x ", " y "⟫" => inner α x y section Star variable [Star α] [Star β] /-- A matrix is Hermitian if it is equal to its conjugate transpose. On the reals, this definition captures symmetric matrices. -/ def IsHermitian (A : Matrix n n α) : Prop := Aᴴ = A instance (A : Matrix n n α) [Decidable (Aᴴ = A)] : Decidable (IsHermitian A) := inferInstanceAs <| Decidable (_ = _) theorem IsHermitian.eq {A : Matrix n n α} (h : A.IsHermitian) : Aᴴ = A := h theorem isHermitian_iff_isSelfAdjoint {A : Matrix n n α} : A.IsHermitian ↔ IsSelfAdjoint A := Iff.rfl protected alias ⟨IsHermitian.isSelfAdjoint, _root_.IsSelfAdjoint.isHermitian⟩ := isHermitian_iff_isSelfAdjoint theorem IsHermitian.ext {A : Matrix n n α} : (∀ i j, star (A j i) = A i j) → A.IsHermitian := by intro h; ext i j; exact h i j theorem IsHermitian.apply {A : Matrix n n α} (h : A.IsHermitian) (i j : n) : star (A j i) = A i j := congr_fun (congr_fun h _) _ theorem IsHermitian.ext_iff {A : Matrix n n α} : A.IsHermitian ↔ ∀ i j, star (A j i) = A i j := ⟨IsHermitian.apply, IsHermitian.ext⟩ @[simp] theorem IsHermitian.map {A : Matrix n n α} (h : A.IsHermitian) (f : α → β) (hf : Function.Semiconj f star star) : (A.map f).IsHermitian := (conjTranspose_map f hf).symm.trans <| h.eq.symm ▸ rfl theorem IsHermitian.transpose {A : Matrix n n α} (h : A.IsHermitian) : Aᵀ.IsHermitian := by rw [IsHermitian, conjTranspose, transpose_map] exact congr_arg Matrix.transpose h 
@[simp] theorem isHermitian_transpose_iff (A : Matrix n n α) : Aᵀ.IsHermitian ↔ A.IsHermitian := ⟨by intro h; rw [← transpose_transpose A]; exact IsHermitian.transpose h, IsHermitian.transpose⟩ theorem IsHermitian.conjTranspose {A : Matrix n n α} (h : A.IsHermitian) : Aᴴ.IsHermitian := h.transpose.map _ fun _ => rfl @[simp] theorem IsHermitian.submatrix {A : Matrix n n α} (h : A.IsHermitian) (f : m → n) : (A.submatrix f f).IsHermitian := (conjTranspose_submatrix _ _ _).trans (h.symm ▸ rfl) @[simp] theorem isHermitian_submatrix_equiv {A : Matrix n n α} (e : m ≃ n) : (A.submatrix e e).IsHermitian ↔ A.IsHermitian := ⟨fun h => by simpa using h.submatrix e.symm, fun h => h.submatrix _⟩ end Star section InvolutiveStar variable [InvolutiveStar α] @[simp] theorem isHermitian_conjTranspose_iff (A : Matrix n n α) : Aᴴ.IsHermitian ↔ A.IsHermitian := IsSelfAdjoint.star_iff /-- A block matrix `A.from_blocks B C D` is Hermitian, if `A` and `D` are Hermitian and `Bᴴ = C`. -/ theorem IsHermitian.fromBlocks {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} (hA : A.IsHermitian) (hBC : Bᴴ = C) (hD : D.IsHermitian) : (A.fromBlocks B C D).IsHermitian := by have hCB : Cᴴ = B := by rw [← hBC, conjTranspose_conjTranspose] unfold Matrix.IsHermitian rw [fromBlocks_conjTranspose, hBC, hCB, hA, hD] /-- This is the `iff` version of `Matrix.IsHermitian.fromBlocks`. 
-/ theorem isHermitian_fromBlocks_iff {A : Matrix m m α} {B : Matrix m n α} {C : Matrix n m α} {D : Matrix n n α} : (A.fromBlocks B C D).IsHermitian ↔ A.IsHermitian ∧ Bᴴ = C ∧ Cᴴ = B ∧ D.IsHermitian := ⟨fun h => ⟨congr_arg toBlocks₁₁ h, congr_arg toBlocks₂₁ h, congr_arg toBlocks₁₂ h, congr_arg toBlocks₂₂ h⟩, fun ⟨hA, hBC, _hCB, hD⟩ => IsHermitian.fromBlocks hA hBC hD⟩ end InvolutiveStar section AddMonoid variable [AddMonoid α] [StarAddMonoid α] /-- A diagonal matrix is Hermitian if the entries are self-adjoint (as a vector) -/ theorem isHermitian_diagonal_of_self_adjoint [DecidableEq n] (v : n → α) (h : IsSelfAdjoint v) : (diagonal v).IsHermitian := (-- TODO: add a `pi.has_trivial_star` instance and remove the `funext` diagonal_conjTranspose v).trans <| congr_arg _ h /-- A diagonal matrix is Hermitian if each diagonal entry is self-adjoint -/ lemma isHermitian_diagonal_iff [DecidableEq n] {d : n → α} : IsHermitian (diagonal d) ↔ (∀ i : n, IsSelfAdjoint (d i)) := by simp [isSelfAdjoint_iff, IsHermitian, conjTranspose, diagonal_transpose, diagonal_map] /-- A diagonal matrix is Hermitian if the entries have the trivial `star` operation (such as on the reals). 
-/ @[simp] theorem isHermitian_diagonal [TrivialStar α] [DecidableEq n] (v : n → α) : (diagonal v).IsHermitian := isHermitian_diagonal_of_self_adjoint _ (IsSelfAdjoint.all _) @[simp] theorem isHermitian_zero : (0 : Matrix n n α).IsHermitian := IsSelfAdjoint.zero _ @[simp] theorem IsHermitian.add {A B : Matrix n n α} (hA : A.IsHermitian) (hB : B.IsHermitian) : (A + B).IsHermitian := IsSelfAdjoint.add hA hB end AddMonoid section AddCommMonoid variable [AddCommMonoid α] [StarAddMonoid α] theorem isHermitian_add_transpose_self (A : Matrix n n α) : (A + Aᴴ).IsHermitian := IsSelfAdjoint.add_star_self A theorem isHermitian_transpose_add_self (A : Matrix n n α) : (Aᴴ + A).IsHermitian := IsSelfAdjoint.star_add_self A end AddCommMonoid section AddGroup variable [AddGroup α] [StarAddMonoid α] @[simp] theorem IsHermitian.neg {A : Matrix n n α} (h : A.IsHermitian) : (-A).IsHermitian := IsSelfAdjoint.neg h @[simp] theorem IsHermitian.sub {A B : Matrix n n α} (hA : A.IsHermitian) (hB : B.IsHermitian) : (A - B).IsHermitian := IsSelfAdjoint.sub hA hB end AddGroup section NonUnitalSemiring variable [NonUnitalSemiring α] [StarRing α] /-- Note this is more general than `IsSelfAdjoint.mul_star_self` as `B` can be rectangular. -/ theorem isHermitian_mul_conjTranspose_self [Fintype n] (A : Matrix m n α) : (A * Aᴴ).IsHermitian := by rw [IsHermitian, conjTranspose_mul, conjTranspose_conjTranspose] /-- Note this is more general than `IsSelfAdjoint.star_mul_self` as `B` can be rectangular. -/ theorem isHermitian_conjTranspose_mul_self [Fintype m] (A : Matrix m n α) : (Aᴴ * A).IsHermitian := by rw [IsHermitian, conjTranspose_mul, conjTranspose_conjTranspose] @[deprecated (since := "2025-11-10")] alias isHermitian_transpose_mul_self := isHermitian_conjTranspose_mul_self /-- Note this is more general than `IsSelfAdjoint.conjugate'` as `B` can be rectangular. 
-/ theorem isHermitian_conjTranspose_mul_mul [Fintype m] {A : Matrix m m α} (B : Matrix m n α) (hA : A.IsHermitian) : (Bᴴ * A * B).IsHermitian := by simp only [IsHermitian, conjTranspose_mul, conjTranspose_conjTranspose, hA.eq, Matrix.mul_assoc] /-- Note this is more general than `IsSelfAdjoint.conjugate` as `B` can be rectangular. -/ theorem isHermitian_mul_mul_conjTranspose [Fintype m] {A : Matrix m m α} (B : Matrix n m α) (hA : A.IsHermitian) : (B * A * Bᴴ).IsHermitian := by simp only [IsHermitian, conjTranspose_mul, conjTranspose_conjTranspose, hA.eq, Matrix.mul_assoc] lemma IsHermitian.commute_iff [Fintype n] {A B : Matrix n n α} (hA : A.IsHermitian) (hB : B.IsHermitian) : Commute A B ↔ (A * B).IsHermitian := hA.isSelfAdjoint.commute_iff hB.isSelfAdjoint @[deprecated (since := "13-08-2025")] alias commute_iff := IsHermitian.commute_iff end NonUnitalSemiring section Semiring variable [Semiring α] [StarRing α] /-- Note this is more general for matrices than `isSelfAdjoint_one` as it does not require `Fintype n`, which is necessary for `Monoid (Matrix n n R)`. 
-/ @[simp] theorem isHermitian_one [DecidableEq n] : (1 : Matrix n n α).IsHermitian := conjTranspose_one @[simp] theorem isHermitian_natCast [DecidableEq n] (d : ℕ) : (d : Matrix n n α).IsHermitian := conjTranspose_natCast _ theorem IsHermitian.pow [Fintype n] [DecidableEq n] {A : Matrix n n α} (h : A.IsHermitian) (k : ℕ) : (A ^ k).IsHermitian := IsSelfAdjoint.pow h _ end Semiring section Ring variable [Ring α] [StarRing α] @[simp] theorem isHermitian_intCast [DecidableEq n] (d : ℤ) : (d : Matrix n n α).IsHermitian := conjTranspose_intCast _ end Ring section CommRing variable [CommRing α] [StarRing α] theorem IsHermitian.inv [Fintype m] [DecidableEq m] {A : Matrix m m α} (hA : A.IsHermitian) : A⁻¹.IsHermitian := by simp [IsHermitian, conjTranspose_nonsing_inv, hA.eq] @[simp] theorem isHermitian_inv [Fintype m] [DecidableEq m] (A : Matrix m m α) [Invertible A] : A⁻¹.IsHermitian ↔ A.IsHermitian := ⟨fun h => by rw [← inv_inv_of_invertible A]; exact IsHermitian.inv h, IsHermitian.inv⟩ theorem IsHermitian.adjugate [Fintype m] [DecidableEq m] {A : Matrix m m α} (hA : A.IsHermitian) : A.adjugate.IsHermitian := by simp [IsHermitian, adjugate_conjTranspose, hA.eq] /-- Note that `IsSelfAdjoint.zpow` does not apply to matrices as they are not a division ring. -/ theorem IsHermitian.zpow [Fintype m] [DecidableEq m] {A : Matrix m m α} (h : A.IsHermitian) (k : ℤ) : (A ^ k).IsHermitian := by rw [IsHermitian, conjTranspose_zpow, h] section SchurComplement /-- Notation for `Sum.elim`, scoped within the `Matrix` namespace. 
-/ scoped infixl:65 " ⊕ᵥ " => Sum.elim theorem schur_complement_eq₁₁ [Fintype m] [DecidableEq m] [Fintype n] {A : Matrix m m α} (B : Matrix m n α) (D : Matrix n n α) (x : m → α) (y : n → α) [Invertible A] (hA : A.IsHermitian) : (star (x ⊕ᵥ y)) ᵥ* (Matrix.fromBlocks A B Bᴴ D) ⬝ᵥ (x ⊕ᵥ y) = (star (x + (A⁻¹ * B) *ᵥ y)) ᵥ* A ⬝ᵥ (x + (A⁻¹ * B) *ᵥ y) + (star y) ᵥ* (D - Bᴴ * A⁻¹ * B) ⬝ᵥ y := by simp [Function.star_sumElim, vecMul_fromBlocks, add_vecMul, dotProduct_mulVec, vecMul_sub, Matrix.mul_assoc, hA.eq, conjTranspose_nonsing_inv, star_mulVec] abel theorem schur_complement_eq₂₂ [Fintype m] [Fintype n] [DecidableEq n] (A : Matrix m m α) (B : Matrix m n α) {D : Matrix n n α} (x : m → α) (y : n → α) [Invertible D] (hD : D.IsHermitian) : (star (x ⊕ᵥ y)) ᵥ* (Matrix.fromBlocks A B Bᴴ D) ⬝ᵥ (x ⊕ᵥ y) = (star ((D⁻¹ * Bᴴ) *ᵥ x + y)) ᵥ* D ⬝ᵥ ((D⁻¹ * Bᴴ) *ᵥ x + y) + (star x) ᵥ* (A - B * D⁻¹ * Bᴴ) ⬝ᵥ x := by simp [Function.star_sumElim, vecMul_fromBlocks, add_vecMul, dotProduct_mulVec, vecMul_sub, Matrix.mul_assoc, hD.eq, conjTranspose_nonsing_inv, star_mulVec] abel namespace IsHermitian theorem fromBlocks₁₁ [Fintype m] [DecidableEq m] {A : Matrix m m α} (B : Matrix m n α) (D : Matrix n n α) (hA : A.IsHermitian) : (Matrix.fromBlocks A B Bᴴ D).IsHermitian ↔ (D - Bᴴ * A⁻¹ * B).IsHermitian := by have hBAB : (Bᴴ * A⁻¹ * B).IsHermitian := isHermitian_conjTranspose_mul_mul _ hA.inv rw [isHermitian_fromBlocks_iff] exact ⟨fun h ↦ h.2.2.2.sub hBAB, fun h ↦ ⟨hA, rfl, conjTranspose_conjTranspose B, sub_add_cancel D _ ▸ h.add hBAB⟩⟩ theorem fromBlocks₂₂ [Fintype n] [DecidableEq n] (A : Matrix m m α) (B : Matrix m n α) {D : Matrix n n α} (hD : D.IsHermitian) : (Matrix.fromBlocks A B Bᴴ D).IsHermitian ↔ (A - B * D⁻¹ * Bᴴ).IsHermitian := by rw [← isHermitian_submatrix_equiv (Equiv.sumComm n m), Equiv.sumComm_apply, fromBlocks_submatrix_sum_swap_sum_swap] convert IsHermitian.fromBlocks₁₁ _ _ hD <;> simp end IsHermitian end SchurComplement end CommRing section RCLike open RCLike variable [RCLike 
α] /-- The diagonal elements of a complex Hermitian matrix are real. -/ theorem IsHermitian.coe_re_apply_self {A : Matrix n n α} (h : A.IsHermitian) (i : n) : (re (A i i) : α) = A i i := by rw [← conj_eq_iff_re, ← star_def, ← conjTranspose_apply, h.eq] /-- The diagonal elements of a complex Hermitian matrix are real. -/ theorem IsHermitian.coe_re_diag {A : Matrix n n α} (h : A.IsHermitian) : (fun i => (re (A.diag i) : α)) = A.diag := funext h.coe_re_apply_self /-- A matrix is Hermitian iff the corresponding linear map is self adjoint. -/ theorem isHermitian_iff_isSymmetric [Fintype n] [DecidableEq n] {A : Matrix n n α} : IsHermitian A ↔ A.toEuclideanLin.IsSymmetric := by rw [LinearMap.IsSymmetric, (WithLp.toLp_surjective _).forall₂] simp only [toEuclideanLin_toLp, Matrix.toLin'_apply, EuclideanSpace.inner_eq_star_dotProduct, star_mulVec] constructor · rintro (h : Aᴴ = A) x y rw [dotProduct_comm, ← dotProduct_mulVec, h, dotProduct_comm] · intro h ext i j simpa [(Pi.single_star i 1).symm] using h (Pi.single i 1) (Pi.single j 1) theorem IsHermitian.im_star_dotProduct_mulVec_self [Fintype n] {A : Matrix n n α} (hA : A.IsHermitian) (x : n → α) : RCLike.im (star x ⬝ᵥ A *ᵥ x) = 0 := by classical exact dotProduct_comm _ (star x) ▸ (isHermitian_iff_isSymmetric.mp hA).im_inner_self_apply _ end RCLike end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Gershgorin.lean
import Mathlib.Analysis.Normed.Field.Basic import Mathlib.LinearAlgebra.Eigenspace.Basic import Mathlib.LinearAlgebra.Determinant /-! # Gershgorin's circle theorem This file gives the proof of Gershgorin's circle theorem `eigenvalue_mem_ball` on the eigenvalues of matrices and some applications. ## Reference * https://en.wikipedia.org/wiki/Gershgorin_circle_theorem -/ variable {K n : Type*} [NormedField K] [Fintype n] [DecidableEq n] {A : Matrix n n K} /-- **Gershgorin's circle theorem**: for any eigenvalue `μ` of a square matrix `A`, there exists an index `k` such that `μ` lies in the closed ball of center the diagonal term `A k k` and of radius the sum of the norms `∑ j ≠ k, ‖A k j‖`. -/ theorem eigenvalue_mem_ball {μ : K} (hμ : Module.End.HasEigenvalue (Matrix.toLin' A) μ) : ∃ k, μ ∈ Metric.closedBall (A k k) (∑ j ∈ Finset.univ.erase k, ‖A k j‖) := by cases isEmpty_or_nonempty n · exfalso exact hμ Submodule.eq_bot_of_subsingleton · obtain ⟨v, h_eg, h_nz⟩ := hμ.exists_hasEigenvector obtain ⟨i, -, h_i⟩ := Finset.exists_mem_eq_sup' Finset.univ_nonempty (fun i => ‖v i‖) have h_nz : v i ≠ 0 := by contrapose! 
h_nz ext j rw [Pi.zero_apply, ← norm_le_zero_iff] refine (h_i ▸ Finset.le_sup' (fun i => ‖v i‖) (Finset.mem_univ j)).trans ?_ exact norm_le_zero_iff.mpr h_nz have h_le : ∀ j, ‖v j * (v i)⁻¹‖ ≤ 1 := fun j => by rw [norm_mul, norm_inv, mul_inv_le_iff₀ (norm_pos_iff.mpr h_nz), one_mul] exact h_i ▸ Finset.le_sup' (fun i => ‖v i‖) (Finset.mem_univ j) simp_rw [mem_closedBall_iff_norm'] refine ⟨i, ?_⟩ calc _ = ‖(A i i * v i - μ * v i) * (v i)⁻¹‖ := by congr; field _ = ‖(A i i * v i - ∑ j, A i j * v j) * (v i)⁻¹‖ := by rw [show μ * v i = ∑ x : n, A i x * v x by rw [← dotProduct, ← Matrix.mulVec] exact (congrFun (Module.End.mem_eigenspace_iff.mp h_eg) i).symm] _ = ‖(∑ j ∈ Finset.univ.erase i, A i j * v j) * (v i)⁻¹‖ := by rw [Finset.sum_erase_eq_sub (Finset.mem_univ i), ← neg_sub, neg_mul, norm_neg] _ ≤ ∑ j ∈ Finset.univ.erase i, ‖A i j‖ * ‖v j * (v i)⁻¹‖ := by rw [Finset.sum_mul] exact (norm_sum_le _ _).trans (le_of_eq (by simp_rw [mul_assoc, norm_mul])) _ ≤ ∑ j ∈ Finset.univ.erase i, ‖A i j‖ := (Finset.sum_le_sum fun j _ => mul_le_of_le_one_right (norm_nonneg _) (h_le j)) /-- If `A` is a row strictly dominant diagonal matrix, then it's determinant is nonzero. -/ theorem det_ne_zero_of_sum_row_lt_diag (h : ∀ k, ∑ j ∈ Finset.univ.erase k, ‖A k j‖ < ‖A k k‖) : A.det ≠ 0 := by contrapose! h suffices ∃ k, 0 ∈ Metric.closedBall (A k k) (∑ j ∈ Finset.univ.erase k, ‖A k j‖) by exact this.imp (fun a h ↦ by rwa [mem_closedBall_iff_norm', sub_zero] at h) refine eigenvalue_mem_ball ?_ rw [Module.End.hasEigenvalue_iff, Module.End.eigenspace_zero, ne_comm] exact ne_of_lt (LinearMap.bot_lt_ker_of_det_eq_zero (by rwa [LinearMap.det_toLin'])) /-- If `A` is a column strictly dominant diagonal matrix, then it's determinant is nonzero. -/ theorem det_ne_zero_of_sum_col_lt_diag (h : ∀ k, ∑ i ∈ Finset.univ.erase k, ‖A i k‖ < ‖A k k‖) : A.det ≠ 0 := by rw [← Matrix.det_transpose] exact det_ne_zero_of_sum_row_lt_diag (by simp_rw [Matrix.transpose_apply]; exact h)
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/FiniteDimensional.lean
import Mathlib.Data.Matrix.Basic import Mathlib.LinearAlgebra.FiniteDimensional.Defs import Mathlib.LinearAlgebra.FreeModule.Finite.Matrix /-! # The finite-dimensional space of matrices This file shows that `m` by `n` matrices form a finite-dimensional space. Note that this is proven more generally elsewhere over modules as `Module.Finite.matrix`; this file exists only to provide an entry in the instance list for `FiniteDimensional`. ## Main definitions * `Matrix.finiteDimensional`: matrices form a finite-dimensional vector space over a field `K` * `LinearMap.finiteDimensional` ## Tags matrix, finite dimensional, findim, finrank -/ universe u v namespace Matrix section FiniteDimensional variable {m n : Type*} {R : Type v} [Field R] instance finiteDimensional [Finite m] [Finite n] : FiniteDimensional R (Matrix m n R) := Module.Finite.matrix end FiniteDimensional end Matrix namespace LinearMap variable {K : Type*} [Field K] variable {V : Type*} [AddCommGroup V] [Module K V] [FiniteDimensional K V] variable {W : Type*} [AddCommGroup W] [Module K W] [FiniteDimensional K W] instance finiteDimensional : FiniteDimensional K (V →ₗ[K] W) := Module.Finite.linearMap _ _ _ _ variable {A : Type*} [Ring A] [Algebra K A] [Module A V] [IsScalarTower K A V] [Module A W] [IsScalarTower K A W] /-- Linear maps over a `k`-algebra are finite dimensional (over `k`) if both the source and target are, as they form a subspace of all `k`-linear maps. -/ instance finiteDimensional' : FiniteDimensional K (V →ₗ[A] W) := FiniteDimensional.of_injective (restrictScalarsₗ K A V W K) (restrictScalars_injective _) end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/AbsoluteValue.lean
import Mathlib.Algebra.Order.BigOperators.Ring.Finset import Mathlib.Data.Int.AbsoluteValue import Mathlib.LinearAlgebra.Matrix.Determinant.Basic /-! # Absolute values and matrices This file proves some bounds on matrices involving absolute values. ## Main results * `Matrix.det_le`: if the entries of an `n × n` matrix are bounded by `x`, then the determinant is bounded by `n! x^n` * `Matrix.det_sum_le`: if we have `s` `n × n` matrices and the entries of each matrix are bounded by `x`, then the determinant of their sum is bounded by `n! (s * x)^n` * `Matrix.det_sum_smul_le`: if we have `s` `n × n` matrices each multiplied by a constant bounded by `y`, and the entries of each matrix are bounded by `x`, then the determinant of the linear combination is bounded by `n! (s * y * x)^n` -/ open Matrix open scoped Nat namespace Matrix open Equiv Finset variable {R S : Type*} [CommRing R] [Nontrivial R] [CommRing S] [LinearOrder S] [IsStrictOrderedRing S] variable {n : Type*} [Fintype n] [DecidableEq n] theorem det_le {A : Matrix n n R} {abv : AbsoluteValue R S} {x : S} (hx : ∀ i j, abv (A i j) ≤ x) : abv A.det ≤ (Fintype.card n)! • x ^ Fintype.card n := calc abv A.det = abv (∑ σ : Perm n, Perm.sign σ • ∏ i, A (σ i) i) := congr_arg abv (det_apply _) _ ≤ ∑ σ : Perm n, abv (Perm.sign σ • ∏ i, A (σ i) i) := abv.sum_le _ _ _ = ∑ σ : Perm n, ∏ i, abv (A (σ i) i) := sum_congr rfl fun σ _ => by rw [abv.map_units_int_smul, abv.map_prod] _ ≤ ∑ _σ : Perm n, ∏ _i : n, x := by gcongr; simp [hx] _ = (Fintype.card n)! • x ^ Fintype.card n := by simp [Fintype.card_perm] theorem det_sum_le {ι : Type*} (s : Finset ι) {A : ι → Matrix n n R} {abv : AbsoluteValue R S} {x : S} (hx : ∀ k i j, abv (A k i j) ≤ x) : abv (det (∑ k ∈ s, A k)) ≤ (Fintype.card n)! 
• (#s • x) ^ Fintype.card n := det_le fun i j => calc abv ((∑ k ∈ s, A k) i j) = abv (∑ k ∈ s, A k i j) := by simp only [sum_apply] _ ≤ ∑ k ∈ s, abv (A k i j) := abv.sum_le _ _ _ ≤ ∑ _k ∈ s, x := by gcongr; apply hx _ = #s • x := sum_const _ theorem det_sum_smul_le {ι : Type*} (s : Finset ι) {c : ι → R} {A : ι → Matrix n n R} {abv : AbsoluteValue R S} {x : S} (hx : ∀ k i j, abv (A k i j) ≤ x) {y : S} (hy : ∀ k, abv (c k) ≤ y) : abv (det (∑ k ∈ s, c k • A k)) ≤ Nat.factorial (Fintype.card n) • (#s • y * x) ^ Fintype.card n := by simpa only [smul_mul_assoc] using det_sum_le s fun k i j => calc abv (c k * A k i j) = abv (c k) * abv (A k i j) := abv.map_mul _ _ _ ≤ y * x := mul_le_mul (hy k) (hx k i j) (abv.nonneg _) ((abv.nonneg _).trans (hy k)) end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/ToLinearEquiv.lean
import Mathlib.LinearAlgebra.Matrix.GeneralLinearGroup.Defs import Mathlib.LinearAlgebra.Matrix.Nondegenerate import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.LinearAlgebra.Matrix.ToLin import Mathlib.RingTheory.Localization.FractionRing import Mathlib.RingTheory.Localization.Integer /-! # Matrices and linear equivalences This file gives the map `Matrix.toLinearEquiv` from matrices with invertible determinant, to linear equivs. ## Main definitions * `Matrix.toLinearEquiv`: a matrix with an invertible determinant forms a linear equiv ## Main results * `Matrix.exists_mulVec_eq_zero_iff`: `M` maps some `v ≠ 0` to zero iff `det M = 0` ## Tags matrix, linear_equiv, determinant, inverse -/ open Module variable {n : Type*} [Fintype n] namespace Matrix section LinearEquiv open LinearMap variable {R M : Type*} [CommRing R] [AddCommGroup M] [Module R M] section ToLinearEquiv' variable [DecidableEq n] /-- An invertible matrix yields a linear equivalence from the free module to itself. See `Matrix.toLinearEquiv` for the same map on arbitrary modules. -/ def toLinearEquiv' (P : Matrix n n R) (_ : Invertible P) : (n → R) ≃ₗ[R] n → R := GeneralLinearGroup.generalLinearEquiv _ _ <| Matrix.GeneralLinearGroup.toLin <| unitOfInvertible P @[simp] theorem toLinearEquiv'_apply (P : Matrix n n R) (h : Invertible P) : (P.toLinearEquiv' h : Module.End R (n → R)) = Matrix.toLin' P := rfl @[simp] theorem toLinearEquiv'_symm_apply (P : Matrix n n R) (h : Invertible P) : (↑(P.toLinearEquiv' h).symm : Module.End R (n → R)) = Matrix.toLin' (⅟P) := rfl end ToLinearEquiv' section ToLinearEquiv variable (b : Basis n R M) /-- Given `hA : IsUnit A.det` and `b : Basis R b`, `A.toLinearEquiv b hA` is the `LinearEquiv` arising from `toLin b b A`. See `Matrix.toLinearEquiv'` for this result on `n → R`. 
-/ @[simps apply] noncomputable def toLinearEquiv [DecidableEq n] (A : Matrix n n R) (hA : IsUnit A.det) : M ≃ₗ[R] M where __ := toLin b b A toFun := toLin b b A invFun := toLin b b A⁻¹ left_inv x := by simp_rw [← LinearMap.comp_apply, ← Matrix.toLin_mul b b b, Matrix.nonsing_inv_mul _ hA, toLin_one, LinearMap.id_apply] right_inv x := by simp_rw [← LinearMap.comp_apply, ← Matrix.toLin_mul b b b, Matrix.mul_nonsing_inv _ hA, toLin_one, LinearMap.id_apply] theorem ker_toLin_eq_bot [DecidableEq n] (A : Matrix n n R) (hA : IsUnit A.det) : LinearMap.ker (toLin b b A) = ⊥ := ker_eq_bot.mpr (toLinearEquiv b A hA).injective theorem range_toLin_eq_top [DecidableEq n] (A : Matrix n n R) (hA : IsUnit A.det) : LinearMap.range (toLin b b A) = ⊤ := range_eq_top.mpr (toLinearEquiv b A hA).surjective end ToLinearEquiv section Nondegenerate open Matrix /-- This holds for all integral domains (see `Matrix.exists_mulVec_eq_zero_iff`), not just fields, but it's easier to prove it for the field of fractions first. -/ theorem exists_mulVec_eq_zero_iff_aux {K : Type*} [DecidableEq n] [Field K] {M : Matrix n n K} : (∃ v ≠ 0, M *ᵥ v = 0) ↔ M.det = 0 := by constructor · rintro ⟨v, hv, mul_eq⟩ contrapose! hv exact eq_zero_of_mulVec_eq_zero hv mul_eq · contrapose! 
intro h have : Function.Injective (Matrix.toLin' M) := by simpa only [← LinearMap.ker_eq_bot, ker_toLin'_eq_bot_iff, not_imp_not] using h have : M * LinearMap.toMatrix' ((LinearEquiv.ofInjectiveEndo (Matrix.toLin' M) this).symm : (n → K) →ₗ[K] n → K) = 1 := by refine Matrix.toLin'.injective (LinearMap.ext fun v => ?_) rw [Matrix.toLin'_mul, Matrix.toLin'_one, Matrix.toLin'_toMatrix', LinearMap.comp_apply] exact (LinearEquiv.ofInjectiveEndo (Matrix.toLin' M) this).apply_symm_apply v exact Matrix.det_ne_zero_of_right_inverse this theorem exists_mulVec_eq_zero_iff' {A : Type*} (K : Type*) [DecidableEq n] [CommRing A] [Nontrivial A] [Field K] [Algebra A K] [IsFractionRing A K] {M : Matrix n n A} : (∃ v ≠ 0, M *ᵥ v = 0) ↔ M.det = 0 := by have : (∃ v ≠ 0, (algebraMap A K).mapMatrix M *ᵥ v = 0) ↔ _ := exists_mulVec_eq_zero_iff_aux rw [← RingHom.map_det, IsFractionRing.to_map_eq_zero_iff] at this refine Iff.trans ?_ this; constructor <;> rintro ⟨v, hv, mul_eq⟩ · refine ⟨fun i => algebraMap _ _ (v i), mt (fun h => funext fun i => ?_) hv, ?_⟩ · exact IsFractionRing.to_map_eq_zero_iff.mp (congr_fun h i) · ext i refine (RingHom.map_mulVec _ _ _ i).symm.trans ?_ rw [mul_eq, Pi.zero_apply, RingHom.map_zero, Pi.zero_apply] · letI := Classical.decEq K obtain ⟨⟨b, hb⟩, ba_eq⟩ := IsLocalization.exist_integer_multiples_of_finset (nonZeroDivisors A) (Finset.univ.image v) choose f hf using ba_eq refine ⟨fun i => f _ (Finset.mem_image.mpr ⟨i, Finset.mem_univ i, rfl⟩), mt (fun h => funext fun i => ?_) hv, ?_⟩ · have := congr_arg (algebraMap A K) (congr_fun h i) rw [hf, Subtype.coe_mk, Pi.zero_apply, RingHom.map_zero, Algebra.smul_def, mul_eq_zero, IsFractionRing.to_map_eq_zero_iff] at this exact this.resolve_left (nonZeroDivisors.ne_zero hb) · ext i refine IsFractionRing.injective A K ?_ calc algebraMap A K ((M *ᵥ (fun i : n => f (v i) _)) i) = ((algebraMap A K).mapMatrix M *ᵥ algebraMap _ K b • v) i := ?_ _ = 0 := ?_ _ = algebraMap A K 0 := (RingHom.map_zero _).symm · simp_rw 
[RingHom.map_mulVec, mulVec, dotProduct, Function.comp_apply, hf, RingHom.mapMatrix_apply, Pi.smul_apply, smul_eq_mul, Algebra.smul_def] · rw [mulVec_smul, mul_eq, Pi.smul_apply, Pi.zero_apply, smul_zero] theorem exists_mulVec_eq_zero_iff {A : Type*} [DecidableEq n] [CommRing A] [IsDomain A] {M : Matrix n n A} : (∃ v ≠ 0, M *ᵥ v = 0) ↔ M.det = 0 := exists_mulVec_eq_zero_iff' (FractionRing A) theorem exists_vecMul_eq_zero_iff {A : Type*} [DecidableEq n] [CommRing A] [IsDomain A] {M : Matrix n n A} : (∃ v ≠ 0, v ᵥ* M = 0) ↔ M.det = 0 := by simpa only [← M.det_transpose, ← mulVec_transpose] using exists_mulVec_eq_zero_iff theorem nondegenerate_iff_det_ne_zero {A : Type*} [DecidableEq n] [CommRing A] [IsDomain A] {M : Matrix n n A} : Nondegenerate M ↔ M.det ≠ 0 := by rw [ne_eq, ← exists_vecMul_eq_zero_iff] push_neg constructor · intro hM v hv hMv obtain ⟨w, hwMv⟩ := hM.exists_not_ortho_of_ne_zero hv simp [dotProduct_mulVec, hMv, zero_dotProduct, ne_eq] at hwMv · rw [Matrix.nondegenerate_def] intro h v hv refine not_imp_not.mp (h v) (funext fun i => ?_) simpa only [dotProduct_mulVec, dotProduct_single, mul_one] using hv (Pi.single i 1) theorem Nondegenerate.mul_iff_right {A : Type*} [CommRing A] [IsDomain A] {M N : Matrix n n A} (h : N.Nondegenerate) : (M * N).Nondegenerate ↔ M.Nondegenerate := by classical simp only [nondegenerate_iff_det_ne_zero, det_mul] at h ⊢ exact mul_ne_zero_iff_right h theorem Nondegenerate.mul_iff_left {A : Type*} [CommRing A] [IsDomain A] {M N : Matrix n n A} (h : M.Nondegenerate) : (M * N).Nondegenerate ↔ N.Nondegenerate := by classical simp only [nondegenerate_iff_det_ne_zero, det_mul] at h ⊢ exact mul_ne_zero_iff_left h omit [Fintype n] in theorem Nondegenerate.smul_iff [Finite n] {A : Type*} [CommRing A] [IsDomain A] {M : Matrix n n A} {t : A} (h : t ≠ 0) : (t • M).Nondegenerate ↔ M.Nondegenerate := by simp_rw [Nondegenerate, smul_mulVec, dotProduct_smul] refine ⟨fun hM v hv ↦ hM v fun w ↦ ?_, fun hM v hv ↦ hM v fun w ↦ ?_⟩ · simp [hv] · 
exact (mul_eq_zero_iff_left h).mp <| hv w alias ⟨Nondegenerate.det_ne_zero, Nondegenerate.of_det_ne_zero⟩ := nondegenerate_iff_det_ne_zero end Nondegenerate end LinearEquiv section Determinant /-- A matrix whose nondiagonal entries are negative with the sum of the entries of each column positive has nonzero determinant. -/ lemma det_ne_zero_of_sum_col_pos [DecidableEq n] {S : Type*} [CommRing S] [LinearOrder S] [IsStrictOrderedRing S] {A : Matrix n n S} (h1 : Pairwise fun i j => A i j < 0) (h2 : ∀ j, 0 < ∑ i, A i j) : A.det ≠ 0 := by cases isEmpty_or_nonempty n · simp · contrapose! h2 obtain ⟨v, ⟨h_vnz, h_vA⟩⟩ := Matrix.exists_vecMul_eq_zero_iff.mpr h2 wlog h_sup : 0 < Finset.sup' Finset.univ Finset.univ_nonempty v · refine this h1 inferInstance h2 (-1 • v) (by simp [*]) ?_ ?_ · rw [Matrix.smul_vecMul, h_vA, smul_zero] · obtain ⟨i, hi⟩ := Function.ne_iff.mp h_vnz simp_rw [Finset.lt_sup'_iff, Finset.mem_univ, true_and] at h_sup ⊢ simp_rw [not_exists, not_lt] at h_sup refine ⟨i, ?_⟩ rw [Pi.smul_apply, neg_smul, one_smul, Left.neg_pos_iff] exact Ne.lt_of_le hi (h_sup i) · obtain ⟨j₀, -, h_j₀⟩ := Finset.exists_mem_eq_sup' Finset.univ_nonempty v refine ⟨j₀, ?_⟩ rw [← mul_le_mul_iff_right₀ (h_j₀ ▸ h_sup), Finset.mul_sum, mul_zero] rw [show 0 = ∑ i, v i * A i j₀ from (congrFun h_vA j₀).symm] refine Finset.sum_le_sum (fun i hi => ?_) by_cases h : i = j₀ · rw [h] · exact (mul_le_mul_right_of_neg (h1 h)).mpr (h_j₀ ▸ Finset.le_sup' v hi) /-- A matrix whose nondiagonal entries are negative with the sum of the entries of each row positive has nonzero determinant. -/ lemma det_ne_zero_of_sum_row_pos [DecidableEq n] {S : Type*} [CommRing S] [LinearOrder S] [IsStrictOrderedRing S] {A : Matrix n n S} (h1 : Pairwise fun i j => A i j < 0) (h2 : ∀ i, 0 < ∑ j, A i j) : A.det ≠ 0 := by rw [← Matrix.det_transpose] refine det_ne_zero_of_sum_col_pos ?_ ?_ · simp_rw [Matrix.transpose_apply] exact fun i j h => h1 h.symm · simp_rw [Matrix.transpose_apply] exact h2 end Determinant end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Irreducible/Defs.lean
import Mathlib.Combinatorics.Quiver.ConnectedComponent import Mathlib.Combinatorics.Quiver.Path.Vertices import Mathlib.Data.Matrix.Mul /-! # Irreducibility and primitivity of nonnegative matrices This file develops a graph-theoretic interface for studying the properties of nonnegative square matrices. We associate a directed graph (quiver) with a matrix `A`, where an edge `i ⟶ j` exists if and only if the entry `A i j` is strictly positive. This allows translating algebraic properties of the matrix (like powers) into graph-theoretic properties of its quiver (like theexistence of paths). ## Main definitions * `Matrix.toQuiver A`: The quiver associated with a matrix `A`, where an edge `i ⟶ j` exists if `0 < A i j`. * `Matrix.IsIrreducible A`: A matrix `A` is defined as irreducible if it is entrywise nonnegative and its associated quiver `toQuiver A` is strongly connected. The theorem `Matrix.isIrreducible_iff_exists_pow_pos` proves this graph-theoretic definition is equivalent to the algebraic one in seneta2006 (Def 1.6, p.18): for every pair of indices `(i, j)`, there exists a positive integer `k` such that `(A ^ k) i j > 0`. * `Matrix.IsPrimitive A`: A matrix `A` is primitive if it is nonnegative and some power `A ^ k` is strictly positive (all entries are `> 0`), (seneta2006, Definition 1.1, p.14). ## Main results * `Matrix.pow_apply_pos_iff_nonempty_path`: Establishes the link between matrix powers and graph theory: `(A ^ k) i j > 0` if and only if there is a path of length `k` from `i` to `j` in `toQuiver A`. * `Matrix.isIrreducible_iff_exists_pow_pos`: Shows the equivalence between the graph-theoretic definition of irreducibility (strong connectivity) and the algebraic one (existence of a positive entry in some power). * `Matrix.IsPrimitive.to_IsIrreducible`: Proves that a primitive matrix is also irreducible (Seneta, p.14). * `Matrix.IsIrreducible.transpose`: Shows that the irreducibility property is preserved under transposition. 
## Implementation notes Throughout we work over a `LinearOrderedRing R`. Some results require stronger assumptions, like `PosMulStrictMono R` or `Nontrivial R`. Some statements expand matrix powers and thus require `[DecidableEq n]` to reason about finite sums. ## References * [E. Seneta, *Non-negative Matrices and Markov Chains*][seneta2006] ## Tags matrix, nonnegative, positive, power, quiver, graph, irreducible, primitive, perron-frobenius -/ namespace Matrix open Quiver Quiver.Path variable {n R : Type*} [Ring R] [LinearOrder R] /-- The directed graph (quiver) associated with a matrix `A`, with an edge `i ⟶ j` iff `0 < A i j`. -/ def toQuiver (A : Matrix n n R) : Quiver n := ⟨fun i j => 0 < A i j⟩ /-- A matrix `A` is irreducible if it is entrywise nonnegative and its quiver of positive entries (`toQuiver A`) is strongly connected. -/ @[mk_iff] structure IsIrreducible (A : Matrix n n R) : Prop where nonneg (i j : n) : 0 ≤ A i j connected : @IsSStronglyConnected n (toQuiver A) /-- A matrix `A` is primitive if it is entrywise nonnegative and some positive power has all entries strictly positive. -/ @[mk_iff] structure IsPrimitive [Fintype n] [DecidableEq n] (A : Matrix n n R) : Prop where nonneg (i j : n) : 0 ≤ A i j exists_pos_pow : ∃ k > 0, ∀ i j, 0 < (A ^ k) i j variable {A : Matrix n n R} /-- If `A` is irreducible and `n` is non-trivial then every row has a positive entry. 
-/ lemma IsIrreducible.exists_pos [Nontrivial n] (h_irr : IsIrreducible A) (i : n) : ∃ j, 0 < A i j := by letI : Quiver n := toQuiver A by_contra h_row have no_out : ∀ j : n, IsEmpty (i ⟶ j) := fun j => ⟨fun e => h_row ⟨j, e⟩⟩ obtain ⟨j, hij⟩ := exists_pair_ne n obtain ⟨p, hp_pos⟩ := h_irr.connected i j have h_le : 1 ≤ p.length := Nat.succ_le_of_lt hp_pos have ⟨v, p₁, p₂, _hp_eq, hp₁_len⟩ := p.exists_eq_comp_of_le_length (n := 1) h_le have hlen_ne : p₁.length ≠ 0 := by simp [hp₁_len] obtain ⟨c, p', e, rfl⟩ := (Quiver.Path.length_ne_zero_iff_eq_cons (p := p₁)).1 (by omega) obtain ⟨rfl⟩ : i = c := Quiver.Path.eq_of_length_zero p' (by aesop) exact (no_out _).false e /-- For a matrix `A` with nonnegative entries, the `(i, j)`-entry of the `k`-th power `A ^ k` is strictly positive if and only if there exists a path of length `k` from `i` to `j` in the quiver associated to `A` via `toQuiver`. -/ theorem pow_apply_pos_iff_nonempty_path [Fintype n] [IsOrderedRing R] [PosMulStrictMono R] [Nontrivial R] [DecidableEq n] (hA : ∀ i j, 0 ≤ A i j) (k : ℕ) (i j : n) : letI := toQuiver A 0 < (A ^ k) i j ↔ Nonempty {p : Path i j // p.length = k} := by letI := toQuiver A induction k generalizing i j with | zero => refine ⟨fun h_pos ↦ ?_, fun ⟨p, hp⟩ ↦ ?_⟩ · rcases eq_or_ne i j with rfl | h_eq · exact ⟨⟨Quiver.Path.nil, rfl⟩⟩ · simp_all only [pow_zero, ne_eq, not_false_eq_true, one_apply_ne, lt_self_iff_false] · simp [Quiver.Path.eq_of_length_zero p hp] | succ m ih => rw [pow_succ, mul_apply] constructor · intro h_pos obtain ⟨l, hl_mem, hl_pos⟩ : ∃ l ∈ (Finset.univ : Finset n), 0 < (A ^ m) i l * A l j := by simpa [Finset.sum_pos_iff_of_nonneg (fun x _ => mul_nonneg (pow_apply_nonneg hA m i x) (hA x j))] using h_pos have hAm_nonneg : 0 ≤ (A ^ m) i l := pow_apply_nonneg hA m i l have hA_nonneg' : 0 ≤ A l j := hA l j have h_Am : 0 < (A ^ m) i l := by by_contra! h; simp [le_antisymm h hAm_nonneg] at hl_pos have h_A : 0 < A l j := by by_contra! 
h; simp [le_antisymm h hA_nonneg'] at hl_pos obtain ⟨⟨p, rfl⟩⟩ := (ih i l).mp h_Am exact ⟨p.cons h_A, by simp⟩ · rintro ⟨p, hp_len⟩ cases p with | nil => simp [Quiver.Path.length] at hp_len | @cons b _ q e => simp only [Quiver.Path.length_cons, Nat.succ.injEq] at hp_len have h_Am_pos : 0 < (A ^ m) i b := (ih i b).mpr ⟨q, hp_len⟩ let h_A_pos := e have h_prod : 0 < (A ^ m) i b * A b j := mul_pos h_Am_pos h_A_pos exact (Finset.sum_pos_iff_of_nonneg (fun x _ => mul_nonneg (pow_apply_nonneg hA m i x) (hA x j))).2 ⟨b, Finset.mem_univ b, h_prod⟩ /-- Irreducibility of a nonnegative matrix `A` is equivalent to entrywise positivity of some power: between any two indices `i, j` there exists a positive integer `k` such that the `(i, j)`-entry of `A ^ k` is strictly positive. -/ theorem isIrreducible_iff_exists_pow_pos [Fintype n] [IsOrderedRing R] [PosMulStrictMono R] [Nontrivial R] [DecidableEq n] (hA : ∀ i j, 0 ≤ A i j) : IsIrreducible A ↔ ∀ i j, ∃ k > 0, 0 < (A ^ k) i j := by letI : Quiver n := toQuiver A constructor · intro h_irr i j obtain ⟨p, hp_len⟩ := h_irr.2 i j refine ⟨p.length, hp_len, ?_⟩ have : Nonempty {q : Path i j // q.length = p.length} := ⟨⟨p, rfl⟩⟩ have hpos := (pow_apply_pos_iff_nonempty_path (A := A) hA p.length i j).2 this simpa using hpos · intro h_exists constructor · exact hA · intro i j obtain ⟨k, hk_pos, hk_entry⟩ := h_exists i j obtain ⟨⟨p, hp_len⟩⟩ := (pow_apply_pos_iff_nonempty_path (A := A) hA k i j).mp hk_entry subst hp_len exact ⟨p, hk_pos⟩ /-- If a nonnegative square matrix `A` is primitive, then `A` is irreducible. -/ theorem IsPrimitive.isIrreducible [Fintype n] [IsOrderedRing R] [PosMulStrictMono R] [Nontrivial R] [DecidableEq n] (h_prim : IsPrimitive A) : IsIrreducible A := by obtain ⟨h_nonneg, k, hk_pos, hk_all⟩ := h_prim rw [isIrreducible_iff_exists_pow_pos h_nonneg] aesop /-! ## Transposition -/ /-- Reverse a path in `toQuiver A` to a path in `toQuiver Aᵀ`, swapping endpoints. 
-/ def transposePath {i j : n} (p : @Quiver.Path n A.toQuiver i j) : @Quiver.Path n Aᵀ.toQuiver j i := by letI : Quiver n := toQuiver A induction p with | nil => exact (@Quiver.Path.nil _ (toQuiver Aᵀ) _) | @cons b c q e ih => have eT : @Quiver.Hom n (toQuiver Aᵀ) c b := by change 0 < (Aᵀ) c b simpa [Matrix.transpose_apply] using e exact (@Quiver.Path.comp n (toQuiver Aᵀ) c b i (@Quiver.Hom.toPath n (toQuiver Aᵀ) c b eT) ih) /-- Irreducibility is invariant under transpose. -/ theorem IsIrreducible.transpose (hA : IsIrreducible A) : IsIrreducible Aᵀ := by have hA_T_nonneg : ∀ i j, 0 ≤ Aᵀ i j := fun i j => by simpa [Matrix.transpose_apply] using hA.nonneg j i refine ⟨hA_T_nonneg, ?_⟩ intro i j letI : Quiver n := toQuiver A obtain ⟨p, hp_pos⟩ := hA.connected j i cases p with | nil => exact False.elim ((lt_irrefl (0 : Nat)) (by simp [Quiver.Path.length] at hp_pos)) | @cons b _ q e => let qT := transposePath (A := A) (q.cons e) letI : Quiver n := toQuiver Aᵀ have hqT_pos : 0 < qT.length := by have : 0 < Nat.succ ((transposePath (A := A) q).length) := Nat.succ_pos _ simp [qT, transposePath, Quiver.Path.length_comp, Quiver.Path.length_toPath] exact ⟨qT, hqT_pos⟩ @[simp] theorem isIrreducible_transpose_iff : Aᵀ.IsIrreducible ↔ A.IsIrreducible := by by_cases hA_nonneg : ∀ i j, 0 ≤ A i j · exact ⟨fun h ↦ let hA_T_nonneg : ∀ i j, 0 ≤ (Aᵀ) i j := fun i j => by simpa [Matrix.transpose_apply] using hA_nonneg j i IsIrreducible.transpose h, fun h ↦ IsIrreducible.transpose h⟩ · have : ¬ Aᵀ.IsIrreducible := by rw [isIrreducible_iff] simp only [transpose_apply, isSStronglyConnected_iff, not_and, not_forall, not_exists, not_lt, nonpos_iff_eq_zero] intro a; simp_all only [implies_true, not_true_eq_false] have : ¬ A.IsIrreducible := by rw [isIrreducible_iff]; simp_all only [not_forall, not_le, isSStronglyConnected_iff, not_and, not_exists, not_lt, nonpos_iff_eq_zero, isEmpty_Prop, IsEmpty.forall_iff] simp_all only [not_forall, not_le] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/FiniteField.lean
import Mathlib.FieldTheory.Finite.Basic import Mathlib.LinearAlgebra.Matrix.Charpoly.Coeff import Mathlib.LinearAlgebra.Matrix.CharP /-! # Results on characteristic polynomials and traces over finite fields. -/ noncomputable section open Polynomial Matrix open scoped Polynomial variable {n : Type*} [DecidableEq n] [Fintype n] @[simp] theorem FiniteField.Matrix.charpoly_pow_card {K : Type*} [Field K] [Fintype K] (M : Matrix n n K) : (M ^ Fintype.card K).charpoly = M.charpoly := by cases (isEmpty_or_nonempty n).symm · obtain ⟨p, hp⟩ := CharP.exists K rcases FiniteField.card K p with ⟨⟨k, kpos⟩, ⟨hp, hk⟩⟩ haveI : Fact p.Prime := ⟨hp⟩ dsimp at hk; rw [hk] apply (frobenius_inj K[X] p).iterate k repeat' rw [iterate_frobenius (R := K[X])]; rw [← hk] rw [← FiniteField.expand_card] unfold charpoly rw [AlgHom.map_det, ← coe_detMonoidHom, ← (detMonoidHom : Matrix n n K[X] →* K[X]).map_pow] apply congr_arg det refine matPolyEquiv.injective ?_ rw [map_pow, matPolyEquiv_charmatrix, hk, sub_pow_char_pow_of_commute, ← C_pow] · exact (id (matPolyEquiv_eq_X_pow_sub_C (p ^ k) M) :) · exact (C M).commute_X · exact congr_arg _ (Subsingleton.elim _ _) @[simp] theorem ZMod.charpoly_pow_card {p : ℕ} [Fact p.Prime] (M : Matrix n n (ZMod p)) : (M ^ p).charpoly = M.charpoly := by have h := FiniteField.Matrix.charpoly_pow_card M rwa [ZMod.card] at h theorem FiniteField.trace_pow_card {K : Type*} [Field K] [Fintype K] (M : Matrix n n K) : trace (M ^ Fintype.card K) = trace M ^ Fintype.card K := by cases isEmpty_or_nonempty n · simp [Matrix.trace] rw [Matrix.trace_eq_neg_charpoly_coeff, Matrix.trace_eq_neg_charpoly_coeff, FiniteField.Matrix.charpoly_pow_card, FiniteField.pow_card] theorem ZMod.trace_pow_card {p : ℕ} [Fact p.Prime] (M : Matrix n n (ZMod p)) : trace (M ^ p) = trace M ^ p := by have h := FiniteField.trace_pow_card M; rwa [ZMod.card] at h
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Basic.lean
import Mathlib.Algebra.Polynomial.Eval.SMul import Mathlib.LinearAlgebra.Matrix.Adjugate import Mathlib.LinearAlgebra.Matrix.Block import Mathlib.RingTheory.MatrixPolynomialAlgebra /-! # Characteristic polynomials and the Cayley-Hamilton theorem We define characteristic polynomials of matrices and prove the Cayley–Hamilton theorem over arbitrary commutative rings. See the file `Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean` for corollaries of this theorem. ## Main definitions * `Matrix.charpoly` is the characteristic polynomial of a matrix. ## Implementation details We follow a nice proof from http://drorbn.net/AcademicPensieve/2015-12/CayleyHamilton.pdf -/ noncomputable section universe u v w namespace Matrix open Finset Matrix Polynomial variable {R S : Type*} [CommRing R] [CommRing S] variable {m n : Type*} [DecidableEq m] [DecidableEq n] [Fintype m] [Fintype n] variable (M₁₁ : Matrix m m R) (M₁₂ : Matrix m n R) (M₂₁ : Matrix n m R) (M₂₂ M : Matrix n n R) variable (i j : n) /-- The "characteristic matrix" of `M : Matrix n n R` is the matrix of polynomials $t I - M$. The determinant of this matrix is the characteristic polynomial. 
-/ def charmatrix (M : Matrix n n R) : Matrix n n R[X] := Matrix.scalar n (X : R[X]) - (C : R →+* R[X]).mapMatrix M theorem charmatrix_apply : charmatrix M i j = (Matrix.diagonal fun _ : n => X) i j - C (M i j) := rfl @[simp] theorem charmatrix_apply_eq : charmatrix M i i = (X : R[X]) - C (M i i) := by simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply, map_apply, diagonal_apply_eq] @[simp] theorem charmatrix_apply_ne (h : i ≠ j) : charmatrix M i j = -C (M i j) := by simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply, diagonal_apply_ne _ h, map_apply, sub_eq_neg_self] @[simp] theorem charmatrix_zero : charmatrix (0 : Matrix n n R) = Matrix.scalar n (X : R[X]) := by simp [charmatrix] @[simp] theorem charmatrix_diagonal (d : n → R) : charmatrix (diagonal d) = diagonal fun i => X - C (d i) := by rw [charmatrix, scalar_apply, RingHom.mapMatrix_apply, diagonal_map (map_zero _), diagonal_sub] @[simp] theorem charmatrix_one : charmatrix (1 : Matrix n n R) = diagonal fun _ => X - 1 := charmatrix_diagonal _ @[simp] theorem charmatrix_natCast (k : ℕ) : charmatrix (k : Matrix n n R) = diagonal fun _ => X - (k : R[X]) := charmatrix_diagonal _ @[simp] theorem charmatrix_ofNat (k : ℕ) [k.AtLeastTwo] : charmatrix (ofNat(k) : Matrix n n R) = diagonal fun _ => X - ofNat(k) := charmatrix_natCast _ @[simp] theorem charmatrix_transpose (M : Matrix n n R) : (Mᵀ).charmatrix = M.charmatrixᵀ := by simp [charmatrix, transpose_map] theorem matPolyEquiv_charmatrix : matPolyEquiv (charmatrix M) = X - C M := by ext k i j simp only [matPolyEquiv_coeff_apply, coeff_sub] by_cases h : i = j · subst h rw [charmatrix_apply_eq, coeff_sub] simp only [coeff_X, coeff_C] split_ifs <;> simp · rw [charmatrix_apply_ne _ _ _ h, coeff_X, coeff_neg, coeff_C, coeff_C] split_ifs <;> simp [h] theorem charmatrix_reindex (e : n ≃ m) : charmatrix (reindex e e M) = reindex e e (charmatrix M) := by ext i j x by_cases h : i = j all_goals simp [h] lemma charmatrix_map (M : 
Matrix n n R) (f : R →+* S) : charmatrix (M.map f) = (charmatrix M).map (Polynomial.map f) := by ext i j by_cases h : i = j <;> simp [h, charmatrix, diagonal] lemma charmatrix_fromBlocks : charmatrix (fromBlocks M₁₁ M₁₂ M₂₁ M₂₂) = fromBlocks (charmatrix M₁₁) (- M₁₂.map C) (- M₂₁.map C) (charmatrix M₂₂) := by simp only [charmatrix] ext (i|i) (j|j) : 2 <;> simp [diagonal] -- TODO: importing block triangular here is somewhat expensive, if more lemmas about it are added -- to this file, it may be worth extracting things out to Charpoly/Block.lean @[simp] lemma charmatrix_blockTriangular_iff {α : Type*} [Preorder α] {M : Matrix n n R} {b : n → α} : M.charmatrix.BlockTriangular b ↔ M.BlockTriangular b := by rw [charmatrix, scalar_apply, RingHom.mapMatrix_apply, (blockTriangular_diagonal _).sub_iff_right] simp [BlockTriangular] alias ⟨BlockTriangular.of_charmatrix, BlockTriangular.charmatrix⟩ := charmatrix_blockTriangular_iff /-- The characteristic polynomial of a matrix `M` is given by $\det (t I - M)$. 
-/ def charpoly (M : Matrix n n R) : R[X] := (charmatrix M).det theorem eval_charpoly (M : Matrix m m R) (t : R) : M.charpoly.eval t = (Matrix.scalar _ t - M).det := by rw [Matrix.charpoly, ← Polynomial.coe_evalRingHom, RingHom.map_det, Matrix.charmatrix] congr ext i j obtain rfl | hij := eq_or_ne i j <;> simp [*] @[simp] theorem charpoly_isEmpty [IsEmpty n] {A : Matrix n n R} : charpoly A = 1 := by simp [charpoly] @[simp] theorem charpoly_zero : charpoly (0 : Matrix n n R) = X ^ Fintype.card n := by simp [charpoly] theorem charpoly_diagonal (d : n → R) : charpoly (diagonal d) = ∏ i, (X - C (d i)) := by simp [charpoly] theorem charpoly_one : charpoly (1 : Matrix n n R) = (X - 1) ^ Fintype.card n := by simp [charpoly] theorem charpoly_natCast (k : ℕ) : charpoly (k : Matrix n n R) = (X - (k : R[X])) ^ Fintype.card n := by simp [charpoly] theorem charpoly_ofNat (k : ℕ) [k.AtLeastTwo] : charpoly (ofNat(k) : Matrix n n R) = (X - ofNat(k)) ^ Fintype.card n:= charpoly_natCast _ @[simp] theorem charpoly_transpose (M : Matrix n n R) : (Mᵀ).charpoly = M.charpoly := by simp [charpoly] theorem charpoly_reindex (e : n ≃ m) (M : Matrix n n R) : (reindex e e M).charpoly = M.charpoly := by unfold Matrix.charpoly rw [charmatrix_reindex, Matrix.det_reindex_self] lemma charpoly_map (M : Matrix n n R) (f : R →+* S) : (M.map f).charpoly = M.charpoly.map f := by rw [charpoly, charmatrix_map, ← Polynomial.coe_mapRingHom, charpoly, RingHom.map_det, RingHom.mapMatrix_apply] @[simp] lemma charpoly_fromBlocks_zero₁₂ : (fromBlocks M₁₁ 0 M₂₁ M₂₂).charpoly = (M₁₁.charpoly * M₂₂.charpoly) := by simp only [charpoly, charmatrix_fromBlocks, Matrix.map_zero _ (Polynomial.C_0), neg_zero, det_fromBlocks_zero₁₂] @[simp] lemma charpoly_fromBlocks_zero₂₁ : (fromBlocks M₁₁ M₁₂ 0 M₂₂).charpoly = (M₁₁.charpoly * M₂₂.charpoly) := by simp only [charpoly, charmatrix_fromBlocks, Matrix.map_zero _ (Polynomial.C_0), neg_zero, det_fromBlocks_zero₂₁] lemma charmatrix_toSquareBlock {α : Type*} [DecidableEq α] {b : n 
→ α} {a : α} : (M.toSquareBlock b a).charmatrix = M.charmatrix.toSquareBlock b a := by ext i j : 1 simp [charmatrix_apply, toSquareBlock_def, diagonal_apply, Subtype.ext_iff] lemma BlockTriangular.charpoly {α : Type*} {b : n → α} [LinearOrder α] (h : M.BlockTriangular b) : M.charpoly = ∏ a ∈ image b univ, (M.toSquareBlock b a).charpoly := by simp only [Matrix.charpoly, h.charmatrix.det, charmatrix_toSquareBlock] lemma charpoly_of_upperTriangular [LinearOrder n] (M : Matrix n n R) (h : M.BlockTriangular id) : M.charpoly = ∏ i : n, (X - C (M i i)) := by simp [charpoly, det_of_upperTriangular h.charmatrix] -- This proof follows http://drorbn.net/AcademicPensieve/2015-12/CayleyHamilton.pdf /-- The **Cayley-Hamilton Theorem**, that the characteristic polynomial of a matrix, applied to the matrix itself, is zero. This holds over any commutative ring. See `LinearMap.aeval_self_charpoly` for the equivalent statement about endomorphisms. -/ theorem aeval_self_charpoly (M : Matrix n n R) : aeval M M.charpoly = 0 := by -- We begin with the fact $χ_M(t) I = adjugate (t I - M) * (t I - M)$, -- as an identity in `Matrix n n R[X]`. have h : M.charpoly • (1 : Matrix n n R[X]) = adjugate (charmatrix M) * charmatrix M := (adjugate_mul _).symm -- Using the algebra isomorphism `Matrix n n R[X] ≃ₐ[R] Polynomial (Matrix n n R)`, -- we have the same identity in `Polynomial (Matrix n n R)`. apply_fun matPolyEquiv at h simp only [map_mul, matPolyEquiv_charmatrix] at h -- Because the coefficient ring `Matrix n n R` is non-commutative, -- evaluation at `M` is not multiplicative. -- However, any polynomial which is a product of the form $N * (t I - M)$ -- is sent to zero, because the evaluation function puts the polynomial variable -- to the right of any coefficients, so everything telescopes. apply_fun fun p => p.eval M at h rw [eval_mul_X_sub_C] at h -- Now $χ_M (t) I$, when thought of as a polynomial of matrices -- and evaluated at some `N` is exactly $χ_M (N)$. 
rw [matPolyEquiv_smul_one, eval_map] at h -- Thus we have $χ_M(M) = 0$, which is the desired result. exact h /-- A version of `Matrix.charpoly_mul_comm` for rectangular matrices. See also `Matrix.charpoly_mul_comm_of_le` which has just `(A * B).charpoly` as the LHS. -/ theorem charpoly_mul_comm' (A : Matrix m n R) (B : Matrix n m R) : X ^ Fintype.card n * (A * B).charpoly = X ^ Fintype.card m * (B * A).charpoly := by -- This proof follows https://math.stackexchange.com/a/311362/315369 let M := fromBlocks (scalar m X) (A.map C) (B.map C) (1 : Matrix n n R[X]) let N := fromBlocks (-1 : Matrix m m R[X]) 0 (B.map C) (-scalar n X) have hMN : M * N = fromBlocks (-scalar m X + (A * B).map C) (-(X : R[X]) • A.map C) 0 (-scalar n X) := by simp [M, N, fromBlocks_multiply, smul_eq_mul_diagonal, -diagonal_neg] have hNM : N * M = fromBlocks (-scalar m X) (-A.map C) 0 ((B * A).map C - scalar n X) := by simp [M, N, fromBlocks_multiply, sub_eq_add_neg, -scalar_apply, scalar_comm, Commute.all] have hdet_MN : (M * N).det = (-1 : R[X]) ^ (Fintype.card m + Fintype.card n) * (X ^ Fintype.card n * (scalar m X - (A * B).map C).det) := by rw [hMN, det_fromBlocks_zero₂₁, neg_add_eq_sub, ← neg_sub, det_neg] simp ring have hdet_NM : (N * M).det = (-1 : R[X]) ^ (Fintype.card m + Fintype.card n) * (X ^ Fintype.card m * (scalar n X - (B * A).map C).det) := by rw [hNM, det_fromBlocks_zero₂₁, ← neg_sub, det_neg (_ - _)] simp ring dsimp only [charpoly, charmatrix, RingHom.mapMatrix_apply] rw [← (isUnit_neg_one.pow _).isRegular.left.eq_iff, ← hdet_NM, ← hdet_MN, det_mul_comm] theorem charpoly_mul_comm_of_le (A : Matrix m n R) (B : Matrix n m R) (hle : Fintype.card n ≤ Fintype.card m) : (A * B).charpoly = X ^ (Fintype.card m - Fintype.card n) * (B * A).charpoly := by rw [← (isRegular_X_pow _).left.eq_iff, ← mul_assoc, ← pow_add, Nat.add_sub_cancel' hle, charpoly_mul_comm'] /-- A version of `charpoly_mul_comm'` for square matrices. 
-/ theorem charpoly_mul_comm (A B : Matrix n n R) : (A * B).charpoly = (B * A).charpoly := (isRegular_X_pow _).left.eq_iff.mp <| charpoly_mul_comm' A B theorem charpoly_vecMulVec (u v : n → R) : (vecMulVec u v).charpoly = X ^ Fintype.card n - (u ⬝ᵥ v) • X ^ (Fintype.card n - 1) := by cases isEmpty_or_nonempty n · simp · have h : 1 ≤ Fintype.card n := NeZero.one_le rw [vecMulVec_eq (ι := Unit), charpoly_mul_comm_of_le (n := Unit) _ _ h, charpoly, charmatrix] simp [-Matrix.map_mul, mul_sub, ← pow_succ, h, dotProduct_comm, smul_eq_C_mul] theorem charpoly_units_conj (M : (Matrix n n R)ˣ) (N : Matrix n n R) : (M.val * N * M⁻¹.val).charpoly = N.charpoly := by rw [Matrix.charpoly_mul_comm, ← mul_assoc] simp theorem charpoly_units_conj' (M : (Matrix n n R)ˣ) (N : Matrix n n R) : (M⁻¹.val * N * M.val).charpoly = N.charpoly := charpoly_units_conj M⁻¹ N theorem charpoly_sub_scalar (M : Matrix n n R) (μ : R) : (M - scalar n μ).charpoly = M.charpoly.comp (X + C μ) := by simp_rw [charpoly, det_apply, Polynomial.sum_comp, Polynomial.smul_comp, Polynomial.prod_comp] congr! with σ _ i _ by_cases hi : σ i = i <;> simp [hi] ring end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Univ.lean
import Mathlib.Algebra.MvPolynomial.Equiv import Mathlib.LinearAlgebra.Matrix.Charpoly.Coeff import Mathlib.RingTheory.MvPolynomial.Homogeneous /-! # The universal characteristic polynomial In this file we define the universal characteristic polynomial `Matrix.charpoly.univ`, which is the characteristic polynomial of the matrix with entries `Xᵢⱼ`, and hence has coefficients that are multivariate polynomials. It is universal in the sense that one obtains the characteristic polynomial of a matrix `M` by evaluating the coefficients of `univ` at the entries of `M`. We use it to show that the coefficients of the characteristic polynomial of a matrix are homogeneous polynomials in the matrix entries. ## Main results * `Matrix.charpoly.univ`: the universal characteristic polynomial * `Matrix.charpoly.univ_map_eval₂Hom`: evaluating `univ` on the entries of a matrix `M` gives the characteristic polynomial of `M`. * `Matrix.charpoly.univ_coeff_isHomogeneous`: the `i`-th coefficient of `univ` is a homogeneous polynomial of degree `n - i`. -/ namespace Matrix.charpoly variable {R S : Type*} (n : Type*) [CommRing R] [CommRing S] [Fintype n] [DecidableEq n] variable (f : R →+* S) variable (R) in /-- The universal characteristic polynomial for `n × n`-matrices, is the characteristic polynomial of `Matrix.mvPolynomialX n n ℤ` with entries `Xᵢⱼ`. Its `i`-th coefficient is a homogeneous polynomial of degree `n - i`, see `Matrix.charpoly.univ_coeff_isHomogeneous`. By evaluating the coefficients at the entries of a matrix `M`, one obtains the characteristic polynomial of `M`, see `Matrix.charpoly.univ_map_eval₂Hom`. 
-/ noncomputable abbrev univ : Polynomial (MvPolynomial (n × n) R) := charpoly <| mvPolynomialX n n R open MvPolynomial RingHomClass in @[simp] lemma univ_map_eval₂Hom (M : n × n → S) : (univ R n).map (eval₂Hom f M) = charpoly (Matrix.of M.curry) := by rw [univ, ← charpoly_map, coe_eval₂Hom, ← mvPolynomialX_map_eval₂ f (Matrix.of M.curry)] simp only [of_apply, Function.curry_apply, Prod.mk.eta] lemma univ_map_map : (univ R n).map (MvPolynomial.map f) = univ S n := by rw [MvPolynomial.map, univ_map_eval₂Hom]; rfl @[simp] lemma univ_coeff_eval₂Hom (M : n × n → S) (i : ℕ) : MvPolynomial.eval₂Hom f M ((univ R n).coeff i) = (charpoly (Matrix.of M.curry)).coeff i := by rw [← univ_map_eval₂Hom n f M, Polynomial.coeff_map] variable (R) lemma univ_monic : (univ R n).Monic := charpoly_monic (mvPolynomialX n n R) lemma univ_natDegree [Nontrivial R] : (univ R n).natDegree = Fintype.card n := charpoly_natDegree_eq_dim (mvPolynomialX n n R) @[simp] lemma univ_coeff_card : (univ R n).coeff (Fintype.card n) = 1 := by suffices Polynomial.coeff (univ ℤ n) (Fintype.card n) = 1 by rw [← univ_map_map n (Int.castRingHom R), Polynomial.coeff_map, this, map_one] rw [← univ_natDegree ℤ n] exact (univ_monic ℤ n).leadingCoeff open MvPolynomial in lemma optionEquivLeft_symm_univ_isHomogeneous : ((optionEquivLeft R (n × n)).symm (univ R n)).IsHomogeneous (Fintype.card n) := by have aux : Fintype.card n = 0 + ∑ i : n, 1 := by simp only [zero_add, Finset.sum_const, smul_eq_mul, mul_one, Fintype.card] simp only [aux, univ, charpoly, charmatrix, scalar_apply, RingHom.mapMatrix_apply, det_apply', sub_apply, map_apply, of_apply, map_sum, map_mul, map_intCast, map_prod, map_sub, optionEquivLeft_symm_apply, Polynomial.aevalTower_C, rename_X, diagonal, mvPolynomialX] apply IsHomogeneous.sum rintro i - apply IsHomogeneous.mul · apply isHomogeneous_C · apply IsHomogeneous.prod rintro j - by_cases h : i j = j · simp only [h, ↓reduceIte, Polynomial.aevalTower_X, IsHomogeneous.sub, isHomogeneous_X] · simp 
only [h, ↓reduceIte, map_zero, zero_sub, (isHomogeneous_X _ _).neg] lemma univ_coeff_isHomogeneous (i j : ℕ) (h : i + j = Fintype.card n) : ((univ R n).coeff i).IsHomogeneous j := (optionEquivLeft_symm_univ_isHomogeneous R n).coeff_isHomogeneous_of_optionEquivLeft_symm _ _ h end Matrix.charpoly
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/LinearMap.lean
import Mathlib.LinearAlgebra.Matrix.Charpoly.Coeff import Mathlib.LinearAlgebra.Matrix.ToLin /-! # Cayley-Hamilton theorem for f.g. modules. Given a fixed finite spanning set `b : ι → M` of an `R`-module `M`, we say that a matrix `M` represents an endomorphism `f : M →ₗ[R] M` if the matrix as an endomorphism of `ι → R` commutes with `f` via the projection `(ι → R) →ₗ[R] M` given by `b`. We show that every endomorphism has a matrix representation, and if `f.range ≤ I • ⊤` for some ideal `I`, we may furthermore obtain a matrix representation whose entries fall in `I`. This is used to conclude the Cayley-Hamilton theorem for f.g. modules over arbitrary rings. -/ variable {ι : Type*} [Fintype ι] variable {M : Type*} [AddCommGroup M] (R : Type*) [CommRing R] [Module R M] (I : Ideal R) variable (b : ι → M) open Polynomial Matrix /-- The composition of a matrix (as an endomorphism of `ι → R`) with the projection `(ι → R) →ₗ[R] M`. -/ def PiToModule.fromMatrix [DecidableEq ι] : Matrix ι ι R →ₗ[R] (ι → R) →ₗ[R] M := (LinearMap.llcomp R _ _ _ (Fintype.linearCombination R b)).comp algEquivMatrix'.symm.toLinearMap theorem PiToModule.fromMatrix_apply [DecidableEq ι] (A : Matrix ι ι R) (w : ι → R) : PiToModule.fromMatrix R b A w = Fintype.linearCombination R b (A *ᵥ w) := rfl theorem PiToModule.fromMatrix_apply_single_one [DecidableEq ι] (A : Matrix ι ι R) (j : ι) : PiToModule.fromMatrix R b A (Pi.single j 1) = ∑ i : ι, A i j • b i := by rw [PiToModule.fromMatrix_apply, Fintype.linearCombination_apply, Matrix.mulVec_single] simp_rw [MulOpposite.op_one, one_smul, col_apply] /-- The endomorphisms of `M` acts on `(ι → R) →ₗ[R] M`, and takes the projection to a `(ι → R) →ₗ[R] M`. 
-/ def PiToModule.fromEnd : Module.End R M →ₗ[R] (ι → R) →ₗ[R] M := LinearMap.lcomp _ _ (Fintype.linearCombination R b) theorem PiToModule.fromEnd_apply (f : Module.End R M) (w : ι → R) : PiToModule.fromEnd R b f w = f (Fintype.linearCombination R b w) := rfl theorem PiToModule.fromEnd_apply_single_one [DecidableEq ι] (f : Module.End R M) (i : ι) : PiToModule.fromEnd R b f (Pi.single i 1) = f (b i) := by rw [PiToModule.fromEnd_apply, Fintype.linearCombination_apply_single, one_smul] theorem PiToModule.fromEnd_injective (hb : Submodule.span R (Set.range b) = ⊤) : Function.Injective (PiToModule.fromEnd R b) := by intro x y e ext m obtain ⟨m, rfl⟩ : m ∈ LinearMap.range (Fintype.linearCombination R b) := by rw [(Fintype.range_linearCombination R b).trans hb] exact Submodule.mem_top exact (LinearMap.congr_fun e m :) section variable {R} [DecidableEq ι] /-- We say that a matrix represents an endomorphism of `M` if the matrix acting on `ι → R` is equal to `f` via the projection `(ι → R) →ₗ[R] M` given by a fixed (spanning) set. 
-/ def Matrix.Represents (A : Matrix ι ι R) (f : Module.End R M) : Prop := PiToModule.fromMatrix R b A = PiToModule.fromEnd R b f variable {b} theorem Matrix.Represents.congr_fun {A : Matrix ι ι R} {f : Module.End R M} (h : A.Represents b f) (x) : Fintype.linearCombination R b (A *ᵥ x) = f (Fintype.linearCombination R b x) := LinearMap.congr_fun h x theorem Matrix.represents_iff {A : Matrix ι ι R} {f : Module.End R M} : A.Represents b f ↔ ∀ x, Fintype.linearCombination R b (A *ᵥ x) = f (Fintype.linearCombination R b x) := ⟨fun e x => e.congr_fun x, fun H => LinearMap.ext fun x => H x⟩ theorem Matrix.represents_iff' {A : Matrix ι ι R} {f : Module.End R M} : A.Represents b f ↔ ∀ j, ∑ i : ι, A i j • b i = f (b j) := by constructor · intro h i have := LinearMap.congr_fun h (Pi.single i 1) rwa [PiToModule.fromEnd_apply_single_one, PiToModule.fromMatrix_apply_single_one] at this · intro h ext simp_rw [LinearMap.comp_apply, LinearMap.coe_single, PiToModule.fromEnd_apply_single_one, PiToModule.fromMatrix_apply_single_one] apply h theorem Matrix.Represents.mul {A A' : Matrix ι ι R} {f f' : Module.End R M} (h : A.Represents b f) (h' : Matrix.Represents b A' f') : (A * A').Represents b (f * f') := by delta Matrix.Represents PiToModule.fromMatrix rw [LinearMap.comp_apply, AlgEquiv.toLinearMap_apply, map_mul] ext dsimp [PiToModule.fromEnd] rw [← h'.congr_fun, ← h.congr_fun] rfl theorem Matrix.Represents.one : (1 : Matrix ι ι R).Represents b 1 := by delta Matrix.Represents PiToModule.fromMatrix rw [LinearMap.comp_apply, AlgEquiv.toLinearMap_apply, map_one] ext rfl theorem Matrix.Represents.add {A A' : Matrix ι ι R} {f f' : Module.End R M} (h : A.Represents b f) (h' : Matrix.Represents b A' f') : (A + A').Represents b (f + f') := by delta Matrix.Represents at h h' ⊢; rw [map_add, map_add, h, h'] theorem Matrix.Represents.zero : (0 : Matrix ι ι R).Represents b 0 := by delta Matrix.Represents rw [map_zero, map_zero] theorem Matrix.Represents.smul {A : Matrix ι ι R} {f : Module.End 
R M} (h : A.Represents b f) (r : R) : (r • A).Represents b (r • f) := by delta Matrix.Represents at h ⊢ rw [map_smul, map_smul, h] theorem Matrix.Represents.algebraMap (r : R) : (algebraMap _ (Matrix ι ι R) r).Represents b (algebraMap _ (Module.End R M) r) := by simpa only [Algebra.algebraMap_eq_smul_one] using Matrix.Represents.one.smul r theorem Matrix.Represents.eq (hb : Submodule.span R (Set.range b) = ⊤) {A : Matrix ι ι R} {f f' : Module.End R M} (h : A.Represents b f) (h' : A.Represents b f') : f = f' := PiToModule.fromEnd_injective R b hb (h.symm.trans h') variable (b R) /-- The subalgebra of `Matrix ι ι R` that consists of matrices that actually represent endomorphisms on `M`. -/ def Matrix.isRepresentation : Subalgebra R (Matrix ι ι R) where carrier := { A | ∃ f : Module.End R M, A.Represents b f } mul_mem' := fun ⟨f₁, e₁⟩ ⟨f₂, e₂⟩ => ⟨f₁ * f₂, e₁.mul e₂⟩ one_mem' := ⟨1, Matrix.Represents.one⟩ add_mem' := fun ⟨f₁, e₁⟩ ⟨f₂, e₂⟩ => ⟨f₁ + f₂, e₁.add e₂⟩ zero_mem' := ⟨0, Matrix.Represents.zero⟩ algebraMap_mem' r := ⟨algebraMap _ _ r, .algebraMap _⟩ variable (hb : Submodule.span R (Set.range b) = ⊤) include hb /-- The map sending a matrix to the endomorphism it represents. This is an `R`-algebra morphism. 
-/ noncomputable def Matrix.isRepresentation.toEnd : Matrix.isRepresentation R b →ₐ[R] Module.End R M where toFun A := A.2.choose map_one' := (1 : Matrix.isRepresentation R b).2.choose_spec.eq hb Matrix.Represents.one map_mul' A₁ A₂ := (A₁ * A₂).2.choose_spec.eq hb (A₁.2.choose_spec.mul A₂.2.choose_spec) map_zero' := (0 : Matrix.isRepresentation R b).2.choose_spec.eq hb Matrix.Represents.zero map_add' A₁ A₂ := (A₁ + A₂).2.choose_spec.eq hb (A₁.2.choose_spec.add A₂.2.choose_spec) commutes' r := (algebraMap _ (Matrix.isRepresentation R b) r).2.choose_spec.eq hb (.algebraMap r) theorem Matrix.isRepresentation.toEnd_represents (A : Matrix.isRepresentation R b) : (A : Matrix ι ι R).Represents b (Matrix.isRepresentation.toEnd R b hb A) := A.2.choose_spec theorem Matrix.isRepresentation.eq_toEnd_of_represents (A : Matrix.isRepresentation R b) {f : Module.End R M} (h : (A : Matrix ι ι R).Represents b f) : Matrix.isRepresentation.toEnd R b hb A = f := A.2.choose_spec.eq hb h theorem Matrix.isRepresentation.toEnd_exists_mem_ideal (f : Module.End R M) (I : Ideal R) (hI : LinearMap.range f ≤ I • ⊤) : ∃ M, Matrix.isRepresentation.toEnd R b hb M = f ∧ ∀ i j, M.1 i j ∈ I := by have : ∀ x, f x ∈ LinearMap.range (Ideal.finsuppTotal ι M I b) := by rw [Ideal.range_finsuppTotal, hb] exact fun x => hI (LinearMap.mem_range_self f x) choose bM' hbM' using this let A : Matrix ι ι R := fun i j => bM' (b j) i have : A.Represents b f := by rw [Matrix.represents_iff'] dsimp [A] intro j specialize hbM' (b j) rwa [Ideal.finsuppTotal_apply_eq_of_fintype] at hbM' exact ⟨⟨A, f, this⟩, Matrix.isRepresentation.eq_toEnd_of_represents R b hb ⟨A, f, this⟩ this, fun i j => (bM' (b j) i).prop⟩ theorem Matrix.isRepresentation.toEnd_surjective : Function.Surjective (Matrix.isRepresentation.toEnd R b hb) := by intro f obtain ⟨M, e, -⟩ := Matrix.isRepresentation.toEnd_exists_mem_ideal R b hb f ⊤ (by simp) exact ⟨M, e⟩ end /-- The **Cayley-Hamilton Theorem** for f.g. 
modules over arbitrary rings states that for each `R`-endomorphism `φ` of an `R`-module `M` such that `φ(M) ≤ I • M` for some ideal `I`, there exists some `n` and some `aᵢ ∈ Iⁱ` such that `φⁿ + a₁ φⁿ⁻¹ + ⋯ + aₙ = 0`. This is the version found in Eisenbud 4.3, which is slightly weaker than Matsumura 2.1 (this lacks the constraint on `n`), and is slightly stronger than Atiyah-Macdonald 2.4. -/ theorem LinearMap.exists_monic_and_coeff_mem_pow_and_aeval_eq_zero_of_range_le_smul [Module.Finite R M] (f : Module.End R M) (I : Ideal R) (hI : LinearMap.range f ≤ I • ⊤) : ∃ p : R[X], p.Monic ∧ (∀ k, p.coeff k ∈ I ^ (p.natDegree - k)) ∧ Polynomial.aeval f p = 0 := by classical cases subsingleton_or_nontrivial R · exact ⟨0, Polynomial.monic_of_subsingleton _, by simp⟩ obtain ⟨s : Finset M, hs : Submodule.span R (s : Set M) = ⊤⟩ := Module.Finite.fg_top (R := R) (M := M) have : Submodule.span R (Set.range ((↑) : { x // x ∈ s } → M)) = ⊤ := by rw [Subtype.range_coe_subtype, Finset.setOf_mem, hs] obtain ⟨A, rfl, h⟩ := Matrix.isRepresentation.toEnd_exists_mem_ideal R ((↑) : s → M) this f I hI refine ⟨A.1.charpoly, A.1.charpoly_monic, ?_, ?_⟩ · rw [A.1.charpoly_natDegree_eq_dim] exact coeff_charpoly_mem_ideal_pow h · rw [Polynomial.aeval_algHom_apply, ← map_zero (Matrix.isRepresentation.toEnd R ((↑) : s → M) this)] congr 1 ext1 rw [Polynomial.aeval_subalgebra_coe, Matrix.aeval_self_charpoly, Subalgebra.coe_zero] theorem LinearMap.exists_monic_and_aeval_eq_zero [Module.Finite R M] (f : Module.End R M) : ∃ p : R[X], p.Monic ∧ Polynomial.aeval f p = 0 := (LinearMap.exists_monic_and_coeff_mem_pow_and_aeval_eq_zero_of_range_le_smul R f ⊤ (by simp)).imp fun _ h => h.imp_right And.right
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean
import Mathlib.Algebra.Polynomial.Expand import Mathlib.Algebra.Polynomial.Laurent import Mathlib.Algebra.Polynomial.Eval.SMul import Mathlib.LinearAlgebra.Matrix.Charpoly.Basic import Mathlib.LinearAlgebra.Matrix.Reindex import Mathlib.LinearAlgebra.Matrix.SchurComplement import Mathlib.RingTheory.Polynomial.Nilpotent /-! # Characteristic polynomials We give methods for computing coefficients of the characteristic polynomial. ## Main definitions - `Matrix.charpoly_degree_eq_dim` proves that the degree of the characteristic polynomial over a nonzero ring is the dimension of the matrix - `Matrix.det_eq_sign_charpoly_coeff` proves that the determinant is the constant term of the characteristic polynomial, up to sign. - `Matrix.trace_eq_neg_charpoly_coeff` proves that the trace is the negative of the (d-1)th coefficient of the characteristic polynomial, where d is the dimension of the matrix. For a nonzero ring, this is the second-highest coefficient. - `Matrix.charpolyRev` the reverse of the characteristic polynomial. - `Matrix.reverse_charpoly` characterises the reverse of the characteristic polynomial. 
-/ noncomputable section universe u v w z open Finset Matrix Polynomial variable {R : Type u} [CommRing R] variable {n G : Type v} [DecidableEq n] [Fintype n] variable {α β : Type v} [DecidableEq α] variable {M : Matrix n n R} namespace Matrix theorem charmatrix_apply_natDegree [Nontrivial R] (i j : n) : (charmatrix M i j).natDegree = ite (i = j) 1 0 := by by_cases h : i = j <;> simp [h] theorem charmatrix_apply_natDegree_le (i j : n) : (charmatrix M i j).natDegree ≤ ite (i = j) 1 0 := by split_ifs with h <;> simp [h, natDegree_X_le] variable (M) theorem charpoly_sub_diagonal_degree_lt : (M.charpoly - ∏ i : n, (X - C (M i i))).degree < ↑(Fintype.card n - 1) := by rw [charpoly, det_apply', ← insert_erase (mem_univ (Equiv.refl n)), sum_insert (notMem_erase (Equiv.refl n) univ), add_comm] simp only [charmatrix_apply_eq, one_mul, Equiv.Perm.sign_refl, id, Int.cast_one, Units.val_one, add_sub_cancel_right, Equiv.coe_refl] rw [← mem_degreeLT] apply Submodule.sum_mem (degreeLT R (Fintype.card n - 1)) intro c hc; rw [← C_eq_intCast, C_mul'] apply Submodule.smul_mem (degreeLT R (Fintype.card n - 1)) ↑↑(Equiv.Perm.sign c) rw [mem_degreeLT] apply lt_of_le_of_lt degree_le_natDegree _ rw [Nat.cast_lt] apply lt_of_le_of_lt _ (Equiv.Perm.fixed_point_card_lt_of_ne_one (ne_of_mem_erase hc)) apply le_trans (Polynomial.natDegree_prod_le univ fun i : n => charmatrix M (c i) i) _ rw [card_eq_sum_ones]; rw [sum_filter]; apply sum_le_sum intros apply charmatrix_apply_natDegree_le theorem charpoly_coeff_eq_prod_coeff_of_le {k : ℕ} (h : Fintype.card n - 1 ≤ k) : M.charpoly.coeff k = (∏ i : n, (X - C (M i i))).coeff k := by apply eq_of_sub_eq_zero; rw [← coeff_sub] apply Polynomial.coeff_eq_zero_of_degree_lt apply lt_of_lt_of_le (charpoly_sub_diagonal_degree_lt M) ?_ rw [Nat.cast_le]; apply h @[deprecated (since := "2025-08-14")] alias det_of_card_zero := det_eq_one_of_card_eq_zero @[simp] theorem charpoly_degree_eq_dim [Nontrivial R] (M : Matrix n n R) : M.charpoly.degree = Fintype.card n 
:= by by_cases h : Fintype.card n = 0 · rw [h] unfold charpoly rw [det_eq_one_of_card_eq_zero] · simp · assumption rw [← sub_add_cancel M.charpoly (∏ i : n, (X - C (M i i)))] -- Porting note: added `↑` in front of `Fintype.card n` have h1 : (∏ i : n, (X - C (M i i))).degree = ↑(Fintype.card n) := by rw [degree_eq_iff_natDegree_eq_of_pos (Nat.pos_of_ne_zero h), natDegree_prod'] · simp_rw [natDegree_X_sub_C] rw [← Finset.card_univ, sum_const, smul_eq_mul, mul_one] simp_rw [(monic_X_sub_C _).leadingCoeff] simp rw [degree_add_eq_right_of_degree_lt] · exact h1 rw [h1] apply lt_trans (charpoly_sub_diagonal_degree_lt M) rw [Nat.cast_lt] cutsat @[simp] theorem charpoly_natDegree_eq_dim [Nontrivial R] (M : Matrix n n R) : M.charpoly.natDegree = Fintype.card n := natDegree_eq_of_degree_eq_some (charpoly_degree_eq_dim M) theorem charpoly_monic (M : Matrix n n R) : M.charpoly.Monic := by nontriviality R by_cases h : Fintype.card n = 0 · rw [charpoly, det_eq_one_of_card_eq_zero h] apply monic_one have mon : (∏ i : n, (X - C (M i i))).Monic := by apply monic_prod_of_monic univ fun i : n => X - C (M i i) simp [monic_X_sub_C] rw [← sub_add_cancel (∏ i : n, (X - C (M i i))) M.charpoly] at mon rw [Monic] at * rwa [leadingCoeff_add_of_degree_lt] at mon rw [charpoly_degree_eq_dim] rw [← neg_sub] rw [degree_neg] apply lt_trans (charpoly_sub_diagonal_degree_lt M) rw [Nat.cast_lt] cutsat /-- See also `Matrix.coeff_charpolyRev_eq_neg_trace`. 
-/ theorem trace_eq_neg_charpoly_coeff [Nonempty n] (M : Matrix n n R) : trace M = -M.charpoly.coeff (Fintype.card n - 1) := by rw [charpoly_coeff_eq_prod_coeff_of_le _ le_rfl, Fintype.card, prod_X_sub_C_coeff_card_pred univ (fun i : n => M i i) Fintype.card_pos, neg_neg, trace] simp_rw [diag_apply] theorem trace_eq_neg_charpoly_nextCoeff (M : Matrix n n R) : M.trace = -M.charpoly.nextCoeff := by cases isEmpty_or_nonempty n · simp [nextCoeff] nontriviality simp [trace_eq_neg_charpoly_coeff, nextCoeff] theorem det_eq_sign_charpoly_coeff (M : Matrix n n R) : M.det = (-1) ^ Fintype.card n * M.charpoly.coeff 0 := by rw [coeff_zero_eq_eval_zero, charpoly, eval_det, matPolyEquiv_charmatrix, ← det_smul] simp lemma derivative_det_one_add_X_smul_aux {n} (M : Matrix (Fin n) (Fin n) R) : (derivative <| det (1 + (X : R[X]) • M.map C)).eval 0 = trace M := by induction n with | zero => simp | succ n IH => rw [det_succ_row_zero, map_sum, eval_finset_sum] simp only [add_apply, smul_apply, map_apply, smul_eq_mul, X_mul_C, submatrix_add, submatrix_smul, Pi.add_apply, Pi.smul_apply, submatrix_map, derivative_mul, map_add, derivative_C, zero_mul, derivative_X, mul_one, zero_add, eval_add, eval_mul, eval_C, eval_X, mul_zero, add_zero, eval_det_add_X_smul, eval_pow, eval_neg, eval_one] rw [Finset.sum_eq_single 0] · simp only [Fin.val_zero, pow_zero, derivative_one, eval_zero, one_apply_eq, eval_one, mul_one, zero_add, one_mul, Fin.succAbove_zero, submatrix_one _ (Fin.succ_injective _), det_one, IH, trace_submatrix_succ] · intro i _ hi cases n with | zero => exact (hi (Subsingleton.elim i 0)).elim | succ n => simp only [one_apply_ne' hi, eval_zero, mul_zero, zero_add, zero_mul, add_zero] rw [det_eq_zero_of_column_eq_zero 0, eval_zero, mul_zero] intro j rw [submatrix_apply, Fin.succAbove_of_castSucc_lt, one_apply_ne] · exact (bne_iff_ne (a := Fin.succ j) (b := Fin.castSucc 0)).mp rfl · rw [Fin.castSucc_zero]; exact lt_of_le_of_ne (Fin.zero_le _) hi.symm · exact fun H ↦ (H <| 
Finset.mem_univ _).elim /-- The derivative of `det (1 + M X)` at `0` is the trace of `M`. -/ lemma derivative_det_one_add_X_smul (M : Matrix n n R) : (derivative <| det (1 + (X : R[X]) • M.map C)).eval 0 = trace M := by let e := Matrix.reindexLinearEquiv R R (Fintype.equivFin n) (Fintype.equivFin n) rw [← Matrix.det_reindexLinearEquiv_self R[X] (Fintype.equivFin n)] convert derivative_det_one_add_X_smul_aux (e M) · ext; simp [map_add, e] · delta trace rw [← (Fintype.equivFin n).symm.sum_comp] simp_rw [e, reindexLinearEquiv_apply, reindex_apply, diag_apply, submatrix_apply] lemma coeff_det_one_add_X_smul_one (M : Matrix n n R) : (det (1 + (X : R[X]) • M.map C)).coeff 1 = trace M := by simp only [← derivative_det_one_add_X_smul, ← coeff_zero_eq_eval_zero, coeff_derivative, zero_add, Nat.cast_zero, mul_one] lemma det_one_add_X_smul (M : Matrix n n R) : det (1 + (X : R[X]) • M.map C) = (1 : R[X]) + trace M • X + (det (1 + (X : R[X]) • M.map C)).divX.divX * X ^ 2 := by rw [Algebra.smul_def (trace M), ← C_eq_algebraMap, pow_two, ← mul_assoc, add_assoc, ← add_mul, ← coeff_det_one_add_X_smul_one, ← coeff_divX, add_comm (C _), divX_mul_X_add, add_comm (1 : R[X]), ← C.map_one] convert (divX_mul_X_add _).symm rw [coeff_zero_eq_eval_zero, eval_det_add_X_smul, det_one, eval_one] /-- The first two terms of the Taylor expansion of `det (1 + r • M)` at `r = 0`. 
-/ lemma det_one_add_smul (r : R) (M : Matrix n n R) : det (1 + r • M) = 1 + trace M * r + (det (1 + (X : R[X]) • M.map C)).divX.divX.eval r * r ^ 2 := by simpa [eval_det, ← smul_eq_mul_diagonal] using congr_arg (eval r) (Matrix.det_one_add_X_smul M) lemma charpoly_of_card_eq_two [Nontrivial R] (hn : Fintype.card n = 2) : M.charpoly = X ^ 2 - C M.trace * X + C M.det := by have : Nonempty n := by rw [← Fintype.card_pos_iff]; omega ext i by_cases hi : i ∈ Finset.range 3 · fin_cases hi · simp [det_eq_sign_charpoly_coeff, hn] · simp [trace_eq_neg_charpoly_coeff, hn] · simpa [leadingCoeff, charpoly_natDegree_eq_dim, hn, coeff_X] using M.charpoly_monic.leadingCoeff · rw [Finset.mem_range, not_lt, Nat.succ_le] at hi suffices M.charpoly.coeff i = 0 by simpa [show i ≠ 2 by cutsat, show 1 ≠ i by cutsat, show i ≠ 0 by cutsat, coeff_X, coeff_C] apply coeff_eq_zero_of_natDegree_lt simpa [charpoly_natDegree_eq_dim, hn] using hi lemma charpoly_fin_two [Nontrivial R] (M : Matrix (Fin 2) (Fin 2) R) : M.charpoly = X ^ 2 - C M.trace * X + C M.det := M.charpoly_of_card_eq_two <| Fintype.card_fin _ end Matrix theorem matPolyEquiv_eq_X_pow_sub_C {K : Type*} (k : ℕ) [CommRing K] (M : Matrix n n K) : matPolyEquiv ((expand K k : K[X] →+* K[X]).mapMatrix (charmatrix (M ^ k))) = X ^ k - C (M ^ k) := by ext m i j rw [coeff_sub, coeff_C, matPolyEquiv_coeff_apply, RingHom.mapMatrix_apply, Matrix.map_apply, AlgHom.coe_toRingHom, DMatrix.sub_apply, coeff_X_pow] by_cases hij : i = j · rw [hij, charmatrix_apply_eq, map_sub, expand_C, expand_X, coeff_sub, coeff_X_pow, coeff_C] split_ifs with mp m0 <;> simp · rw [charmatrix_apply_ne _ _ _ hij, map_neg, expand_C, coeff_neg, coeff_C] split_ifs with m0 mp <;> simp_all namespace Matrix /-- Any matrix polynomial `p` is equivalent under evaluation to `p %ₘ M.charpoly`; that is, `p` is equivalent to a polynomial with degree less than the dimension of the matrix. 
-/ theorem aeval_eq_aeval_mod_charpoly (M : Matrix n n R) (p : R[X]) : aeval M p = aeval M (p %ₘ M.charpoly) := (aeval_modByMonic_eq_self_of_root M.charpoly_monic M.aeval_self_charpoly).symm /-- Any matrix power can be computed as the sum of matrix powers less than `Fintype.card n`. TODO: add the statement for negative powers phrased with `zpow`. -/ theorem pow_eq_aeval_mod_charpoly (M : Matrix n n R) (k : ℕ) : M ^ k = aeval M (X ^ k %ₘ M.charpoly) := by rw [← aeval_eq_aeval_mod_charpoly, map_pow, aeval_X] section Ideal theorem coeff_charpoly_mem_ideal_pow {I : Ideal R} (h : ∀ i j, M i j ∈ I) (k : ℕ) : M.charpoly.coeff k ∈ I ^ (Fintype.card n - k) := by delta charpoly rw [Matrix.det_apply, finset_sum_coeff] apply sum_mem rintro c - rw [coeff_smul, Submodule.smul_mem_iff'] have : ∑ x : n, 1 = Fintype.card n := by rw [Finset.sum_const, card_univ, smul_eq_mul, mul_one] rw [← this] apply coeff_prod_mem_ideal_pow_tsub rintro i - (_ | k) · rw [tsub_zero, pow_one, charmatrix_apply, coeff_sub, ← smul_one_eq_diagonal, smul_apply, smul_eq_mul, coeff_X_mul_zero, coeff_C_zero, zero_sub, neg_mem_iff] exact h (c i) i · rw [add_comm, tsub_self_add, pow_zero, Ideal.one_eq_top] exact Submodule.mem_top end Ideal section reverse open LaurentPolynomial hiding C /-- The reverse of the characteristic polynomial of a matrix. It has some advantages over the characteristic polynomial, including the fact that it can be extended to infinite dimensions (for appropriate operators). In such settings it is known as the "characteristic power series". 
-/ def charpolyRev (M : Matrix n n R) : R[X] := det (1 - (X : R[X]) • M.map C) lemma reverse_charpoly (M : Matrix n n R) : M.charpoly.reverse = M.charpolyRev := by nontriviality R let t : R[T;T⁻¹] := T 1 let t_inv : R[T;T⁻¹] := T (-1) let p : R[T;T⁻¹] := det (scalar n t - M.map LaurentPolynomial.C) let q : R[T;T⁻¹] := det (1 - scalar n t * M.map LaurentPolynomial.C) have ht : t_inv * t = 1 := by rw [← T_add, neg_add_cancel, T_zero] have hp : toLaurentAlg M.charpoly = p := by simp [p, t, charpoly, charmatrix, AlgHom.map_det, map_sub] have hq : toLaurentAlg M.charpolyRev = q := by simp [q, t, charpolyRev, AlgHom.map_det, map_sub, smul_eq_diagonal_mul] suffices t_inv ^ Fintype.card n * p = invert q by apply toLaurent_injective rwa [toLaurent_reverse, ← coe_toLaurentAlg, hp, hq, ← involutive_invert.injective.eq_iff, map_mul, involutive_invert p, charpoly_natDegree_eq_dim, ← mul_one (Fintype.card n : ℤ), ← T_pow, map_pow, invert_T, mul_comm] rw [← det_smul, smul_sub, scalar_apply, ← diagonal_smul, Pi.smul_def, smul_eq_mul, ht, diagonal_one, invert.map_det] simp [t_inv, map_sub, map_one, map_mul, t, smul_eq_diagonal_mul] theorem charpoly_inv (A : Matrix n n R) (h : IsUnit A) : A⁻¹.charpoly = (-1) ^ Fintype.card n * C (Ring.inverse A.det) * A.charpolyRev := by have : Invertible A := h.invertible calc _ = (scalar n X - C.mapMatrix A⁻¹).det := rfl _ = C (A⁻¹ * A).det * (scalar n X - C.mapMatrix A⁻¹).det := by simp _ = C A⁻¹.det * C A.det * (scalar n X - C.mapMatrix A⁻¹).det := by rw [det_mul]; simp _ = C A⁻¹.det * (C A.det * (scalar n X - C.mapMatrix A⁻¹).det) := by ac_rfl _ = C A⁻¹.det * (C.mapMatrix A * (scalar n X - C.mapMatrix A⁻¹)).det := by simp [RingHom.map_det] _ = C A⁻¹.det * (C.mapMatrix A * scalar n X - 1).det := by rw [mul_sub, ← RingHom.map_mul]; simp _ = C A⁻¹.det * ((-1) ^ Fintype.card n * (1 - scalar n X * C.mapMatrix A).det) := by rw [← neg_sub, det_neg, det_one_sub_mul_comm] _ = _ := by simp [charpolyRev, smul_eq_diagonal_mul]; ac_rfl @[simp] lemma 
eval_charpolyRev : eval 0 M.charpolyRev = 1 := by rw [charpolyRev, ← coe_evalRingHom, RingHom.map_det, ← det_one (R := R) (n := n)] have : (1 - (X : R[X]) • M.map C).map (eval 0) = 1 := by ext i j; rcases eq_or_ne i j with hij | hij <;> simp [hij, one_apply] congr @[simp] lemma coeff_charpolyRev_eq_neg_trace (M : Matrix n n R) : coeff M.charpolyRev 1 = - trace M := by nontriviality R cases isEmpty_or_nonempty n · simp [charpolyRev, coeff_one] · simp [trace_eq_neg_charpoly_coeff M, ← M.reverse_charpoly, nextCoeff] lemma isUnit_charpolyRev_of_isNilpotent (hM : IsNilpotent M) : IsUnit M.charpolyRev := by obtain ⟨k, hk⟩ := hM replace hk : 1 - (X : R[X]) • M.map C ∣ 1 := by convert one_sub_dvd_one_sub_pow ((X : R[X]) • M.map C) k rw [← C.mapMatrix_apply, smul_pow, ← map_pow, hk, map_zero, smul_zero, sub_zero] apply isUnit_of_dvd_one rw [← det_one (R := R[X]) (n := n)] exact map_dvd detMonoidHom hk lemma isNilpotent_trace_of_isNilpotent (hM : IsNilpotent M) : IsNilpotent (trace M) := by cases isEmpty_or_nonempty n · simp suffices IsNilpotent (coeff (charpolyRev M) 1) by simpa using this exact (isUnit_iff_coeff_isUnit_isNilpotent.mp (isUnit_charpolyRev_of_isNilpotent hM)).2 _ one_ne_zero lemma isNilpotent_charpoly_sub_pow_of_isNilpotent (hM : IsNilpotent M) : IsNilpotent (M.charpoly - X ^ (Fintype.card n)) := by nontriviality R let p : R[X] := M.charpolyRev have hp : p - 1 = X * (p /ₘ X) := by conv_lhs => rw [← modByMonic_add_div p monic_X] simp [p, modByMonic_X] have : IsNilpotent (p /ₘ X) := (Polynomial.isUnit_iff'.mp (isUnit_charpolyRev_of_isNilpotent hM)).2 have aux : (M.charpoly - X ^ (Fintype.card n)).natDegree ≤ M.charpoly.natDegree := le_trans (natDegree_sub_le _ _) (by simp) rw [← isNilpotent_reflect_iff aux, reflect_sub, ← reverse, M.reverse_charpoly] simpa [p, hp] end reverse end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Disc.lean
import Mathlib.LinearAlgebra.Matrix.Charpoly.Coeff import Mathlib.RingTheory.Polynomial.Resultant.Basic /-! # The discriminant of a matrix -/ open Polynomial namespace Matrix variable {R n : Type*} [CommRing R] [Nontrivial R] [Fintype n] [DecidableEq n] /-- The discriminant of a matrix is defined to be the discriminant of its characteristic polynomial. -/ noncomputable def discr (A : Matrix n n R) : R := A.charpoly.discr lemma discr_of_card_eq_two (A : Matrix n n R) (hn : Fintype.card n = 2) : A.discr = A.trace ^ 2 - 4 * A.det := by rw [discr, Polynomial.discr_of_degree_eq_two (by simp; norm_cast)] simp [A.charpoly_of_card_eq_two hn] lemma discr_fin_two (A : Matrix (Fin 2) (Fin 2) R) : A.discr = A.trace ^ 2 - 4 * A.det := A.discr_of_card_eq_two <| Fintype.card_fin _ @[deprecated (since := "2025-10-20")] alias disc := discr @[deprecated (since := "2025-10-20")] alias disc_of_card_eq_two := discr_of_card_eq_two @[deprecated (since := "2025-10-20")] alias disc_fin_two := discr_fin_two end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Eigs.lean
import Mathlib.Algebra.Algebra.Spectrum.Basic import Mathlib.Algebra.Polynomial.Basic import Mathlib.FieldTheory.IsAlgClosed.Basic /-! # Eigenvalues are characteristic polynomial roots. In fields we show that: * `Matrix.mem_spectrum_iff_isRoot_charpoly`: the roots of the characteristic polynomial are exactly the spectrum of the matrix. * `Matrix.det_eq_prod_roots_charpoly_of_splits`: the determinant (in the field of the matrix) is the product of the roots of the characteristic polynomial if the polynomial splits in the field of the matrix. * `Matrix.trace_eq_sum_roots_charpoly_of_splits`: the trace is the sum of the roots of the characteristic polynomial if the polynomial splits in the field of the matrix. In an algebraically closed field we show that: * `Matrix.det_eq_prod_roots_charpoly`: the determinant is the product of the roots of the characteristic polynomial. * `Matrix.trace_eq_sum_roots_charpoly`: the trace is the sum of the roots of the characteristic polynomial. Note that over other fields such as `ℝ`, these results can be used by using `A.map (algebraMap ℝ ℂ)` as the matrix, and then applying `RingHom.map_det`. The two lemmas `Matrix.det_eq_prod_roots_charpoly` and `Matrix.trace_eq_sum_roots_charpoly` are more commonly stated as trace is the sum of eigenvalues and determinant is the product of eigenvalues. Mathlib has already defined eigenvalues in `LinearAlgebra.Eigenspace` as the roots of the minimal polynomial of a linear endomorphism. These do not have correct multiplicity and cannot be used in the theorems above. Hence we express these theorems in terms of the roots of the characteristic polynomial directly. ## TODO The proofs of `det_eq_prod_roots_charpoly_of_splits` and `trace_eq_sum_roots_charpoly_of_splits` closely resemble `norm_gen_eq_prod_roots` and `trace_gen_eq_sum_roots` respectively, but the dependencies are not general enough to unify them. 
We should refactor `Polynomial.coeff_zero_eq_prod_roots_of_monic_of_split` and `Polynomial.nextCoeff_eq_neg_sum_roots_of_monic_of_splits` to assume splitting over an arbitrary map. -/ variable {n : Type*} [Fintype n] [DecidableEq n] variable {R K : Type*} [CommRing R] [Field K] variable {A : Matrix n n K} {B : Matrix n n R} open Matrix Polynomial open scoped Matrix namespace Matrix /-- The roots of the characteristic polynomial are in the spectrum of the matrix. -/ theorem mem_spectrum_of_isRoot_charpoly [Nontrivial R] {r : R} (hr : IsRoot B.charpoly r) : r ∈ spectrum R B := by simp_all [eval_charpoly, spectrum.mem_iff, isUnit_iff_isUnit_det, algebraMap_eq_diagonal, Pi.algebraMap_def] /-- In fields, the roots of the characteristic polynomial are exactly the spectrum of the matrix. The weaker direction is true in nontrivial rings (see `Matrix.mem_spectrum_of_isRoot_charpoly`). -/ theorem mem_spectrum_iff_isRoot_charpoly {r : K} : r ∈ spectrum K A ↔ IsRoot A.charpoly r := by simp [eval_charpoly, spectrum.mem_iff, isUnit_iff_isUnit_det, algebraMap_eq_diagonal, Pi.algebraMap_def] theorem det_eq_prod_roots_charpoly_of_splits (hAps : A.charpoly.Splits (RingHom.id K)) : A.det = (Matrix.charpoly A).roots.prod := by rw [det_eq_sign_charpoly_coeff, ← charpoly_natDegree_eq_dim A, Polynomial.coeff_zero_eq_prod_roots_of_monic_of_splits A.charpoly_monic hAps, ← mul_assoc, ← pow_two, pow_right_comm, neg_one_sq, one_pow, one_mul] theorem trace_eq_sum_roots_charpoly_of_splits (hAps : A.charpoly.Splits (RingHom.id K)) : A.trace = (Matrix.charpoly A).roots.sum := by rcases isEmpty_or_nonempty n with h | _ · rw [Matrix.trace, Fintype.sum_empty, Matrix.charpoly, det_eq_one_of_card_eq_zero (Fintype.card_eq_zero_iff.2 h), Polynomial.roots_one, Multiset.empty_eq_zero, Multiset.sum_zero] · rw [trace_eq_neg_charpoly_nextCoeff, neg_eq_iff_eq_neg, ← Polynomial.nextCoeff_eq_neg_sum_roots_of_monic_of_splits A.charpoly_monic hAps] variable (A) theorem det_eq_prod_roots_charpoly [IsAlgClosed K] : 
A.det = (Matrix.charpoly A).roots.prod := det_eq_prod_roots_charpoly_of_splits (IsAlgClosed.splits A.charpoly) theorem trace_eq_sum_roots_charpoly [IsAlgClosed K] : A.trace = (Matrix.charpoly A).roots.sum := trace_eq_sum_roots_charpoly_of_splits (IsAlgClosed.splits A.charpoly) end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Charpoly/Minpoly.lean
import Mathlib.LinearAlgebra.Matrix.Charpoly.Coeff import Mathlib.LinearAlgebra.Matrix.ToLin import Mathlib.RingTheory.PowerBasis /-! # The minimal polynomial divides the characteristic polynomial of a matrix. This also includes some miscellaneous results about `minpoly` on matrices. -/ noncomputable section open Matrix Module Polynomial universe u v w variable {R : Type u} [CommRing R] variable {n : Type v} [DecidableEq n] [Fintype n] variable {N : Type w} [AddCommGroup N] [Module R N] namespace Matrix variable (M : Matrix n n R) @[simp] theorem minpoly_toLin' : minpoly R (toLin' M) = minpoly R M := minpoly.algEquiv_eq (toLinAlgEquiv' : Matrix n n R ≃ₐ[R] _) M @[simp] theorem minpoly_toLin (b : Basis n R N) (M : Matrix n n R) : minpoly R (toLin b b M) = minpoly R M := minpoly.algEquiv_eq (toLinAlgEquiv b : Matrix n n R ≃ₐ[R] _) M theorem isIntegral : IsIntegral R M := ⟨M.charpoly, ⟨charpoly_monic M, aeval_self_charpoly M⟩⟩ theorem minpoly_dvd_charpoly {K : Type*} [Field K] (M : Matrix n n K) : minpoly K M ∣ M.charpoly := minpoly.dvd _ _ (aeval_self_charpoly M) end Matrix namespace LinearMap @[simp] theorem minpoly_toMatrix' (f : (n → R) →ₗ[R] n → R) : minpoly R (toMatrix' f) = minpoly R f := minpoly.algEquiv_eq (toMatrixAlgEquiv' : _ ≃ₐ[R] Matrix n n R) f @[simp] theorem minpoly_toMatrix (b : Basis n R N) (f : N →ₗ[R] N) : minpoly R (toMatrix b b f) = minpoly R f := minpoly.algEquiv_eq (toMatrixAlgEquiv b : _ ≃ₐ[R] Matrix n n R) f end LinearMap section PowerBasis open Algebra /-- The characteristic polynomial of the map `fun x => a * x` is the minimal polynomial of `a`. In combination with `det_eq_sign_charpoly_coeff` or `trace_eq_neg_charpoly_coeff` and a bit of rewriting, this will allow us to conclude the field norm resp. trace of `x` is the product resp. sum of `x`'s conjugates. 
-/ theorem charpoly_leftMulMatrix {S : Type*} [Ring S] [Algebra R S] (h : PowerBasis R S) : (leftMulMatrix h.basis h.gen).charpoly = minpoly R h.gen := by cases subsingleton_or_nontrivial R; · subsingleton apply minpoly.unique' R h.gen (charpoly_monic _) · apply (injective_iff_map_eq_zero (G := S) (leftMulMatrix _)).mp (leftMulMatrix_injective h.basis) rw [← Polynomial.aeval_algHom_apply, aeval_self_charpoly] refine fun q hq => or_iff_not_imp_left.2 fun h0 => ?_ rw [Matrix.charpoly_degree_eq_dim, Fintype.card_fin] at hq contrapose! hq; exact h.dim_le_degree_of_root h0 hq end PowerBasis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/GeneralLinearGroup/Card.lean
import Mathlib.FieldTheory.Finiteness import Mathlib.LinearAlgebra.Matrix.GeneralLinearGroup.Defs import Mathlib.LinearAlgebra.Matrix.Rank /-! # Cardinal of the general linear group over finite rings This file computes the cardinal of the general linear group over finite rings. ## Main statements * `card_linearIndependent` gives the cardinal of the set of linearly independent vectors over a finite-dimensional vector space over a finite field. * `Matrix.card_GL_field` gives the cardinal of the general linear group over a finite field. -/ open LinearMap Module section LinearIndependent variable {K V : Type*} [DivisionRing K] [AddCommGroup V] [Module K V] variable [Fintype K] [Finite V] local notation "q" => Fintype.card K local notation "n" => Module.finrank K V attribute [local instance] Fintype.ofFinite in open Fintype in /-- The cardinal of the set of linearly independent vectors over a finite-dimensional vector space over a finite field. -/ theorem card_linearIndependent {k : ℕ} (hk : k ≤ n) : Nat.card { s : Fin k → V // LinearIndependent K s } = ∏ i : Fin k, (q ^ n - q ^ i.val) := by rw [Nat.card_eq_fintype_card] induction k with | zero => simp only [linearIndependent_iff_ker, Finsupp.linearCombination_fin_zero, ker_zero, card_ofSubsingleton, Finset.univ_eq_empty, Finset.prod_empty] | succ k ih => have (s : { s : Fin k → V // LinearIndependent K s }) : card ((Submodule.span K (Set.range (s : Fin k → V)))ᶜ : Set (V)) = (q) ^ n - (q) ^ k := by rw [card_compl_set, Module.card_eq_pow_finrank (K := K) (V := ((Submodule.span K (Set.range (s : Fin k → V))) : Set (V)))] simp only [SetLike.coe_sort_coe, finrank_span_eq_card s.2, card_fin] rw [Module.card_eq_pow_finrank (K := K)] simp [card_congr (equiv_linearIndependent k), sum_congr _ _ this, ih (Nat.le_of_succ_le hk), mul_comm, Fin.prod_univ_succAbove _ (Fin.last k)] end LinearIndependent namespace Matrix section field variable {𝔽 : Type*} [Field 𝔽] [Fintype 𝔽] local notation "q" => Fintype.card 𝔽 variable (n : ℕ) /-- 
Equivalence between `GL n F` and `n` vectors of length `n` that are linearly independent. Given by sending a matrix to its columns. -/ noncomputable def equiv_GL_linearindependent : GL (Fin n) 𝔽 ≃ { s : Fin n → Fin n → 𝔽 // LinearIndependent 𝔽 s } where toFun M := ⟨M.1.col, by apply linearIndependent_iff_card_eq_finrank_span.2 rw [Set.finrank, ← rank_eq_finrank_span_cols, rank_unit]⟩ invFun M := GeneralLinearGroup.mk'' (transpose (M.1)) <| by classical let b := basisOfPiSpaceOfLinearIndependent M.2 have := (Pi.basisFun 𝔽 (Fin n)).invertibleToMatrix b rw [← Basis.coePiBasisFun.toMatrix_eq_transpose, ← coe_basisOfPiSpaceOfLinearIndependent M.2] exact isUnit_det_of_invertible _ right_inv := by exact congrFun rfl /-- The cardinal of the general linear group over a finite field. -/ theorem card_GL_field : Nat.card (GL (Fin n) 𝔽) = ∏ i : (Fin n), (q ^ n - q ^ (i : ℕ)) := by rw [Nat.card_congr (equiv_GL_linearindependent n), card_linearIndependent, Module.finrank_fintype_fun_eq_card, Fintype.card_fin] simp only [Module.finrank_fintype_fun_eq_card, Fintype.card_fin, le_refl] end field end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/GeneralLinearGroup/Basic.lean
import Mathlib.LinearAlgebra.Matrix.GeneralLinearGroup.Defs /-! # Basic lemmas about the general linear group $GL(n, R)$ This file lists various basic lemmas about the general linear group $GL(n, R)$. For the definitions, see `LinearAlgebra/Matrix/GeneralLinearGroup/Defs.lean`. -/ namespace Matrix section Examples /-- The matrix [a, -b; b, a] (inspired by multiplication by a complex number); it is an element of $GL_2(R)$ if `a ^ 2 + b ^ 2` is nonzero. -/ @[simps! -fullyApplied val] def planeConformalMatrix {R} [Field R] (a b : R) (hab : a ^ 2 + b ^ 2 ≠ 0) : Matrix.GeneralLinearGroup (Fin 2) R := GeneralLinearGroup.mkOfDetNeZero !![a, -b; b, a] (by simpa [det_fin_two, sq] using hab) /- TODO: Add Iwasawa matrices `n_x=!![1,x; 0,1]`, `a_t=!![exp(t/2),0;0,exp(-t/2)]` and `k_θ=!![cos θ, sin θ; -sin θ, cos θ]` -/ end Examples namespace GeneralLinearGroup section Center variable {R n : Type*} [Fintype n] [DecidableEq n] [CommRing R] /-- The center of `GL n R` consists of scalar matrices. -/ lemma mem_center_iff_val_eq_scalar {g : GL n R} : g ∈ Subgroup.center (GL n R) ↔ g.val ∈ Set.range (scalar _) := by rcases isEmpty_or_nonempty n · simpa [Subsingleton.elim (Subgroup.center _) ⊤] using ⟨1, Subsingleton.elim _ _⟩ constructor · intro hg refine Matrix.mem_range_scalar_of_commute_transvectionStruct fun t ↦ ?_ simpa [Units.ext_iff] using Subgroup.mem_center_iff.mp hg (.mk _ _ t.mul_inv t.inv_mul) · refine fun ⟨a, ha⟩ ↦ Subgroup.mem_center_iff.mpr fun h ↦ ?_ simpa [Units.ext_iff, ← ha] using (scalar_commute a (mul_comm a ·) h.val).symm /-- The center of `GL n R` is the image of `Rˣ`. 
-/ lemma center_eq_range_units : Subgroup.center (GL n R) = (Units.map (algebraMap R _).toMonoidHom).range := by ext g -- eliminate tedious case `n = ∅` rcases isEmpty_or_nonempty n · simpa [Subsingleton.elim (Subgroup.center _) ⊤] using ⟨1, Subsingleton.elim _ _⟩ constructor · -- previous lemma shows the underlying matrix is scalar, but now need to show -- the scalar is a unit; so we apply argument both to `g` and `g⁻¹` intro hg obtain ⟨a, ha⟩ := mem_center_iff_val_eq_scalar.mp hg obtain ⟨b, hb⟩ := mem_center_iff_val_eq_scalar.mp (Subgroup.inv_mem _ hg) have hab : a * b = 1 := by simpa [-mul_inv_cancel, ← ha, ← hb, ← diagonal_one, Units.ext_iff] using mul_inv_cancel g refine ⟨⟨a, b, hab, mul_comm a b ▸ hab⟩, ?_⟩ simp [Units.ext_iff, ← ha, algebraMap_eq_diagonal] · rintro ⟨a, rfl⟩ exact mem_center_iff_val_eq_scalar.mpr ⟨a, rfl⟩ end Center end GeneralLinearGroup end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/GeneralLinearGroup/Defs.lean
import Mathlib.LinearAlgebra.Matrix.NonsingularInverse import Mathlib.LinearAlgebra.Matrix.SpecialLinearGroup import Mathlib.LinearAlgebra.GeneralLinearGroup import Mathlib.Algebra.Ring.Subring.Units /-! # The General Linear group $GL(n, R)$ This file defines the elements of the General Linear group `Matrix.GeneralLinearGroup n R`, consisting of all invertible `n` by `n` `R`-matrices. ## Main definitions * `Matrix.GeneralLinearGroup` is the type of matrices over R which are units in the matrix ring. * `Matrix.GLPos` gives the subgroup of matrices with positive determinant (over a linear ordered ring). ## Tags matrix group, group, matrix inverse -/ namespace Matrix universe u v open Matrix open LinearMap /-- `GL n R` is the group of `n` by `n` `R`-matrices with unit determinant. Defined as a subtype of matrices -/ abbrev GeneralLinearGroup (n : Type u) (R : Type v) [DecidableEq n] [Fintype n] [Semiring R] : Type _ := (Matrix n n R)ˣ @[inherit_doc] notation "GL" => GeneralLinearGroup namespace GeneralLinearGroup variable {n : Type u} [DecidableEq n] [Fintype n] {R : Type v} section CoeFnInstance instance instCoeFun [Semiring R] : CoeFun (GL n R) fun _ => n → n → R where coe A := (A : Matrix n n R) end CoeFnInstance variable [CommRing R] /-- The determinant of a unit matrix is itself a unit. 
-/ @[simps] def det : GL n R →* Rˣ where toFun A := { val := (↑A : Matrix n n R).det inv := (↑A⁻¹ : Matrix n n R).det val_inv := by rw [← det_mul, A.mul_inv, det_one] inv_val := by rw [← det_mul, A.inv_mul, det_one] } map_one' := Units.ext det_one map_mul' _ _ := Units.ext <| det_mul _ _ lemma det_ne_zero [Nontrivial R] (g : GL n R) : g.val.det ≠ 0 := g.det.ne_zero /-- The groups `GL n R` (notation for `Matrix.GeneralLinearGroup n R`) and `LinearMap.GeneralLinearGroup R (n → R)` are multiplicatively equivalent -/ def toLin : GL n R ≃* LinearMap.GeneralLinearGroup R (n → R) := Units.mapEquiv toLinAlgEquiv'.toMulEquiv /-- The isomorphism from `GL n R` to the general linear group of a module associated with a basis. -/ noncomputable def toLin' {V : Type*} [AddCommGroup V] [Module R V] (b : Module.Basis n R V) : GL n R ≃* LinearMap.GeneralLinearGroup R V := toLin.trans <| LinearMap.GeneralLinearGroup.congrLinearEquiv b.equivFun.symm lemma toLin'_apply {V : Type*} [AddCommGroup V] [Module R V] (b : Module.Basis n R V) (M : GL n R) (v : V) : (toLin' b M).toLinearEquiv v = Fintype.linearCombination R ⇑b (↑M *ᵥ (b.repr v)) := by simp [toLin', toLin, Fintype.linearCombination_apply, MulEquiv.trans_apply] /-- Given a matrix with invertible determinant, we get an element of `GL n R`. -/ @[simps! val] def mk' (A : Matrix n n R) (_ : Invertible (Matrix.det A)) : GL n R := unitOfDetInvertible A /-- Given a matrix with unit determinant, we get an element of `GL n R`. -/ @[simps! val] noncomputable def mk'' (A : Matrix n n R) (h : IsUnit (Matrix.det A)) : GL n R := nonsingInvUnit A h /-- Given a matrix with non-zero determinant over a field, we get an element of `GL n K`. -/ @[simps! 
val] def mkOfDetNeZero {K : Type*} [Field K] (A : Matrix n n K) (h : Matrix.det A ≠ 0) : GL n K := mk' A (invertibleOfNonzero h) theorem ext_iff (A B : GL n R) : A = B ↔ ∀ i j, (A : Matrix n n R) i j = (B : Matrix n n R) i j := Units.ext_iff.trans Matrix.ext_iff.symm /-- Not marked `@[ext]` as the `ext` tactic already solves this. -/ theorem ext ⦃A B : GL n R⦄ (h : ∀ i j, (A : Matrix n n R) i j = (B : Matrix n n R) i j) : A = B := Units.ext <| Matrix.ext h section CoeLemmas variable (A B : GL n R) @[simp] theorem coe_mul : ↑(A * B) = (↑A : Matrix n n R) * (↑B : Matrix n n R) := rfl @[simp] theorem coe_one : ↑(1 : GL n R) = (1 : Matrix n n R) := rfl theorem coe_inv : ↑A⁻¹ = (↑A : Matrix n n R)⁻¹ := letI := A.invertible invOf_eq_nonsing_inv (↑A : Matrix n n R) @[simp] theorem coe_toLin : (toLin A : (n → R) →ₗ[R] n → R) = Matrix.mulVecLin A := rfl @[simp] theorem toLin_apply (v : n → R) : (toLin A : _ → n → R) v = Matrix.mulVecLin A v := rfl end CoeLemmas variable {S T : Type*} [CommRing S] [CommRing T] /-- A ring homomorphism ``f : R →+* S`` induces a homomorphism ``GLₙ(f) : GLₙ(R) →* GLₙ(S)``. -/ @[simps! 
apply_val] def map (f : R →+* S) : GL n R →* GL n S := Units.map <| (RingHom.mapMatrix f).toMonoidHom @[simp] theorem map_id : map (RingHom.id R) = MonoidHom.id (GL n R) := rfl @[simp] protected lemma map_apply (f : R →+* S) (i j : n) (g : GL n R) : map f g i j = f (g i j) := by rfl @[simp] theorem map_comp (f : T →+* R) (g : R →+* S) : map (g.comp f) = (map g).comp (map (n := n) f) := rfl @[simp] theorem map_comp_apply (f : T →+* R) (g : R →+* S) (x : GL n T) : (map g).comp (map f) x = map g (map f x) := rfl variable (f : R →+* S) @[simp] protected lemma map_one : map f (1 : GL n R) = 1 := by simp only [map_one] protected lemma map_mul (g h : GL n R) : map f (g * h) = map f g * map f h := by simp only [map_mul] protected lemma map_inv (g : GL n R) : map f g⁻¹ = (map f g)⁻¹ := by simp only [map_inv] protected lemma map_det (g : GL n R) : Matrix.GeneralLinearGroup.det (map f g) = Units.map f (Matrix.GeneralLinearGroup.det g) := by ext simp only [map, Matrix.GeneralLinearGroup.val_det_apply, Units.coe_map, MonoidHom.coe_coe] exact Eq.symm (RingHom.map_det f g.1) lemma map_mul_map_inv (g : GL n R) : map f g * map f g⁻¹ = 1 := by simp only [map_inv, mul_inv_cancel] lemma map_inv_mul_map (g : GL n R) : map f g⁻¹ * map f g = 1 := by simp only [map_inv, inv_mul_cancel] @[simp] lemma coe_map_mul_map_inv (g : GL n R) : g.val.map f * g.val⁻¹.map f = 1 := by rw [← Matrix.map_mul] simp only [isUnits_det_units, mul_nonsing_inv, map_zero, map_one, Matrix.map_one] @[simp] lemma coe_map_inv_mul_map (g : GL n R) : g.val⁻¹.map f * g.val.map f = 1 := by rw [← Matrix.map_mul] simp only [isUnits_det_units, nonsing_inv_mul, map_zero, map_one, Matrix.map_one] section kronecker variable {R m : Type*} [CommSemiring R] [Fintype m] [DecidableEq m] open scoped Kronecker /-- The invertible kronecker matrix of invertible matrices. 
-/ protected def kronecker (x : GL n R) (y : GL m R) : GL (n × m) R where val := x ⊗ₖ y inv := ↑x⁻¹ ⊗ₖ ↑y⁻¹ val_inv := by simp only [← mul_kronecker_mul, Units.mul_inv, one_kronecker_one] inv_val := by simp only [← mul_kronecker_mul, Units.inv_mul, one_kronecker_one] theorem _root_.Matrix.IsUnit.kronecker {x : Matrix n n R} {y : Matrix m m R} (hx : IsUnit x) (hy : IsUnit y) : IsUnit (x ⊗ₖ y) := GeneralLinearGroup.kronecker hx.unit hy.unit |>.isUnit end kronecker end GeneralLinearGroup namespace SpecialLinearGroup variable {n : Type u} [DecidableEq n] [Fintype n] {R : Type v} [CommRing R] {S : Type*} [CommRing S] [Algebra R S] /-- `toGL` is the map from the special linear group to the general linear group. -/ def toGL : Matrix.SpecialLinearGroup n R →* Matrix.GeneralLinearGroup n R where toFun A := ⟨↑A, ↑A⁻¹, congr_arg (·.1) (mul_inv_cancel A), congr_arg (·.1) (inv_mul_cancel A)⟩ map_one' := Units.ext rfl map_mul' _ _ := Units.ext rfl instance hasCoeToGeneralLinearGroup : Coe (SpecialLinearGroup n R) (GL n R) := ⟨toGL⟩ lemma toGL_injective : Function.Injective (toGL : SpecialLinearGroup n R → GL n R) := fun g g' ↦ by simpa [toGL] using fun h _ ↦ Subtype.ext h @[simp] lemma toGL_inj (g g' : SpecialLinearGroup n R) : (g : GeneralLinearGroup n R) = g' ↔ g = g' := toGL_injective.eq_iff @[simp] theorem coeToGL_det (g : SpecialLinearGroup n R) : Matrix.GeneralLinearGroup.det (g : GL n R) = 1 := Units.ext g.prop @[simp] lemma coe_GL_coe_matrix (g : SpecialLinearGroup n R) : ((toGL g) : Matrix n n R) = g := rfl variable (S) in /-- `mapGL` is the map from the special linear group over `R` to the general linear group over `S`, where `S` is an `R`-algebra. 
-/ def mapGL : Matrix.SpecialLinearGroup n R →* Matrix.GeneralLinearGroup n S := toGL.comp (map (algebraMap R S)) @[simp] lemma mapGL_inj [FaithfulSMul R S] (g g' : SpecialLinearGroup n R) : mapGL S g = mapGL S g' ↔ g = g' := by refine ⟨fun h ↦ ?_, by tauto⟩ apply SpecialLinearGroup.ext simpa [mapGL, toGL_inj, ext_iff, (FaithfulSMul.algebraMap_injective R S).eq_iff] using h lemma mapGL_injective [FaithfulSMul R S] : Function.Injective (mapGL (R := R) (n := n) S) := fun a b ↦ by simp @[simp] lemma mapGL_coe_matrix (g : SpecialLinearGroup n R) : ((mapGL S g) : Matrix n n S) = g.map (algebraMap R S) := rfl @[simp] lemma map_mapGL {T : Type*} [CommRing T] [Algebra R T] [Algebra S T] [IsScalarTower R S T] (g : SpecialLinearGroup n R) : (mapGL S g).map (algebraMap S T) = mapGL T g := by ext simp [IsScalarTower.algebraMap_apply R S T] @[simp] lemma det_mapGL (g : SpecialLinearGroup n R) : (mapGL S g).det = 1 := by simp [mapGL] end SpecialLinearGroup section variable {n : Type u} {R : Type v} [DecidableEq n] [Fintype n] [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] section variable (n R) /-- This is the subgroup of `nxn` matrices with entries over a linear ordered ring and positive determinant. -/ def GLPos : Subgroup (GL n R) := (Units.posSubgroup R).comap GeneralLinearGroup.det @[inherit_doc] scoped[MatrixGroups] notation "GL(" n ", " R ")" "⁺" => GLPos (Fin n) R end @[simp] theorem mem_glpos (A : GL n R) : A ∈ GLPos n R ↔ 0 < (Matrix.GeneralLinearGroup.det A : R) := Iff.rfl theorem GLPos.det_ne_zero (A : GLPos n R) : ((A : GL n R) : Matrix n n R).det ≠ 0 := ne_of_gt A.prop end section Neg variable {n : Type u} {R : Type v} [DecidableEq n] [Fintype n] [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] [Fact (Even (Fintype.card n))] /-- Formal operation of negation on general linear group on even cardinality `n` given by negating each element. 
-/ instance : Neg (GLPos n R) := ⟨fun g => ⟨-g, by rw [mem_glpos, GeneralLinearGroup.val_det_apply, Units.val_neg, det_neg, (Fact.out (p := Even <| Fintype.card n)).neg_one_pow, one_mul] exact g.prop⟩⟩ @[simp] theorem GLPos.coe_neg_GL (g : GLPos n R) : ↑(-g) = -(g : GL n R) := rfl @[simp] theorem GLPos.coe_neg (g : GLPos n R) : (↑(-g) : GL n R) = -((g : GL n R) : Matrix n n R) := rfl @[simp] theorem GLPos.coe_neg_apply (g : GLPos n R) (i j : n) : ((↑(-g) : GL n R) : Matrix n n R) i j = -((g : GL n R) : Matrix n n R) i j := rfl instance : HasDistribNeg (GLPos n R) := Subtype.coe_injective.hasDistribNeg _ GLPos.coe_neg_GL (GLPos n R).coe_mul end Neg namespace SpecialLinearGroup variable {n : Type u} [DecidableEq n] [Fintype n] {R : Type v} [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] /-- `Matrix.SpecialLinearGroup n R` embeds into `GL_pos n R` -/ def toGLPos : SpecialLinearGroup n R →* GLPos n R where toFun A := ⟨(A : GL n R), show 0 < (↑A : Matrix n n R).det from A.prop.symm ▸ zero_lt_one⟩ map_one' := Subtype.ext <| Units.ext <| rfl map_mul' _ _ := Subtype.ext <| Units.ext <| rfl instance : Coe (SpecialLinearGroup n R) (GLPos n R) := ⟨toGLPos⟩ theorem toGLPos_injective : Function.Injective (toGLPos : SpecialLinearGroup n R → GLPos n R) := -- Porting note: had to rewrite this to hint the correct types to Lean -- (It can't find the coercion GLPos n R → Matrix n n R) Function.Injective.of_comp (f := fun (A : GLPos n R) ↦ ((A : GL n R) : Matrix n n R)) Subtype.coe_injective /-- Coercing a `Matrix.SpecialLinearGroup` via `GL_pos` and `GL` is the same as coercing straight to a matrix. 
-/ @[simp] theorem coe_GLPos_coe_GL_coe_matrix (g : SpecialLinearGroup n R) : (↑(↑(↑g : GLPos n R) : GL n R) : Matrix n n R) = ↑g := rfl @[simp] theorem coe_to_GLPos_to_GL_det (g : SpecialLinearGroup n R) : Matrix.GeneralLinearGroup.det ((g : GLPos n R) : GL n R) = 1 := Units.ext g.prop variable [Fact (Even (Fintype.card n))] @[norm_cast] theorem coe_GLPos_neg (g : SpecialLinearGroup n R) : ↑(-g) = -(↑g : GLPos n R) := Subtype.ext <| Units.ext rfl end SpecialLinearGroup end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/GeneralLinearGroup/FinTwo.lean
import Mathlib.Algebra.Group.AddChar import Mathlib.LinearAlgebra.Matrix.Charpoly.Disc import Mathlib.LinearAlgebra.Matrix.GeneralLinearGroup.Defs /-! # The group `GL (Fin 2) R` -/ open Polynomial namespace Matrix section CommRing variable {R : Type*} [CommRing R] [Nontrivial R] (m : Matrix (Fin 2) (Fin 2) R) (g : GL (Fin 2) R) /-- A `2 × 2` matrix is *parabolic* if it is non-scalar and its discriminant is 0. -/ def IsParabolic : Prop := m ∉ Set.range (scalar _) ∧ m.discr = 0 variable {m} section conjugation @[simp] lemma discr_conj : (g.val * m * g.val⁻¹).discr = m.discr := by simp only [discr_fin_two, ← Matrix.coe_units_inv, trace_units_conj, det_units_conj] @[simp] lemma discr_conj' : (g.val⁻¹ * m * g.val).discr = m.discr := by simpa using discr_conj g⁻¹ @[deprecated (since := "2025-10-20")] alias disc_conj := discr_conj @[deprecated (since := "2025-10-20")] alias disc_conj' := discr_conj' @[simp] lemma isParabolic_conj_iff : (g.val * m * g.val⁻¹).IsParabolic ↔ IsParabolic m := by simp_rw [IsParabolic, discr_conj, Set.mem_range, ← Matrix.coe_units_inv, Units.eq_mul_inv_iff_mul_eq, scalar_apply, ← smul_eq_diagonal_mul, smul_eq_mul_diagonal, Units.mul_right_inj] @[simp] lemma isParabolic_conj'_iff : (g.val⁻¹ * m * g.val).IsParabolic ↔ m.IsParabolic := by simpa using isParabolic_conj_iff g⁻¹ end conjugation lemma isParabolic_iff_of_upperTriangular [IsReduced R] (hm : m 1 0 = 0) : m.IsParabolic ↔ m 0 0 = m 1 1 ∧ m 0 1 ≠ 0 := by rw [IsParabolic] have aux : m.discr = 0 ↔ m 0 0 = m 1 1 := by suffices m.discr = (m 0 0 - m 1 1) ^ 2 by rw [this, IsReduced.pow_eq_zero_iff two_ne_zero, sub_eq_zero] grind [disc_fin_two, trace_fin_two, det_fin_two] have (h : m 0 0 = m 1 1) : m ∈ Set.range (scalar _) ↔ m 0 1 = 0 := by constructor · rintro ⟨a, rfl⟩ simp · intro h' use m 1 1 ext i j fin_cases i <;> fin_cases j <;> simp [h, h', hm] tauto end CommRing section Field variable {K : Type*} [Field K] {m : Matrix (Fin 2) (Fin 2) K} lemma sub_scalar_sq_eq_discr [NeZero (2 : K)] : (m - 
scalar _ (m.trace / 2)) ^ 2 = scalar _ (m.discr / 4) := by simp only [scalar_apply, trace_fin_two, discr_fin_two, trace_fin_two, det_fin_two, sq, (by norm_num : (4 : K) = 2 * 2)] ext i j fin_cases i <;> fin_cases j <;> · simp [Matrix.mul_apply] field @[deprecated (since := "2025-10-20")] alias sub_scalar_sq_eq_disc := sub_scalar_sq_eq_discr variable (m) in /-- The unique eigenvalue of a parabolic matrix (junk if `m` is not parabolic). -/ def parabolicEigenvalue : K := m.trace / 2 lemma IsParabolic.sub_eigenvalue_sq_eq_zero [NeZero (2 : K)] (hm : m.IsParabolic) : (m - scalar _ m.parabolicEigenvalue) ^ 2 = 0 := by simp [parabolicEigenvalue, -scalar_apply, sub_scalar_sq_eq_discr, hm.2] /-- Characterization of parabolic elements: they have the form `a + m` where `a` is scalar and `m` is nonzero and nilpotent. -/ lemma isParabolic_iff_exists [NeZero (2 : K)] : m.IsParabolic ↔ ∃ a n, m = scalar _ a + n ∧ n ≠ 0 ∧ n ^ 2 = 0 := by constructor · exact fun hm ↦ ⟨_, _, (add_sub_cancel ..).symm, sub_ne_zero.mpr fun h ↦ hm.1 ⟨_, h.symm⟩, hm.sub_eigenvalue_sq_eq_zero⟩ · rintro ⟨a, n, hm, hn0, hnsq⟩ constructor · refine fun ⟨b, hb⟩ ↦ hn0 ?_ rw [← sub_eq_iff_eq_add'] at hm simpa only [← hm, ← hb, ← map_sub, ← map_pow, ← map_zero (scalar (Fin 2)), scalar_inj, sq_eq_zero_iff] using hnsq · suffices scalar (Fin 2) (m.discr / 4) = 0 by rw [← map_zero (scalar (Fin 2)), scalar_inj, div_eq_zero_iff] at this have : (4 : K) ≠ 0 := by simpa [show (4 : K) = 2 ^ 2 by norm_num] using NeZero.ne _ tauto rw [← sub_scalar_sq_eq_discr, hm, trace_add, scalar_apply, trace_diagonal] simp [mul_div_cancel_left₀ _ (NeZero.ne (2 : K)), (Matrix.isNilpotent_trace_of_isNilpotent ⟨2, hnsq⟩).eq_zero , hnsq] end Field section LinearOrderedRing variable {R : Type*} [CommRing R] [Nontrivial R] [Preorder R] (m : Matrix (Fin 2) (Fin 2) R) (g : GL (Fin 2) R) /-- A `2 × 2` matrix is *hyperbolic* if its discriminant is strictly positive. 
-/ def IsHyperbolic : Prop := 0 < m.discr /-- A `2 × 2` matrix is *elliptic* if its discriminant is strictly negative. -/ def IsElliptic : Prop := m.discr < 0 variable {m} lemma isHyperbolic_conj_iff : (g.val * m * g.val⁻¹).IsHyperbolic ↔ m.IsHyperbolic := by simp [IsHyperbolic] lemma isHyperbolic_conj'_iff : (g.val⁻¹ * m * g.val).IsHyperbolic ↔ m.IsHyperbolic := by simpa using isHyperbolic_conj_iff g⁻¹ lemma isElliptic_conj_iff : (g.val * m * g.val⁻¹).IsElliptic ↔ m.IsElliptic := by simp [IsElliptic] lemma isElliptic_conj'_iff : (g.val⁻¹ * m * g.val).IsElliptic ↔ m.IsElliptic := by simpa using isElliptic_conj_iff g⁻¹ end LinearOrderedRing namespace GeneralLinearGroup section Ring variable {R : Type*} [Ring R] /-- The map sending `x` to `[1, x; 0, 1]` (bundled as an `AddChar`). -/ @[simps apply] def upperRightHom : AddChar R (GL (Fin 2) R) where toFun x := ⟨!![1, x; 0, 1], !![1, -x; 0, 1], by simp [one_fin_two], by simp [one_fin_two]⟩ map_zero_eq_one' := by simp [Units.ext_iff, one_fin_two] map_add_eq_mul' a b := by simp [Units.ext_iff, add_comm] lemma injective_upperRightHom : Function.Injective (upperRightHom (R := R)) := by refine (injective_iff_map_eq_zero (upperRightHom (R := R)).toAddMonoidHom).mpr ?_ simp [Units.ext_iff, one_fin_two] end Ring variable {R K : Type*} [CommRing R] [Field K] /-- Synonym of `Matrix.IsParabolic`, for dot-notation. -/ abbrev IsParabolic (g : GL (Fin 2) R) : Prop := g.val.IsParabolic @[simp] lemma isParabolic_conj_iff [Nontrivial R] (g h : GL (Fin 2) R) : IsParabolic (g * h * g⁻¹) ↔ IsParabolic h := by simp [IsParabolic] @[simp] lemma isParabolic_conj_iff' [Nontrivial R] (g h : GL (Fin 2) R) : IsParabolic (g⁻¹ * h * g) ↔ IsParabolic h := by simp [IsParabolic] /-- Synonym of `Matrix.IsElliptic`, for dot-notation. -/ abbrev IsElliptic [Preorder R] (g : GL (Fin 2) R) : Prop := g.val.IsElliptic /-- Synonym of `Matrix.IsHyperbolic`, for dot-notation. 
-/ abbrev IsHyperbolic [Preorder R] (g : GL (Fin 2) R) : Prop := g.val.IsHyperbolic /-- Polynomial whose roots are the fixed points of `g` considered as a Möbius transformation. See `Matrix.GeneralLinearGroup.fixpointPolynomial_aeval_eq_zero_iff`. -/ noncomputable def fixpointPolynomial (g : GL (Fin 2) R) : R[X] := C (g 1 0) * X ^ 2 + C (g 1 1 - g 0 0) * X - C (g 0 1) /-- The fixed-point polynomial is identically zero iff `g` is scalar. -/ lemma fixpointPolynomial_eq_zero_iff {g : GL (Fin 2) R} : g.fixpointPolynomial = 0 ↔ g.val ∈ Set.range (scalar _) := by rw [fixpointPolynomial] constructor · refine fun hP ↦ ⟨g 0 0, ?_⟩ have hb : g 0 1 = 0 := by simpa using congr_arg (coeff · 0) hP have hc : g 1 0 = 0 := by simpa using congr_arg (coeff · 2) hP have hd : g 1 1 = g 0 0 := by simpa [sub_eq_zero] using congr_arg (coeff · 1) hP ext i j fin_cases i <;> fin_cases j <;> simp [hb, hc, hd] · rintro ⟨a, ha⟩ simp [← ha] lemma parabolicEigenvalue_ne_zero {g : GL (Fin 2) K} [NeZero (2 : K)] (hg : IsParabolic g) : g.val.parabolicEigenvalue ≠ 0 := by have : g.val.trace ^ 2 = 4 * g.val.det := by simpa [sub_eq_zero, discr_fin_two] using hg.2 rw [parabolicEigenvalue, div_ne_zero_iff, eq_true_intro (two_ne_zero' K), and_true, Ne, ← sq_eq_zero_iff, this, show (4 : K) = 2 ^ 2 by norm_num, mul_eq_zero, sq_eq_zero_iff, not_or] exact ⟨NeZero.ne _, g.det_ne_zero⟩ /-- A non-zero power of a parabolic element is parabolic. 
-/ lemma IsParabolic.pow {g : GL (Fin 2) K} (hg : IsParabolic g) [CharZero K] {n : ℕ} (hn : n ≠ 0) : IsParabolic (g ^ n) := by rw [IsParabolic, isParabolic_iff_exists] at hg ⊢ obtain ⟨a, m, hg, hm0, hmsq⟩ := hg refine ⟨a ^ n, (n * a ^ (n - 1)) • m, ?_, ?_, by simp [smul_pow, hmsq]⟩ · rw [Units.val_pow_eq_pow_val, hg] rw [← Nat.one_le_iff_ne_zero] at hn induction n, hn using Nat.le_induction with | base => simp | succ n hn IH => simp only [pow_succ, IH, add_mul, Nat.add_sub_cancel, mul_add, ← map_mul, add_assoc] simp only [scalar_apply, ← smul_eq_mul_diagonal, ← MulAction.mul_smul, ← smul_eq_diagonal_mul, smul_mul, ← sq, hmsq, smul_zero, add_zero, ← add_smul, Nat.cast_add_one, add_mul, one_mul] rw [(by cutsat : n = n - 1 + 1), pow_succ, (by cutsat : n - 1 + 1 = n)] ring_nf · suffices a ≠ 0 by simp [this, hm0, hn] refine fun ha ↦ (g ^ 2).det_ne_zero ?_ rw [ha, map_zero, zero_add] at hg rw [← hg] at hmsq rw [Units.val_pow_eq_pow_val, hmsq, det_zero ⟨0⟩] lemma isParabolic_iff_of_upperTriangular {g : GL (Fin 2) K} (hg : g 1 0 = 0) : g.IsParabolic ↔ g 0 0 = g 1 1 ∧ g 0 1 ≠ 0 := Matrix.isParabolic_iff_of_upperTriangular hg /-- Specialized version of `isParabolic_iff_of_upperTriangular` intended for use with discrete subgroups of `GL(2, ℝ)`. 
-/ lemma isParabolic_iff_of_upperTriangular_of_det [LinearOrder K] [IsStrictOrderedRing K] {g : GL (Fin 2) K} (h_det : g.det = 1 ∨ g.det = -1) (hg10 : g 1 0 = 0) : g.IsParabolic ↔ (∃ x ≠ 0, g = upperRightHom x) ∨ (∃ x ≠ 0, g = -upperRightHom x) := by rw [isParabolic_iff_of_upperTriangular hg10] constructor · rintro ⟨hg00, hg01⟩ have : g 1 1 ^ 2 = 1 := by have : g.det = g 1 1 ^ 2 := by rw [val_det_apply, det_fin_two, hg10, hg00]; ring simp only [Units.ext_iff, Units.val_one, Units.val_neg, this] at h_det exact h_det.resolve_right (neg_one_lt_zero.trans_le <| sq_nonneg _).ne' apply (sq_eq_one_iff.mp this).imp <;> intro hg11 <;> simp only [Units.ext_iff] · refine ⟨g 0 1, hg01, ?_⟩ rw [g.val.eta_fin_two] simp_all · refine ⟨-g 0 1, neg_eq_zero.not.mpr hg01, ?_⟩ rw [g.val.eta_fin_two] simp_all · rintro (⟨x, hx, rfl⟩ | ⟨x, hx, rfl⟩) <;> simpa using hx end GeneralLinearGroup end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Determinant/Basic.lean
import Mathlib.Data.Matrix.Basic import Mathlib.Data.Matrix.Block import Mathlib.LinearAlgebra.Matrix.Notation import Mathlib.LinearAlgebra.Matrix.RowCol import Mathlib.GroupTheory.GroupAction.Ring import Mathlib.GroupTheory.Perm.Fin import Mathlib.LinearAlgebra.Alternating.Basic import Mathlib.LinearAlgebra.Matrix.SemiringInverse /-! # Determinant of a matrix This file defines the determinant of a matrix, `Matrix.det`, and its essential properties. ## Main definitions - `Matrix.det`: the determinant of a square matrix, as a sum over permutations - `Matrix.detRowAlternating`: the determinant, as an `AlternatingMap` in the rows of the matrix ## Main results - `det_mul`: the determinant of `A * B` is the product of determinants - `det_zero_of_row_eq`: the determinant is zero if there is a repeated row - `det_block_diagonal`: the determinant of a block diagonal matrix is a product of the blocks' determinants ## Implementation notes It is possible to configure `simp` to compute determinants. See the file `MathlibTest/matrix.lean` for some examples. -/ universe u v w z open Equiv Equiv.Perm Finset Function namespace Matrix variable {m n : Type*} [DecidableEq n] [Fintype n] [DecidableEq m] [Fintype m] variable {R : Type v} [CommRing R] local notation "ε " σ:arg => ((sign σ : ℤ) : R) /-- `det` is an `AlternatingMap` in the rows of the matrix. -/ def detRowAlternating : (n → R) [⋀^n]→ₗ[R] R := MultilinearMap.alternatization ((MultilinearMap.mkPiAlgebra R n R).compLinearMap LinearMap.proj) /-- The determinant of a matrix given by the Leibniz formula. -/ abbrev det (M : Matrix n n R) : R := detRowAlternating M theorem det_apply (M : Matrix n n R) : M.det = ∑ σ : Perm n, Equiv.Perm.sign σ • ∏ i, M (σ i) i := MultilinearMap.alternatization_apply _ M -- This is what the old definition was. 
We use it to avoid having to change the old proofs below theorem det_apply' (M : Matrix n n R) : M.det = ∑ σ : Perm n, ε σ * ∏ i, M (σ i) i := by simp [det_apply, Units.smul_def] theorem det_eq_detp_sub_detp (M : Matrix n n R) : M.det = M.detp 1 - M.detp (-1) := by rw [det_apply, ← Equiv.sum_comp (Equiv.inv (Perm n)), ← ofSign_disjUnion, sum_disjUnion] simp_rw [inv_apply, sign_inv, sub_eq_add_neg, detp, ← sum_neg_distrib] refine congr_arg₂ (· + ·) (sum_congr rfl fun σ hσ ↦ ?_) (sum_congr rfl fun σ hσ ↦ ?_) <;> rw [mem_ofSign.mp hσ, ← Equiv.prod_comp σ] <;> simp @[simp] theorem det_diagonal {d : n → R} : det (diagonal d) = ∏ i, d i := by rw [det_apply'] refine (Finset.sum_eq_single 1 ?_ ?_).trans ?_ · rintro σ - h2 obtain ⟨x, h3⟩ := not_forall.1 (mt Equiv.ext h2) convert mul_zero (ε σ) apply Finset.prod_eq_zero (mem_univ x) exact if_neg h3 · simp · simp theorem det_zero (_ : Nonempty n) : det (0 : Matrix n n R) = 0 := (detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_zero @[simp] theorem det_one : det (1 : Matrix n n R) = 1 := by rw [← diagonal_one]; simp [-diagonal_one] theorem det_isEmpty [IsEmpty n] {A : Matrix n n R} : det A = 1 := by simp [det_apply] @[simp] theorem coe_det_isEmpty [IsEmpty n] : (det : Matrix n n R → R) = Function.const _ 1 := by ext exact det_isEmpty theorem det_eq_one_of_card_eq_zero {A : Matrix n n R} (h : Fintype.card n = 0) : det A = 1 := haveI : IsEmpty n := Fintype.card_eq_zero_iff.mp h det_isEmpty /-- If `n` has only one element, the determinant of an `n` by `n` matrix is just that element. Although `Unique` implies `DecidableEq` and `Fintype`, the instances might not be syntactically equal. Thus, we need to fill in the args explicitly. 
-/ @[simp] theorem det_unique {n : Type*} [Unique n] [DecidableEq n] [Fintype n] (A : Matrix n n R) : det A = A default default := by simp [det_apply, univ_unique] theorem det_eq_elem_of_subsingleton [Subsingleton n] (A : Matrix n n R) (k : n) : det A = A k k := by have := uniqueOfSubsingleton k convert det_unique A theorem det_eq_elem_of_card_eq_one {A : Matrix n n R} (h : Fintype.card n = 1) (k : n) : det A = A k k := haveI : Subsingleton n := Fintype.card_le_one_iff_subsingleton.mp h.le det_eq_elem_of_subsingleton _ _ theorem det_mul_aux {M N : Matrix n n R} {p : n → n} (H : ¬Bijective p) : (∑ σ : Perm n, ε σ * ∏ x, M (σ x) (p x) * N (p x) x) = 0 := by obtain ⟨i, j, hpij, hij⟩ : ∃ i j, p i = p j ∧ i ≠ j := by rw [← Finite.injective_iff_bijective, Injective] at H push_neg at H exact H exact sum_involution (fun σ _ => σ * Equiv.swap i j) (fun σ _ => by have : (∏ x, M (σ x) (p x)) = ∏ x, M ((σ * Equiv.swap i j) x) (p x) := Fintype.prod_equiv (swap i j) _ _ (by simp [apply_swap_eq_self hpij]) simp [this, sign_swap hij, -sign_swap', prod_mul_distrib]) (fun σ _ _ => (not_congr mul_swap_eq_iff).mpr hij) (fun _ _ => mem_univ _) fun σ _ => mul_swap_involutive i j σ @[simp] theorem det_mul (M N : Matrix n n R) : det (M * N) = det M * det N := calc det (M * N) = ∑ p : n → n, ∑ σ : Perm n, ε σ * ∏ i, M (σ i) (p i) * N (p i) i := by simp only [det_apply', mul_apply, prod_univ_sum, mul_sum, Fintype.piFinset_univ] rw [Finset.sum_comm] _ = ∑ p : n → n with Bijective p, ∑ σ : Perm n, ε σ * ∏ i, M (σ i) (p i) * N (p i) i := by refine (sum_subset (filter_subset _ _) fun f _ hbij ↦ det_mul_aux ?_).symm simpa only [mem_filter_univ] using hbij _ = ∑ τ : Perm n, ∑ σ : Perm n, ε σ * ∏ i, M (σ i) (τ i) * N (τ i) i := sum_bij (fun p h ↦ Equiv.ofBijective p (mem_filter.1 h).2) (fun _ _ ↦ mem_univ _) (fun _ _ _ _ h ↦ by injection h) (fun b _ ↦ ⟨b, mem_filter.2 ⟨mem_univ _, b.bijective⟩, coe_fn_injective rfl⟩) fun _ _ ↦ rfl _ = ∑ σ : Perm n, ∑ τ : Perm n, (∏ i, N (σ i) i) * ε τ * ∏ j, M (τ 
j) (σ j) := by simp only [mul_comm, mul_left_comm, prod_mul_distrib, mul_assoc] _ = ∑ σ : Perm n, ∑ τ : Perm n, (∏ i, N (σ i) i) * (ε σ * ε τ) * ∏ i, M (τ i) i := (sum_congr rfl fun σ _ => Fintype.sum_equiv (Equiv.mulRight σ⁻¹) _ _ fun τ => by have : (∏ j, M (τ j) (σ j)) = ∏ j, M ((τ * σ⁻¹) j) j := by rw [← (σ⁻¹ : _ ≃ _).prod_comp] simp have h : ε σ * ε (τ * σ⁻¹) = ε τ := calc ε σ * ε (τ * σ⁻¹) = ε (τ * σ⁻¹ * σ) := by rw [mul_comm, sign_mul (τ * σ⁻¹)] simp only [Int.cast_mul, Units.val_mul] _ = ε τ := by simp only [inv_mul_cancel_right] simp_rw [Equiv.coe_mulRight, h] simp only [this]) _ = det M * det N := by simp only [det_apply', Finset.mul_sum, mul_comm, mul_left_comm, mul_assoc] /-- The determinant of a matrix, as a monoid homomorphism. -/ def detMonoidHom : Matrix n n R →* R where toFun := det map_one' := det_one map_mul' := det_mul @[simp] theorem coe_detMonoidHom : (detMonoidHom : Matrix n n R → R) = det := rfl /-- On square matrices, `mul_comm` applies under `det`. -/ theorem det_mul_comm (M N : Matrix m m R) : det (M * N) = det (N * M) := by rw [det_mul, det_mul, mul_comm] /-- On square matrices, `mul_left_comm` applies under `det`. -/ theorem det_mul_left_comm (M N P : Matrix m m R) : det (M * (N * P)) = det (N * (M * P)) := by rw [← Matrix.mul_assoc, ← Matrix.mul_assoc, det_mul, det_mul_comm M N, ← det_mul] /-- On square matrices, `mul_right_comm` applies under `det`. 
-/ theorem det_mul_right_comm (M N P : Matrix m m R) : det (M * N * P) = det (M * P * N) := by rw [Matrix.mul_assoc, Matrix.mul_assoc, det_mul, det_mul_comm N P, ← det_mul] -- TODO(https://github.com/leanprover-community/mathlib4/issues/6607): fix elaboration so `val` isn't needed theorem det_units_conj (M : (Matrix m m R)ˣ) (N : Matrix m m R) : det (M.val * N * M⁻¹.val) = det N := by rw [det_mul_right_comm, Units.mul_inv, one_mul] -- TODO(https://github.com/leanprover-community/mathlib4/issues/6607): fix elaboration so `val` isn't needed theorem det_units_conj' (M : (Matrix m m R)ˣ) (N : Matrix m m R) : det (M⁻¹.val * N * ↑M.val) = det N := det_units_conj M⁻¹ N /-- Transposing a matrix preserves the determinant. -/ @[simp] theorem det_transpose (M : Matrix n n R) : Mᵀ.det = M.det := by rw [det_apply', det_apply'] refine Fintype.sum_bijective _ inv_involutive.bijective _ _ ?_ intro σ rw [sign_inv] congr 1 apply Fintype.prod_equiv σ simp /-- Permuting the columns changes the sign of the determinant. -/ theorem det_permute (σ : Perm n) (M : Matrix n n R) : (M.submatrix σ id).det = Perm.sign σ * M.det := ((detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_perm M σ).trans (by simp [Units.smul_def]) /-- Permuting the rows changes the sign of the determinant. -/ theorem det_permute' (σ : Perm n) (M : Matrix n n R) : (M.submatrix id σ).det = Perm.sign σ * M.det := by rw [← det_transpose, transpose_submatrix, det_permute, det_transpose] /-- Permuting rows and columns with the same equivalence does not change the determinant. -/ @[simp] theorem det_submatrix_equiv_self (e : n ≃ m) (A : Matrix m m R) : det (A.submatrix e e) = det A := by rw [det_apply', det_apply'] apply Fintype.sum_equiv (Equiv.permCongr e) intro σ rw [Equiv.Perm.sign_permCongr e σ] congr 1 apply Fintype.prod_equiv e intro i rw [Equiv.permCongr_apply, Equiv.symm_apply_apply, submatrix_apply] /-- Permuting rows and columns with two equivalences does not change the absolute value of the determinant. 
-/ @[simp] theorem abs_det_submatrix_equiv_equiv {R : Type*} [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] (e₁ e₂ : n ≃ m) (A : Matrix m m R) : |(A.submatrix e₁ e₂).det| = |A.det| := by have hee : e₂ = e₁.trans (e₁.symm.trans e₂) := by ext; simp rw [hee] change |((A.submatrix id (e₁.symm.trans e₂)).submatrix e₁ e₁).det| = |A.det| rw [Matrix.det_submatrix_equiv_self, Matrix.det_permute', abs_mul, abs_unit_intCast, one_mul] /-- Reindexing both indices along the same equivalence preserves the determinant. For the `simp` version of this lemma, see `det_submatrix_equiv_self`; this one is unsuitable because `Matrix.reindex_apply` unfolds `reindex` first. -/ theorem det_reindex_self (e : m ≃ n) (A : Matrix m m R) : det (reindex e e A) = det A := det_submatrix_equiv_self e.symm A /-- Reindexing both indices along equivalences preserves the absolute of the determinant. For the `simp` version of this lemma, see `abs_det_submatrix_equiv_equiv`; this one is unsuitable because `Matrix.reindex_apply` unfolds `reindex` first. 
-/ theorem abs_det_reindex {R : Type*} [CommRing R] [LinearOrder R] [IsStrictOrderedRing R] (e₁ e₂ : m ≃ n) (A : Matrix m m R) : |det (reindex e₁ e₂ A)| = |det A| := abs_det_submatrix_equiv_equiv e₁.symm e₂.symm A theorem det_smul (A : Matrix n n R) (c : R) : det (c • A) = c ^ Fintype.card n * det A := calc det (c • A) = det ((diagonal fun _ => c) * A) := by rw [smul_eq_diagonal_mul] _ = det (diagonal fun _ => c) * det A := det_mul _ _ _ = c ^ Fintype.card n * det A := by simp @[simp] theorem det_smul_of_tower {α} [Monoid α] [MulAction α R] [IsScalarTower α R R] [SMulCommClass α R R] (c : α) (A : Matrix n n R) : det (c • A) = c ^ Fintype.card n • det A := by rw [← smul_one_smul R c A, det_smul, smul_pow, one_pow, smul_mul_assoc, one_mul] theorem det_neg (A : Matrix n n R) : det (-A) = (-1) ^ Fintype.card n * det A := by rw [← det_smul, neg_one_smul] /-- A variant of `Matrix.det_neg` with scalar multiplication by `Units ℤ` instead of multiplication by `R`. -/ theorem det_neg_eq_smul (A : Matrix n n R) : det (-A) = (-1 : Units ℤ) ^ Fintype.card n • det A := by rw [← det_smul_of_tower, Units.neg_smul, one_smul] /-- Multiplying each row by a fixed `v i` multiplies the determinant by the product of the `v`s. -/ theorem det_mul_row (v : n → R) (A : Matrix n n R) : det (of fun i j => v j * A i j) = (∏ i, v i) * det A := calc det (of fun i j => v j * A i j) = det (A * diagonal v) := congr_arg det <| by ext simp [mul_comm] _ = (∏ i, v i) * det A := by rw [det_mul, det_diagonal, mul_comm] /-- Multiplying each column by a fixed `v j` multiplies the determinant by the product of the `v`s. 
-/ theorem det_mul_column (v : n → R) (A : Matrix n n R) : det (of fun i j => v i * A i j) = (∏ i, v i) * det A := MultilinearMap.map_smul_univ _ v A @[simp] theorem det_pow (M : Matrix m m R) (n : ℕ) : det (M ^ n) = det M ^ n := (detMonoidHom : Matrix m m R →* R).map_pow M n section HomMap variable {S : Type w} [CommRing S] theorem _root_.RingHom.map_det (f : R →+* S) (M : Matrix n n R) : f M.det = Matrix.det (f.mapMatrix M) := by simp [Matrix.det_apply', map_sum f, map_prod f] theorem _root_.RingEquiv.map_det (f : R ≃+* S) (M : Matrix n n R) : f M.det = Matrix.det (f.mapMatrix M) := f.toRingHom.map_det _ theorem _root_.AlgHom.map_det [Algebra R S] {T : Type z} [CommRing T] [Algebra R T] (f : S →ₐ[R] T) (M : Matrix n n S) : f M.det = Matrix.det (f.mapMatrix M) := f.toRingHom.map_det _ theorem _root_.AlgEquiv.map_det [Algebra R S] {T : Type z} [CommRing T] [Algebra R T] (f : S ≃ₐ[R] T) (M : Matrix n n S) : f M.det = Matrix.det (f.mapMatrix M) := f.toAlgHom.map_det _ @[norm_cast] theorem _root_.Int.cast_det (M : Matrix n n ℤ) : (M.det : R) = (M.map fun x ↦ (x : R)).det := Int.castRingHom R |>.map_det M @[norm_cast] theorem _root_.Rat.cast_det {F : Type*} [Field F] [CharZero F] (M : Matrix n n ℚ) : (M.det : F) = (M.map fun x ↦ (x : F)).det := Rat.castHom F |>.map_det M end HomMap @[simp] theorem det_conjTranspose [StarRing R] (M : Matrix m m R) : det Mᴴ = star (det M) := ((starRingEnd R).map_det _).symm.trans <| congr_arg star M.det_transpose section DetZero /-! ### `det_zero` section Prove that a matrix with a repeated column has determinant equal to zero. 
-/ theorem det_eq_zero_of_row_eq_zero {A : Matrix n n R} (i : n) (h : ∀ j, A i j = 0) : det A = 0 := (detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_coord_zero i (funext h) theorem det_eq_zero_of_column_eq_zero {A : Matrix n n R} (j : n) (h : ∀ i, A i j = 0) : det A = 0 := by rw [← det_transpose] exact det_eq_zero_of_row_eq_zero j h variable {M : Matrix n n R} {i j : n} /-- If a matrix has a repeated row, the determinant will be zero. -/ theorem det_zero_of_row_eq (i_ne_j : i ≠ j) (hij : M i = M j) : M.det = 0 := (detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_eq_zero_of_eq M hij i_ne_j /-- If a matrix has a repeated column, the determinant will be zero. -/ theorem det_zero_of_column_eq (i_ne_j : i ≠ j) (hij : ∀ k, M k i = M k j) : M.det = 0 := by rw [← det_transpose, det_zero_of_row_eq i_ne_j] exact funext hij /-- If we repeat a row of a matrix, we get a matrix of determinant zero. -/ theorem det_updateRow_eq_zero (h : i ≠ j) : (M.updateRow j (M i)).det = 0 := det_zero_of_row_eq h (by simp [h]) /-- If we repeat a column of a matrix, we get a matrix of determinant zero. 
-/ theorem det_updateCol_eq_zero (h : i ≠ j) : (M.updateCol j (fun k ↦ M k i)).det = 0 := det_zero_of_column_eq h (by simp [h]) end DetZero theorem det_updateRow_add (M : Matrix n n R) (j : n) (u v : n → R) : det (updateRow M j <| u + v) = det (updateRow M j u) + det (updateRow M j v) := (detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_update_add M j u v theorem det_updateCol_add (M : Matrix n n R) (j : n) (u v : n → R) : det (updateCol M j <| u + v) = det (updateCol M j u) + det (updateCol M j v) := by rw [← det_transpose, ← updateRow_transpose, det_updateRow_add] simp [updateRow_transpose, det_transpose] theorem det_updateRow_smul (M : Matrix n n R) (j : n) (s : R) (u : n → R) : det (updateRow M j <| s • u) = s * det (updateRow M j u) := (detRowAlternating : (n → R) [⋀^n]→ₗ[R] R).map_update_smul M j s u theorem det_updateCol_smul (M : Matrix n n R) (j : n) (s : R) (u : n → R) : det (updateCol M j <| s • u) = s * det (updateCol M j u) := by rw [← det_transpose, ← updateRow_transpose, det_updateRow_smul] simp [updateRow_transpose, det_transpose] theorem det_updateRow_smul_left (M : Matrix n n R) (j : n) (s : R) (u : n → R) : det (updateRow (s • M) j u) = s ^ (Fintype.card n - 1) * det (updateRow M j u) := MultilinearMap.map_update_smul_left _ M j s u theorem det_updateCol_smul_left (M : Matrix n n R) (j : n) (s : R) (u : n → R) : det (updateCol (s • M) j u) = s ^ (Fintype.card n - 1) * det (updateCol M j u) := by rw [← det_transpose, ← updateRow_transpose, transpose_smul, det_updateRow_smul_left] simp [updateRow_transpose, det_transpose] theorem det_updateRow_sum_aux (M : Matrix n n R) {j : n} (s : Finset n) (hj : j ∉ s) (c : n → R) (a : R) : (M.updateRow j (a • M j + ∑ k ∈ s, (c k) • M k)).det = a • M.det := by induction s using Finset.induction_on with | empty => rw [Finset.sum_empty, add_zero, smul_eq_mul, det_updateRow_smul, updateRow_eq_self] | insert k _ hk h_ind => have h : k ≠ j := fun h ↦ (h ▸ hj) (Finset.mem_insert_self _ _) rw [Finset.sum_insert hk, 
add_comm ((c k) • M k), ← add_assoc, det_updateRow_add, det_updateRow_smul, det_updateRow_eq_zero h, mul_zero, add_zero, h_ind] exact fun h ↦ hj (Finset.mem_insert_of_mem h) /-- If we replace a row of a matrix by a linear combination of its rows, then the determinant is multiplied by the coefficient of that row. -/ theorem det_updateRow_sum (A : Matrix n n R) (j : n) (c : n → R) : (A.updateRow j (∑ k, (c k) • A k)).det = (c j) • A.det := by convert det_updateRow_sum_aux A (Finset.univ.erase j) (Finset.univ.notMem_erase j) c (c j) rw [← Finset.univ.add_sum_erase _ (Finset.mem_univ j)] /-- If we replace a column of a matrix by a linear combination of its columns, then the determinant is multiplied by the coefficient of that column. -/ theorem det_updateCol_sum (A : Matrix n n R) (j : n) (c : n → R) : (A.updateCol j (fun k ↦ ∑ i, (c i) • A k i)).det = (c j) • A.det := by rw [← det_transpose, ← updateRow_transpose, ← det_transpose A] convert det_updateRow_sum A.transpose j c simp only [smul_eq_mul, Finset.sum_apply, Pi.smul_apply, transpose_apply] section DetEq /-! ### `det_eq` section Lemmas showing the determinant is invariant under a variety of operations. 
-/ theorem det_eq_of_eq_mul_det_one {A B : Matrix n n R} (C : Matrix n n R) (hC : det C = 1) (hA : A = B * C) : det A = det B := calc det A = det (B * C) := congr_arg _ hA _ = det B * det C := det_mul _ _ _ = det B := by rw [hC, mul_one] theorem det_eq_of_eq_det_one_mul {A B : Matrix n n R} (C : Matrix n n R) (hC : det C = 1) (hA : A = C * B) : det A = det B := calc det A = det (C * B) := congr_arg _ hA _ = det C * det B := det_mul _ _ _ = det B := by rw [hC, one_mul] theorem det_updateRow_add_self (A : Matrix n n R) {i j : n} (hij : i ≠ j) : det (updateRow A i (A i + A j)) = det A := by simp [det_updateRow_add, det_zero_of_row_eq hij (updateRow_self.trans (updateRow_ne hij.symm).symm)] theorem det_updateCol_add_self (A : Matrix n n R) {i j : n} (hij : i ≠ j) : det (updateCol A i fun k => A k i + A k j) = det A := by rw [← det_transpose, ← updateRow_transpose, ← det_transpose A] exact det_updateRow_add_self Aᵀ hij theorem det_updateRow_add_smul_self (A : Matrix n n R) {i j : n} (hij : i ≠ j) (c : R) : det (updateRow A i (A i + c • A j)) = det A := by simp [det_updateRow_add, det_updateRow_smul, det_zero_of_row_eq hij (updateRow_self.trans (updateRow_ne hij.symm).symm)] theorem det_updateCol_add_smul_self (A : Matrix n n R) {i j : n} (hij : i ≠ j) (c : R) : det (updateCol A i fun k => A k i + c • A k j) = det A := by rw [← det_transpose, ← updateRow_transpose, ← det_transpose A] exact det_updateRow_add_smul_self Aᵀ hij c theorem det_eq_zero_of_not_linearIndependent_rows [IsDomain R] {A : Matrix m m R} (hA : ¬ LinearIndependent R (fun i ↦ A i)) : det A = 0 := detRowAlternating.map_linearDependent A hA theorem linearIndependent_rows_of_det_ne_zero [IsDomain R] {A : Matrix m m R} (hA : A.det ≠ 0) : LinearIndependent R (fun i ↦ A i) := by contrapose! 
hA exact det_eq_zero_of_not_linearIndependent_rows hA theorem linearIndependent_cols_of_det_ne_zero [IsDomain R] {A : Matrix m m R} (hA : A.det ≠ 0) : LinearIndependent R A.col := Matrix.linearIndependent_rows_of_det_ne_zero (by simpa [Matrix.col]) theorem det_eq_zero_of_not_linearIndependent_cols [IsDomain R] {A : Matrix m m R} (hA : ¬ LinearIndependent R (fun i ↦ Aᵀ i)) : det A = 0 := by contrapose! hA exact linearIndependent_cols_of_det_ne_zero hA theorem det_vecMulVec [Nontrivial n] (u v : n → R) : (vecMulVec u v).det = 0 := by obtain ⟨i, j, hij⟩ := exists_pair_ne n let uv' := ((vecMulVec u v).updateRow i v).updateRow j v have huv' : uv'.det = 0 := by refine detRowAlternating.map_eq_zero_of_eq _ ?_ hij simp [uv', hij] have : vecMulVec u v = (uv'.updateRow i (u i • uv' i)).updateRow j (u j • uv'.updateRow i (u i • uv' i) j) := by unfold uv' rw [updateRow_comm _ hij, updateRow_idem, updateRow_ne hij.symm, updateRow_ne hij, updateRow_self, updateRow_self, updateRow_comm _ hij, updateRow_idem, ← update_vecMulVec u v j, update_eq_self, ← update_vecMulVec u v i, update_eq_self] rw [this, det_updateRow_smul, updateRow_eq_self, det_updateRow_smul, updateRow_eq_self, huv', mul_zero, mul_zero] theorem det_eq_of_forall_row_eq_smul_add_const_aux {A B : Matrix n n R} {s : Finset n} : ∀ (c : n → R) (_ : ∀ i, i ∉ s → c i = 0) (k : n) (_ : k ∉ s) (_ : ∀ i j, A i j = B i j + c i * B k j), det A = det B := by induction s using Finset.induction_on generalizing B with | empty => rintro c hs k - A_eq have : ∀ i, c i = 0 := by grind congr ext i j rw [A_eq, this, zero_mul, add_zero] | insert i s _hi ih => intro c hs k hk A_eq have hAi : A i = B i + c i • B k := funext (A_eq i) rw [@ih (updateRow B i (A i)) (Function.update c i 0), hAi, det_updateRow_add_smul_self] · exact mt (fun h => show k ∈ insert i s from h ▸ Finset.mem_insert_self _ _) hk · intro i' hi' rw [Function.update_apply] split_ifs with hi'i · rfl · exact hs i' fun h => hi' ((Finset.mem_insert.mp h).resolve_left hi'i) · 
exact k · exact fun h => hk (Finset.mem_insert_of_mem h) · intro i' j' rw [updateRow_apply, Function.update_apply] split_ifs with hi'i · simp [hi'i] rw [A_eq, updateRow_ne fun h : k = i => hk <| h ▸ Finset.mem_insert_self k s] /-- If you add multiples of row `B k` to other rows, the determinant doesn't change. -/ theorem det_eq_of_forall_row_eq_smul_add_const {A B : Matrix n n R} (c : n → R) (k : n) (hk : c k = 0) (A_eq : ∀ i j, A i j = B i j + c i * B k j) : det A = det B := det_eq_of_forall_row_eq_smul_add_const_aux c (fun i => not_imp_comm.mp fun hi => Finset.mem_erase.mpr ⟨mt (fun h : i = k => show c i = 0 from h.symm ▸ hk) hi, Finset.mem_univ i⟩) k (Finset.notMem_erase k Finset.univ) A_eq theorem det_eq_of_forall_row_eq_smul_add_pred_aux {n : ℕ} (k : Fin (n + 1)) : ∀ (c : Fin n → R) (_hc : ∀ i : Fin n, k < i.succ → c i = 0) {M N : Matrix (Fin n.succ) (Fin n.succ) R} (_h0 : ∀ j, M 0 j = N 0 j) (_hsucc : ∀ (i : Fin n) (j), M i.succ j = N i.succ j + c i * M (Fin.castSucc i) j), det M = det N := by refine Fin.induction ?_ (fun k ih => ?_) k <;> intro c hc M N h0 hsucc · congr ext i j refine Fin.cases (h0 j) (fun i => ?_) i rw [hsucc, hc i (Fin.succ_pos _), zero_mul, add_zero] set M' := updateRow M k.succ (N k.succ) with hM' have hM : M = updateRow M' k.succ (M' k.succ + c k • M (Fin.castSucc k)) := by ext i j by_cases hi : i = k.succ · simp [hi, hM', hsucc, updateRow_self] rw [updateRow_ne hi, hM', updateRow_ne hi] have k_ne_succ : (Fin.castSucc k) ≠ k.succ := (Fin.castSucc_lt_succ k).ne have M_k : M (Fin.castSucc k) = M' (Fin.castSucc k) := (updateRow_ne k_ne_succ).symm rw [hM, M_k, det_updateRow_add_smul_self M' k_ne_succ.symm, ih (Function.update c k 0)] · intro i hi rw [Fin.lt_iff_val_lt_val, Fin.coe_castSucc, Fin.val_succ, Nat.lt_succ_iff] at hi rw [Function.update_apply] split_ifs with hik · rfl exact hc _ (Fin.succ_lt_succ_iff.mpr (lt_of_le_of_ne hi (Ne.symm hik))) · rwa [hM', updateRow_ne (Fin.succ_ne_zero _).symm] intro i j rw [Function.update_apply] 
split_ifs with hik · rw [zero_mul, add_zero, hM', hik, updateRow_self] rw [hM', updateRow_ne ((Fin.succ_injective _).ne hik), hsucc] by_cases hik2 : k < i · simp [hc i (Fin.succ_lt_succ_iff.mpr hik2)] rw [updateRow_ne] apply ne_of_lt rwa [Fin.lt_iff_val_lt_val, Fin.coe_castSucc, Fin.val_succ, Nat.lt_succ_iff, ← not_lt] /-- If you add multiples of previous rows to the next row, the determinant doesn't change. -/ theorem det_eq_of_forall_row_eq_smul_add_pred {n : ℕ} {A B : Matrix (Fin (n + 1)) (Fin (n + 1)) R} (c : Fin n → R) (A_zero : ∀ j, A 0 j = B 0 j) (A_succ : ∀ (i : Fin n) (j), A i.succ j = B i.succ j + c i * A (Fin.castSucc i) j) : det A = det B := det_eq_of_forall_row_eq_smul_add_pred_aux (Fin.last _) c (fun _ hi => absurd hi (not_lt_of_ge (Fin.le_last _))) A_zero A_succ /-- If you add multiples of previous columns to the next columns, the determinant doesn't change. -/ theorem det_eq_of_forall_col_eq_smul_add_pred {n : ℕ} {A B : Matrix (Fin (n + 1)) (Fin (n + 1)) R} (c : Fin n → R) (A_zero : ∀ i, A i 0 = B i 0) (A_succ : ∀ (i) (j : Fin n), A i j.succ = B i j.succ + c j * A i (Fin.castSucc j)) : det A = det B := by rw [← det_transpose A, ← det_transpose B] exact det_eq_of_forall_row_eq_smul_add_pred c A_zero fun i j => A_succ j i end DetEq @[simp] theorem det_blockDiagonal {o : Type*} [Fintype o] [DecidableEq o] (M : o → Matrix n n R) : (blockDiagonal M).det = ∏ k, (M k).det := by -- Rewrite the determinants as a sum over permutations. simp_rw [det_apply'] -- The right-hand side is a product of sums, rewrite it as a sum of products. rw [Finset.prod_sum] simp_rw [Finset.prod_attach_univ, Finset.univ_pi_univ] -- We claim that the only permutations contributing to the sum are those that -- preserve their second component. 
let preserving_snd : Finset (Equiv.Perm (n × o)) := {σ | ∀ x, (σ x).snd = x.snd} have mem_preserving_snd : ∀ {σ : Equiv.Perm (n × o)}, σ ∈ preserving_snd ↔ ∀ x, (σ x).snd = x.snd := fun {σ} => Finset.mem_filter.trans ⟨fun h => h.2, fun h => ⟨Finset.mem_univ _, h⟩⟩ rw [← Finset.sum_subset (Finset.subset_univ preserving_snd) _] -- And that these are in bijection with `o → Equiv.Perm m`. · refine (Finset.sum_bij (fun σ _ => prodCongrLeft fun k ↦ σ k (mem_univ k)) ?_ ?_ ?_ ?_).symm · intro σ _ rw [mem_preserving_snd] rintro ⟨-, x⟩ simp only [prodCongrLeft_apply] · intro σ _ σ' _ eq ext x hx k simp only at eq have : ∀ k x, prodCongrLeft (fun k => σ k (Finset.mem_univ _)) (k, x) = prodCongrLeft (fun k => σ' k (Finset.mem_univ _)) (k, x) := fun k x => by rw [eq] simp only [prodCongrLeft_apply, Prod.mk_inj] at this exact (this k x).1 · intro σ hσ rw [mem_preserving_snd] at hσ have hσ' x : (σ⁻¹ x).snd = x.snd := by simpa [eq_comm] using hσ (σ⁻¹ x) have mk_apply_eq : ∀ k x, ((σ (x, k)).fst, k) = σ (x, k) := by intro k x ext · simp only · simp only [hσ] have mk_inv_apply_eq : ∀ k x, ((σ⁻¹ (x, k)).fst, k) = σ⁻¹ (x, k) := by grind refine ⟨fun k _ => ⟨fun x => (σ (x, k)).fst, fun x => (σ⁻¹ (x, k)).fst, ?_, ?_⟩, ?_, ?_⟩ · intro x simp [mk_apply_eq] · intro x simp [mk_inv_apply_eq] · apply Finset.mem_univ · ext ⟨k, x⟩ · simp only [coe_fn_mk, prodCongrLeft_apply] · simp only [prodCongrLeft_apply, hσ] · intro σ _ rw [Finset.prod_mul_distrib, ← Finset.univ_product_univ, Finset.prod_product_right] simp only [sign_prodCongrLeft, Units.coe_prod, Int.cast_prod, blockDiagonal_apply_eq, prodCongrLeft_apply] · intro σ _ hσ rw [mem_preserving_snd] at hσ obtain ⟨⟨k, x⟩, hkx⟩ := not_forall.mp hσ rw [Finset.prod_eq_zero (Finset.mem_univ (k, x)), mul_zero] rw [blockDiagonal_apply_ne] exact hkx /-- The determinant of a 2×2 block matrix with the lower-left block equal to zero is the product of the determinants of the diagonal blocks. 
For the generalization to any number of blocks, see `Matrix.det_of_upperTriangular`. -/ @[simp] theorem det_fromBlocks_zero₂₁ (A : Matrix m m R) (B : Matrix m n R) (D : Matrix n n R) : (Matrix.fromBlocks A B 0 D).det = A.det * D.det := by classical simp_rw [det_apply'] convert Eq.symm <| sum_subset (M := R) (subset_univ ((sumCongrHom m n).range : Set (Perm (m ⊕ n))).toFinset) ?_ · simp_rw [sum_mul_sum, ← sum_product', univ_product_univ] refine sum_nbij (fun σ ↦ σ.fst.sumCongr σ.snd) ?_ ?_ ?_ ?_ · intro σ₁₂ _ simp · intro σ₁ _ σ₂ _ dsimp only intro h have h2 : ∀ x, Perm.sumCongr σ₁.fst σ₁.snd x = Perm.sumCongr σ₂.fst σ₂.snd x := DFunLike.congr_fun h simp only [Sum.map_inr, Sum.map_inl, Perm.sumCongr_apply, Sum.forall, Sum.inl.injEq, Sum.inr.injEq] at h2 ext x · exact h2.left x · exact h2.right x · intro σ hσ rw [mem_coe, Set.mem_toFinset] at hσ obtain ⟨σ₁₂, hσ₁₂⟩ := hσ use σ₁₂ rw [← hσ₁₂] simp · simp only [forall_prop_of_true, Prod.forall, mem_univ] intro σ₁ σ₂ rw [Fintype.prod_sum_type] simp_rw [Equiv.sumCongr_apply, Sum.map_inr, Sum.map_inl, fromBlocks_apply₁₁, fromBlocks_apply₂₂] rw [mul_mul_mul_comm] congr rw [sign_sumCongr, Units.val_mul, Int.cast_mul] · rintro σ - hσn have h1 : ¬∀ x, ∃ y, Sum.inl y = σ (Sum.inl x) := by rw [Set.mem_toFinset] at hσn simpa only [Set.MapsTo, Set.mem_range, forall_exists_index, forall_apply_eq_imp_iff] using mt mem_sumCongrHom_range_of_perm_mapsTo_inl hσn obtain ⟨a, ha⟩ := not_forall.mp h1 rcases hx : σ (Sum.inl a) with a2 | b · have hn := (not_exists.mp ha) a2 exact absurd hx.symm hn · rw [Finset.prod_eq_zero (Finset.mem_univ (Sum.inl a)), mul_zero] rw [hx, fromBlocks_apply₂₁, zero_apply] /-- The determinant of a 2×2 block matrix with the upper-right block equal to zero is the product of the determinants of the diagonal blocks. For the generalization to any number of blocks, see `Matrix.det_of_lowerTriangular`. 
-/ @[simp] theorem det_fromBlocks_zero₁₂ (A : Matrix m m R) (C : Matrix n m R) (D : Matrix n n R) : (Matrix.fromBlocks A 0 C D).det = A.det * D.det := by rw [← det_transpose, fromBlocks_transpose, transpose_zero, det_fromBlocks_zero₂₁, det_transpose, det_transpose] /-- Laplacian expansion of the determinant of an `n+1 × n+1` matrix along column 0. -/ theorem det_succ_column_zero {n : ℕ} (A : Matrix (Fin n.succ) (Fin n.succ) R) : det A = ∑ i : Fin n.succ, (-1) ^ (i : ℕ) * A i 0 * det (A.submatrix i.succAbove Fin.succ) := by rw [Matrix.det_apply, Finset.univ_perm_fin_succ, ← Finset.univ_product_univ] simp only [Finset.sum_map, Equiv.toEmbedding_apply, Finset.sum_product, Matrix.submatrix] refine Finset.sum_congr rfl fun i _ => Fin.cases ?_ (fun i => ?_) i · simp only [Fin.prod_univ_succ, Matrix.det_apply, Finset.mul_sum, Equiv.Perm.decomposeFin_symm_apply_zero, Fin.val_zero, one_mul, Equiv.Perm.decomposeFin.symm_sign, Equiv.swap_self, if_true, id, Equiv.Perm.decomposeFin_symm_apply_succ, Fin.succAbove_zero, Equiv.coe_refl, pow_zero, mul_smul_comm, of_apply] -- `univ_perm_fin_succ` gives a different embedding of `Perm (Fin n)` into -- `Perm (Fin n.succ)` than the determinant of the submatrix we want, -- permute `A` so that we get the correct one. 
have : (-1 : R) ^ (i : ℕ) = (Perm.sign i.cycleRange) := by simp [Fin.sign_cycleRange] rw [Fin.val_succ, pow_succ', this, mul_assoc, mul_assoc, mul_left_comm (ε _), ← det_permute, Matrix.det_apply, Finset.mul_sum, Finset.mul_sum] -- now we just need to move the corresponding parts to the same place refine Finset.sum_congr rfl fun σ _ => ?_ rw [Equiv.Perm.decomposeFin.symm_sign, if_neg (Fin.succ_ne_zero i)] calc ((-1 * Perm.sign σ : ℤ) • ∏ i', A (Perm.decomposeFin.symm (Fin.succ i, σ) i') i') = (-1 * Perm.sign σ : ℤ) • (A (Fin.succ i) 0 * ∏ i', A ((Fin.succ i).succAbove (Fin.cycleRange i (σ i'))) i'.succ) := by simp only [Fin.prod_univ_succ, Fin.succAbove_cycleRange, Equiv.Perm.decomposeFin_symm_apply_zero, Equiv.Perm.decomposeFin_symm_apply_succ] _ = -1 * (A (Fin.succ i) 0 * (Perm.sign σ : ℤ) • ∏ i', A ((Fin.succ i).succAbove (Fin.cycleRange i (σ i'))) i'.succ) := by simp [_root_.neg_mul, one_mul, zsmul_eq_mul, neg_smul, Fin.succAbove_cycleRange, mul_left_comm] /-- Laplacian expansion of the determinant of an `n+1 × n+1` matrix along row 0. -/ theorem det_succ_row_zero {n : ℕ} (A : Matrix (Fin n.succ) (Fin n.succ) R) : det A = ∑ j : Fin n.succ, (-1) ^ (j : ℕ) * A 0 j * det (A.submatrix Fin.succ j.succAbove) := by rw [← det_transpose A, det_succ_column_zero] refine Finset.sum_congr rfl fun i _ => ?_ rw [← det_transpose] simp only [transpose_apply, transpose_submatrix, transpose_transpose] /-- Laplacian expansion of the determinant of an `n+1 × n+1` matrix along row `i`. 
-/ theorem det_succ_row {n : ℕ} (A : Matrix (Fin n.succ) (Fin n.succ) R) (i : Fin n.succ) : det A = ∑ j : Fin n.succ, (-1) ^ (i + j : ℕ) * A i j * det (A.submatrix i.succAbove j.succAbove) := by simp_rw [pow_add, mul_assoc, ← mul_sum] have : det A = (-1 : R) ^ (i : ℕ) * (Perm.sign i.cycleRange⁻¹) * det A := by calc det A = ↑((-1 : ℤˣ) ^ (i : ℕ) * (-1 : ℤˣ) ^ (i : ℕ) : ℤˣ) * det A := by simp _ = (-1 : R) ^ (i : ℕ) * (Perm.sign i.cycleRange⁻¹) * det A := by simp [-Int.units_mul_self] rw [this, mul_assoc] congr rw [← det_permute, det_succ_row_zero] refine Finset.sum_congr rfl fun j _ => ?_ rw [mul_assoc, Matrix.submatrix_apply, submatrix_submatrix, id_comp, Function.comp_def, id] congr 3 · rw [Equiv.Perm.inv_def, Fin.cycleRange_symm_zero] · ext i' j' rw [Equiv.Perm.inv_def, Matrix.submatrix_apply, Matrix.submatrix_apply, Fin.cycleRange_symm_succ] /-- Laplacian expansion of the determinant of an `n+1 × n+1` matrix along column `j`. -/ theorem det_succ_column {n : ℕ} (A : Matrix (Fin n.succ) (Fin n.succ) R) (j : Fin n.succ) : det A = ∑ i : Fin n.succ, (-1) ^ (i + j : ℕ) * A i j * det (A.submatrix i.succAbove j.succAbove) := by rw [← det_transpose, det_succ_row _ j] refine Finset.sum_congr rfl fun i _ => ?_ rw [add_comm, ← det_transpose, transpose_apply, transpose_submatrix, transpose_transpose] /-- Determinant of 0x0 matrix -/ @[simp] theorem det_fin_zero {A : Matrix (Fin 0) (Fin 0) R} : det A = 1 := det_isEmpty /-- Determinant of 1x1 matrix -/ theorem det_fin_one (A : Matrix (Fin 1) (Fin 1) R) : det A = A 0 0 := det_unique A theorem det_fin_one_of (a : R) : det !![a] = a := det_fin_one _ /-- Determinant of 2x2 matrix -/ theorem det_fin_two (A : Matrix (Fin 2) (Fin 2) R) : det A = A 0 0 * A 1 1 - A 0 1 * A 1 0 := by simp only [det_succ_row_zero, det_unique, Fin.default_eq_zero, submatrix_apply, Fin.succ_zero_eq_one, Fin.sum_univ_succ, Fin.val_zero, Fin.zero_succAbove, univ_unique, Fin.val_succ, Fin.val_eq_zero, Fin.succ_succAbove_zero, sum_singleton] ring @[simp] 
theorem det_fin_two_of (a b c d : R) : Matrix.det !![a, b; c, d] = a * d - b * c := det_fin_two _ /-- Determinant of 3x3 matrix -/ theorem det_fin_three (A : Matrix (Fin 3) (Fin 3) R) : det A = A 0 0 * A 1 1 * A 2 2 - A 0 0 * A 1 2 * A 2 1 - A 0 1 * A 1 0 * A 2 2 + A 0 1 * A 1 2 * A 2 0 + A 0 2 * A 1 0 * A 2 1 - A 0 2 * A 1 1 * A 2 0 := by simp only [det_succ_row_zero, submatrix_apply, Fin.succ_zero_eq_one, submatrix_submatrix, det_unique, Fin.default_eq_zero, Function.comp_apply, Fin.succ_one_eq_two, Fin.sum_univ_succ, Fin.val_zero, Fin.zero_succAbove, univ_unique, Fin.val_succ, Fin.val_eq_zero, Fin.succ_succAbove_zero, sum_singleton, Fin.succ_succAbove_one] ring end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Determinant/Misc.lean
import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.Algebra.Ring.NegOnePow /-! # Miscellaneous results about determinant In this file, we collect various formulas about determinant of matrices. -/ assert_not_exists TwoSidedIdeal namespace Matrix variable {R : Type*} [CommRing R] /-- Let `M` be a `(n+1) × n` matrix whose row sums to zero. Then all the matrices obtained by deleting one row have the same determinant up to a sign. -/ theorem submatrix_succAbove_det_eq_negOnePow_submatrix_succAbove_det {n : ℕ} (M : Matrix (Fin (n + 1)) (Fin n) R) (hv : ∑ j, M j = 0) (j₁ j₂ : Fin (n + 1)) : (M.submatrix (Fin.succAbove j₁) id).det = Int.negOnePow (j₁ - j₂) • (M.submatrix (Fin.succAbove j₂) id).det := by suffices ∀ j, (M.submatrix (Fin.succAbove j) id).det = Int.negOnePow j • (M.submatrix (Fin.succAbove 0) id).det by rw [this j₁, this j₂, smul_smul, ← Int.negOnePow_add, sub_add_cancel] intro j induction j using Fin.induction with | zero => rw [Fin.val_zero, Nat.cast_zero, Int.negOnePow_zero, one_smul] | succ i h_ind => rw [Fin.val_succ, Nat.cast_add, Nat.cast_one, Int.negOnePow_succ, Units.neg_smul, ← neg_eq_iff_eq_neg, ← neg_one_smul R, ← det_updateRow_sum (M.submatrix i.succ.succAbove id) i (fun _ ↦ -1), ← Fin.coe_castSucc i, ← h_ind] congr ext a b simp_rw [neg_one_smul, updateRow_apply, Finset.sum_neg_distrib, Pi.neg_apply, Finset.sum_apply, submatrix_apply, id_eq] split_ifs with h · replace hv := congr_fun hv b rw [Fin.sum_univ_succAbove _ i.succ, Pi.add_apply, Finset.sum_apply] at hv rwa [h, Fin.succAbove_castSucc_self, neg_eq_iff_add_eq_zero, add_comm] · obtain h|h := ne_iff_lt_or_gt.mp h · rw [Fin.succAbove_castSucc_of_lt _ _ h, Fin.succAbove_of_succ_le _ _ (Fin.succ_lt_succ_iff.mpr h).le] · rw [Fin.succAbove_succ_of_lt _ _ h, Fin.succAbove_castSucc_of_le _ _ h.le] /-- Let `M` be a `(n+1) × n` matrix whose column sums to zero. Then all the matrices obtained by deleting one column have the same determinant up to a sign. 
-/ theorem submatrix_succAbove_det_eq_negOnePow_submatrix_succAbove_det' {n : ℕ} (M : Matrix (Fin n) (Fin (n + 1)) R) (hv : ∀ i, ∑ j, M i j = 0) (j₁ j₂ : Fin (n + 1)) : (M.submatrix id (Fin.succAbove j₁)).det = Int.negOnePow (j₁ - j₂) • (M.submatrix id (Fin.succAbove j₂)).det := by rw [← det_transpose, transpose_submatrix, submatrix_succAbove_det_eq_negOnePow_submatrix_succAbove_det M.transpose ?_ j₁ j₂, ← det_transpose, transpose_submatrix, transpose_transpose] ext simp_rw [Finset.sum_apply, transpose_apply, hv, Pi.zero_apply] /-- Let `M` be a `(n+1) × (n+1)` matrix. Assume that all columns, but the `j₀`-column, sums to zero. Then its determinant is, up to sign, the sum of the `j₀`-column times the determinant of the matrix obtained by deleting any row and the `j₀`-column. -/ theorem det_eq_sum_column_mul_submatrix_succAbove_succAbove_det {n : ℕ} (M : Matrix (Fin (n + 1)) (Fin (n + 1)) R) (i₀ j₀ : Fin (n + 1)) (hv : ∀ j ≠ j₀, ∑ i, M i j = 0) : M.det = (-1) ^ (i₀ + j₀ : ℕ) * (∑ i, M i j₀) * (M.submatrix (Fin.succAbove i₀) (Fin.succAbove j₀)).det := by rw [← one_smul R M.det, ← Matrix.det_updateRow_sum _ i₀ (fun _ ↦ 1), Matrix.det_succ_row _ i₀] simp only [updateRow_apply, if_true, one_smul, submatrix_updateRow_succAbove, Finset.sum_apply] rw [Fintype.sum_eq_add_sum_subtype_ne _ j₀] conv_lhs => enter [2, 2, i] rw [hv _ i.prop, mul_zero, zero_mul] simp [Finset.sum_const_zero, add_zero] /-- Let `M` be a `(n+1) × (n+1)` matrix. Assume that all rows, but the `i₀`-row, sums to zero. Then its determinant is, up to sign, the sum of the `i₀`-row times the determinant of the matrix obtained by deleting the `i₀`-row and any column. 
-/ theorem det_eq_sum_row_mul_submatrix_succAbove_succAbove_det {n : ℕ} (M : Matrix (Fin (n + 1)) (Fin (n + 1)) R) (i₀ j₀ : Fin (n + 1)) (hv : ∀ i ≠ i₀, ∑ j, M i j = 0) : M.det = (-1) ^ (i₀ + j₀ : ℕ) * (∑ j, M i₀ j) * (M.submatrix (Fin.succAbove i₀) (Fin.succAbove j₀)).det := by rw [← det_transpose, det_eq_sum_column_mul_submatrix_succAbove_succAbove_det _ j₀ i₀ (by simpa using hv), ← det_transpose, transpose_submatrix, transpose_transpose, add_comm] simp_rw [transpose_apply] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Matrix/Determinant/TotallyUnimodular.lean
import Mathlib.LinearAlgebra.Matrix.Determinant.Basic import Mathlib.Data.Matrix.ColumnRowPartitioned import Mathlib.Data.Sign.Basic /-! # Totally unimodular matrices This file defines totally unimodular matrices and provides basic API for them. ## Main definitions - `Matrix.IsTotallyUnimodular`: a matrix is totally unimodular iff every square submatrix (not necessarily contiguous) has determinant `0` or `1` or `-1`. ## Main results - `Matrix.isTotallyUnimodular_iff`: a matrix is totally unimodular iff every square submatrix (possibly with repeated rows and/or repeated columns) has determinant `0` or `1` or `-1`. - `Matrix.IsTotallyUnimodular.apply`: entry in a totally unimodular matrix is `0` or `1` or `-1`. -/ namespace Matrix variable {m m' n n' R : Type*} [CommRing R] /-- `A.IsTotallyUnimodular` means that every square submatrix of `A` (not necessarily contiguous) has determinant `0` or `1` or `-1`; that is, the determinant is in the range of `SignType.cast`. -/ def IsTotallyUnimodular (A : Matrix m n R) : Prop := ∀ k : ℕ, ∀ f : Fin k → m, ∀ g : Fin k → n, f.Injective → g.Injective → (A.submatrix f g).det ∈ Set.range SignType.cast lemma isTotallyUnimodular_iff (A : Matrix m n R) : A.IsTotallyUnimodular ↔ ∀ k : ℕ, ∀ f : Fin k → m, ∀ g : Fin k → n, (A.submatrix f g).det ∈ Set.range SignType.cast := by constructor <;> intro hA · intro k f g by_cases hfg : f.Injective ∧ g.Injective · exact hA k f g hfg.1 hfg.2 · use 0 rw [SignType.coe_zero, eq_comm] simp_rw [not_and_or, Function.not_injective_iff] at hfg obtain ⟨i, j, hfij, hij⟩ | ⟨i, j, hgij, hij⟩ := hfg · rw [← det_transpose, transpose_submatrix] apply det_zero_of_column_eq hij.symm simp [hfij] · apply det_zero_of_column_eq hij simp [hgij] · intro _ _ _ _ _ apply hA lemma isTotallyUnimodular_iff_fintype.{w} (A : Matrix m n R) : A.IsTotallyUnimodular ↔ ∀ (ι : Type w) [Fintype ι] [DecidableEq ι], ∀ f : ι → m, ∀ g : ι → n, (A.submatrix f g).det ∈ Set.range SignType.cast := by rw [isTotallyUnimodular_iff] constructor 
· intro hA ι _ _ f g specialize hA (Fintype.card ι) (f ∘ (Fintype.equivFin ι).symm) (g ∘ (Fintype.equivFin ι).symm) rwa [← submatrix_submatrix, det_submatrix_equiv_self] at hA · intro hA k f g specialize hA (ULift (Fin k)) (f ∘ Equiv.ulift) (g ∘ Equiv.ulift) rwa [← submatrix_submatrix, det_submatrix_equiv_self] at hA lemma IsTotallyUnimodular.apply {A : Matrix m n R} (hA : A.IsTotallyUnimodular) (i : m) (j : n) : A i j ∈ Set.range SignType.cast := by rw [isTotallyUnimodular_iff] at hA simpa using hA 1 (fun _ => i) (fun _ => j) lemma IsTotallyUnimodular.submatrix {A : Matrix m n R} (f : m' → m) (g : n' → n) (hA : A.IsTotallyUnimodular) : (A.submatrix f g).IsTotallyUnimodular := by simp only [isTotallyUnimodular_iff, submatrix_submatrix] at hA ⊢ intro _ _ _ apply hA lemma IsTotallyUnimodular.transpose {A : Matrix m n R} (hA : A.IsTotallyUnimodular) : Aᵀ.IsTotallyUnimodular := by simp only [isTotallyUnimodular_iff, ← transpose_submatrix, det_transpose] at hA ⊢ intro _ _ _ apply hA lemma transpose_isTotallyUnimodular_iff (A : Matrix m n R) : Aᵀ.IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by constructor <;> apply IsTotallyUnimodular.transpose lemma IsTotallyUnimodular.reindex {A : Matrix m n R} (em : m ≃ m') (en : n ≃ n') (hA : A.IsTotallyUnimodular) : (A.reindex em en).IsTotallyUnimodular := hA.submatrix _ _ lemma reindex_isTotallyUnimodular (A : Matrix m n R) (em : m ≃ m') (en : n ≃ n') : (A.reindex em en).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := ⟨fun hA => by simpa [Equiv.symm_apply_eq] using hA.reindex em.symm en.symm, fun hA => hA.reindex _ _⟩ /-- If `A` has no rows, then it is totally unimodular. -/ @[simp] lemma emptyRows_isTotallyUnimodular [IsEmpty m] (A : Matrix m n R) : A.IsTotallyUnimodular := by intro k f _ _ _ cases k with | zero => use 1; rw [submatrix_empty, det_fin_zero, SignType.coe_one] | succ => exact (IsEmpty.false (f 0)).elim /-- If `A` has no columns, then it is totally unimodular. 
-/ @[simp] lemma emptyCols_isTotallyUnimodular [IsEmpty n] (A : Matrix m n R) : A.IsTotallyUnimodular := A.transpose.emptyRows_isTotallyUnimodular.transpose /-- If `A` is totally unimodular and each row of `B` is all zeros except for at most a single `1` or a single `-1` then `fromRows A B` is totally unimodular. -/ lemma IsTotallyUnimodular.fromRows_unitlike [DecidableEq n] {A : Matrix m n R} {B : Matrix m' n R} (hA : A.IsTotallyUnimodular) (hB : Nonempty n → ∀ i : m', ∃ j : n, ∃ s : SignType, B i = Pi.single j s.cast) : (fromRows A B).IsTotallyUnimodular := by intro k f g hf hg induction k with | zero => use 1; simp | succ k ih => specialize hB ⟨g 0⟩ -- Either `f` is `inr` somewhere or `inl` everywhere obtain ⟨i, j, hfi⟩ | ⟨f', rfl⟩ : (∃ i j, f i = .inr j) ∨ (∃ f', f = .inl ∘ f') := by simp_rw [← Sum.isRight_iff, or_iff_not_imp_left, not_exists, Bool.not_eq_true, Sum.isRight_eq_false, Sum.isLeft_iff] intro hfr choose f' hf' using hfr exact ⟨f', funext hf'⟩ · have hAB := det_succ_row ((fromRows A B).submatrix f g) i simp only [submatrix_apply, hfi, fromRows_apply_inr] at hAB obtain ⟨j', s, hj'⟩ := hB j · simp only [hj'] at hAB by_cases hj'' : ∃ x, g x = j' · obtain ⟨x, rfl⟩ := hj'' rw [Fintype.sum_eq_single x fun y hxy => ?_, Pi.single_eq_same] at hAB · rw [hAB] change _ ∈ MonoidHom.mrange SignType.castHom.toMonoidHom refine mul_mem (mul_mem ?_ (Set.mem_range_self s)) ?_ · apply pow_mem exact ⟨-1, by simp⟩ · exact ih _ _ (hf.comp Fin.succAbove_right_injective) (hg.comp Fin.succAbove_right_injective) · simp [Pi.single_eq_of_ne, hg.ne_iff.mpr hxy] · rw [not_exists] at hj'' use 0 simpa [hj''] using hAB.symm · rw [isTotallyUnimodular_iff] at hA apply hA /-- If `A` is totally unimodular and each row of `B` is all zeros except for at most a single `1`, then `fromRows A B` is totally unimodular. 
-/ lemma fromRows_isTotallyUnimodular_iff_rows [DecidableEq n] {A : Matrix m n R} {B : Matrix m' n R} (hB : Nonempty n → ∀ i : m', ∃ j : n, ∃ s : SignType, B i = Pi.single j s.cast) : (fromRows A B).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := ⟨.submatrix Sum.inl id, fun hA => hA.fromRows_unitlike hB⟩ lemma fromRows_one_isTotallyUnimodular_iff [DecidableEq n] (A : Matrix m n R) : (fromRows A (1 : Matrix n n R)).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := fromRows_isTotallyUnimodular_iff_rows <| fun h i ↦ ⟨i, 1, funext fun j ↦ by simp [one_apply, Pi.single_apply, eq_comm]⟩ lemma one_fromRows_isTotallyUnimodular_iff [DecidableEq n] (A : Matrix m n R) : (fromRows (1 : Matrix n n R) A).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by have hA : fromRows (1 : Matrix n n R) A = (fromRows A (1 : Matrix n n R)).reindex (Equiv.sumComm m n) (Equiv.refl n) := by aesop rw [hA, reindex_isTotallyUnimodular, fromRows_one_isTotallyUnimodular_iff] lemma fromCols_one_isTotallyUnimodular_iff [DecidableEq m] (A : Matrix m n R) : (fromCols A (1 : Matrix m m R)).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by rw [← transpose_isTotallyUnimodular_iff, transpose_fromCols, transpose_one, fromRows_one_isTotallyUnimodular_iff, transpose_isTotallyUnimodular_iff] lemma one_fromCols_isTotallyUnimodular_iff [DecidableEq m] (A : Matrix m n R) : (fromCols (1 : Matrix m m R) A).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by rw [← transpose_isTotallyUnimodular_iff, transpose_fromCols, transpose_one, one_fromRows_isTotallyUnimodular_iff, transpose_isTotallyUnimodular_iff] alias ⟨_, IsTotallyUnimodular.fromRows_one⟩ := fromRows_one_isTotallyUnimodular_iff alias ⟨_, IsTotallyUnimodular.one_fromRows⟩ := one_fromRows_isTotallyUnimodular_iff alias ⟨_, IsTotallyUnimodular.fromCols_one⟩ := fromCols_one_isTotallyUnimodular_iff alias ⟨_, IsTotallyUnimodular.one_fromCols⟩ := one_fromCols_isTotallyUnimodular_iff lemma fromRows_replicateRow0_isTotallyUnimodular_iff (A : Matrix m n R) : (fromRows A 
(replicateRow m' 0)).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by classical refine fromRows_isTotallyUnimodular_iff_rows <| fun _ _ => ?_ inhabit n refine ⟨default, 0, ?_⟩ ext x simp [Pi.single_apply] lemma fromCols_replicateCol0_isTotallyUnimodular_iff (A : Matrix m n R) : (fromCols A (replicateCol n' 0)).IsTotallyUnimodular ↔ A.IsTotallyUnimodular := by rw [← transpose_isTotallyUnimodular_iff, transpose_fromCols, transpose_replicateCol, fromRows_replicateRow0_isTotallyUnimodular_iff, transpose_isTotallyUnimodular_iff] end Matrix
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/SMul.lean
import Mathlib.Algebra.Algebra.Defs import Mathlib.LinearAlgebra.Basis.Basic /-! # Bases and scalar multiplication This file defines the scalar multiplication of bases by multiplying each basis vector. -/ assert_not_exists Ordinal noncomputable section universe u open Function Set Submodule Finsupp variable {ι R R₂ M : Type*} namespace Module.Basis variable [Semiring R] [AddCommMonoid M] [Module R M] (b : Basis ι R M) section SMul variable {G G'} variable [Group G] [Group G'] variable [DistribMulAction G M] [DistribMulAction G' M] variable [SMulCommClass G R M] [SMulCommClass G' R M] /-- The action on a `Basis` by acting on each element. See also `Basis.unitsSMul` and `Basis.groupSMul`, for the cases when a different action is applied to each basis element. -/ instance : SMul G (Basis ι R M) where smul g b := b.map <| DistribMulAction.toLinearEquiv _ _ g @[simp] theorem smul_apply (g : G) (b : Basis ι R M) (i : ι) : (g • b) i = g • b i := rfl @[norm_cast] theorem coe_smul (g : G) (b : Basis ι R M) : ⇑(g • b) = g • ⇑b := rfl /-- When the group in question is the automorphisms, `•` coincides with `Basis.map`. 
-/ @[simp] theorem smul_eq_map (g : M ≃ₗ[R] M) (b : Basis ι R M) : g • b = b.map g := rfl @[simp] theorem repr_smul (g : G) (b : Basis ι R M) : (g • b).repr = (DistribMulAction.toLinearEquiv _ _ g).symm.trans b.repr := rfl instance : MulAction G (Basis ι R M) := Function.Injective.mulAction _ DFunLike.coe_injective coe_smul instance [SMulCommClass G G' M] : SMulCommClass G G' (Basis ι R M) where smul_comm _g _g' _b := DFunLike.ext _ _ fun _ => smul_comm _ _ _ instance [SMul G G'] [IsScalarTower G G' M] : IsScalarTower G G' (Basis ι R M) where smul_assoc _g _g' _b := DFunLike.ext _ _ fun _ => smul_assoc _ _ _ end SMul section CommSemiring variable {v : ι → M} {x y : M} theorem groupSMul_span_eq_top {G : Type*} [Group G] [SMul G R] [MulAction G M] [IsScalarTower G R M] {v : ι → M} (hv : Submodule.span R (Set.range v) = ⊤) {w : ι → G} : Submodule.span R (Set.range (w • v)) = ⊤ := by rw [eq_top_iff] intro j hj rw [← hv] at hj rw [Submodule.mem_span] at hj ⊢ refine fun p hp => hj p fun u hu => ?_ obtain ⟨i, rfl⟩ := hu have : ((w i)⁻¹ • (1 : R)) • w i • v i ∈ p := p.smul_mem ((w i)⁻¹ • (1 : R)) (hp ⟨i, rfl⟩) rwa [smul_one_smul, inv_smul_smul] at this /-- Given a basis `v` and a map `w` such that for all `i`, `w i` are elements of a group, `groupSMul` provides the basis corresponding to `w • v`. 
-/ def groupSMul {G : Type*} [Group G] [DistribMulAction G R] [DistribMulAction G M] [IsScalarTower G R M] [SMulCommClass G R M] (v : Basis ι R M) (w : ι → G) : Basis ι R M := Basis.mk (LinearIndependent.group_smul v.linearIndependent w) (groupSMul_span_eq_top v.span_eq).ge theorem groupSMul_apply {G : Type*} [Group G] [DistribMulAction G R] [DistribMulAction G M] [IsScalarTower G R M] [SMulCommClass G R M] {v : Basis ι R M} {w : ι → G} (i : ι) : v.groupSMul w i = (w • (v : ι → M)) i := mk_apply (LinearIndependent.group_smul v.linearIndependent w) (groupSMul_span_eq_top v.span_eq).ge i theorem units_smul_span_eq_top {v : ι → M} (hv : Submodule.span R (Set.range v) = ⊤) {w : ι → Rˣ} : Submodule.span R (Set.range (w • v)) = ⊤ := groupSMul_span_eq_top hv /-- Given a basis `v` and a map `w` such that for all `i`, `w i` is a unit, `unitsSMul` provides the basis corresponding to `w • v`. -/ def unitsSMul (v : Basis ι R M) (w : ι → Rˣ) : Basis ι R M := Basis.mk (LinearIndependent.units_smul v.linearIndependent w) (units_smul_span_eq_top v.span_eq).ge theorem unitsSMul_apply {v : Basis ι R M} {w : ι → Rˣ} (i : ι) : unitsSMul v w i = w i • v i := mk_apply (LinearIndependent.units_smul v.linearIndependent w) (units_smul_span_eq_top v.span_eq).ge i variable [CommSemiring R₂] [Module R₂ M] @[simp] theorem coord_unitsSMul (e : Basis ι R₂ M) (w : ι → R₂ˣ) (i : ι) : (unitsSMul e w).coord i = (w i)⁻¹ • e.coord i := by classical apply e.ext intro j trans ((unitsSMul e w).coord i) ((w j)⁻¹ • (unitsSMul e w) j) · simp [Basis.unitsSMul, ← mul_smul] simp only [Basis.coord_apply, LinearMap.smul_apply, Basis.repr_self, Units.smul_def, map_smul, Finsupp.single_apply] split_ifs with h <;> simp [h] @[simp] theorem repr_unitsSMul (e : Basis ι R₂ M) (w : ι → R₂ˣ) (v : M) (i : ι) : (e.unitsSMul w).repr v i = (w i)⁻¹ • e.repr v i := congr_arg (fun f : M →ₗ[R₂] R₂ => f v) (e.coord_unitsSMul w i) /-- A version of `unitsSMul` that uses `IsUnit`. 
-/ def isUnitSMul (v : Basis ι R M) {w : ι → R} (hw : ∀ i, IsUnit (w i)) : Basis ι R M := unitsSMul v fun i => (hw i).unit theorem isUnitSMul_apply {v : Basis ι R M} {w : ι → R} (hw : ∀ i, IsUnit (w i)) (i : ι) : v.isUnitSMul hw i = w i • v i := unitsSMul_apply i theorem repr_isUnitSMul {v : Basis ι R₂ M} {w : ι → R₂} (hw : ∀ i, IsUnit (w i)) (x : M) (i : ι) : (v.isUnitSMul hw).repr x i = (hw i).unit⁻¹ • v.repr x i := repr_unitsSMul _ _ _ _ end CommSemiring end Module.Basis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Fin.lean
import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.Pi /-! # Bases indexed by `Fin` -/ assert_not_exists Ordinal noncomputable section universe u open Function Set Submodule Finsupp variable {ι : Type*} {ι' : Type*} {R : Type*} {R₂ : Type*} {M : Type*} {M' : Type*} namespace Module open LinearMap variable {v : ι → M} variable [Ring R] [CommRing R₂] [AddCommGroup M] variable [Module R M] [Module R₂ M] variable {x y : M} variable (b : Basis ι R M) namespace Basis section Fin /-- Let `b` be a basis for a submodule `N` of `M`. If `y : M` is linear independent of `N` and `y` and `N` together span the whole of `M`, then there is a basis for `M` whose basis vectors are given by `Fin.cons y b`. -/ noncomputable def mkFinCons {n : ℕ} {N : Submodule R M} (y : M) (b : Basis (Fin n) R N) (hli : ∀ (c : R), ∀ x ∈ N, c • y + x = 0 → c = 0) (hsp : ∀ z : M, ∃ c : R, z + c • y ∈ N) : Basis (Fin (n + 1)) R M := have span_b : Submodule.span R (Set.range (N.subtype ∘ b)) = N := by rw [Set.range_comp, Submodule.span_image, b.span_eq, Submodule.map_subtype_top] Basis.mk (v := Fin.cons y (N.subtype ∘ b)) ((b.linearIndependent.map' N.subtype (Submodule.ker_subtype _)).fin_cons' _ _ (by rintro c ⟨x, hx⟩ hc rw [span_b] at hx exact hli c x hx hc)) fun x _ => by rw [Fin.range_cons, Submodule.mem_span_insert', span_b] exact hsp x @[simp] theorem coe_mkFinCons {n : ℕ} {N : Submodule R M} (y : M) (b : Basis (Fin n) R N) (hli : ∀ (c : R), ∀ x ∈ N, c • y + x = 0 → c = 0) (hsp : ∀ z : M, ∃ c : R, z + c • y ∈ N) : (mkFinCons y b hli hsp : Fin (n + 1) → M) = Fin.cons y ((↑) ∘ b) := by unfold mkFinCons exact coe_mk (v := Fin.cons y (N.subtype ∘ b)) _ _ /-- Let `b` be a basis for a submodule `N ≤ O`. If `y ∈ O` is linear independent of `N` and `y` and `N` together span the whole of `O`, then there is a basis for `O` whose basis vectors are given by `Fin.cons y b`. 
-/ noncomputable def mkFinConsOfLE {n : ℕ} {N O : Submodule R M} (y : M) (yO : y ∈ O) (b : Basis (Fin n) R N) (hNO : N ≤ O) (hli : ∀ (c : R), ∀ x ∈ N, c • y + x = 0 → c = 0) (hsp : ∀ z ∈ O, ∃ c : R, z + c • y ∈ N) : Basis (Fin (n + 1)) R O := mkFinCons ⟨y, yO⟩ (b.map (Submodule.comapSubtypeEquivOfLe hNO).symm) (fun c x hc hx => hli c x (Submodule.mem_comap.mp hc) (congr_arg ((↑) : O → M) hx)) fun z => hsp z z.2 @[simp] theorem coe_mkFinConsOfLE {n : ℕ} {N O : Submodule R M} (y : M) (yO : y ∈ O) (b : Basis (Fin n) R N) (hNO : N ≤ O) (hli : ∀ (c : R), ∀ x ∈ N, c • y + x = 0 → c = 0) (hsp : ∀ z ∈ O, ∃ c : R, z + c • y ∈ N) : (mkFinConsOfLE y yO b hNO hli hsp : Fin (n + 1) → O) = Fin.cons ⟨y, yO⟩ (Submodule.inclusion hNO ∘ b) := coe_mkFinCons _ _ _ _ /-- The basis of `R × R` given by the two vectors `(1, 0)` and `(0, 1)`. -/ protected def finTwoProd (R : Type*) [Semiring R] : Basis (Fin 2) R (R × R) := Basis.ofEquivFun (LinearEquiv.finTwoArrow R R).symm @[simp] theorem finTwoProd_zero (R : Type*) [Semiring R] : Basis.finTwoProd R 0 = (1, 0) := by simp [Basis.finTwoProd, LinearEquiv.finTwoArrow] @[simp] theorem finTwoProd_one (R : Type*) [Semiring R] : Basis.finTwoProd R 1 = (0, 1) := by simp [Basis.finTwoProd, LinearEquiv.finTwoArrow] @[simp] theorem coe_finTwoProd_repr {R : Type*} [Semiring R] (x : R × R) : ⇑((Basis.finTwoProd R).repr x) = ![x.fst, x.snd] := rfl end Fin end Basis end Module
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Bilinear.lean
import Mathlib.LinearAlgebra.BilinearMap import Mathlib.LinearAlgebra.Basis.Defs /-! # Lemmas about bilinear maps with a basis over each argument -/ open Module namespace LinearMap variable {ι₁ ι₂ : Type*} variable {R R₂ S S₂ M N P Rₗ : Type*} variable {Mₗ Nₗ Pₗ : Type*} -- Could weaken [CommSemiring Rₗ] to [SMulCommClass Rₗ Rₗ Pₗ], but might impact performance variable [Semiring R] [Semiring S] [Semiring R₂] [Semiring S₂] [CommSemiring Rₗ] section AddCommMonoid variable [AddCommMonoid M] [AddCommMonoid N] [AddCommMonoid P] variable [AddCommMonoid Mₗ] [AddCommMonoid Nₗ] [AddCommMonoid Pₗ] variable [Module R M] [Module S N] [Module R₂ P] [Module S₂ P] variable [Module Rₗ Mₗ] [Module Rₗ Nₗ] [Module Rₗ Pₗ] variable [SMulCommClass S₂ R₂ P] variable {ρ₁₂ : R →+* R₂} {σ₁₂ : S →+* S₂} variable (b₁ : Basis ι₁ R M) (b₂ : Basis ι₂ S N) (b₁' : Basis ι₁ Rₗ Mₗ) (b₂' : Basis ι₂ Rₗ Nₗ) /-- Two bilinear maps are equal when they are equal on all basis vectors. -/ theorem ext_basis {B B' : M →ₛₗ[ρ₁₂] N →ₛₗ[σ₁₂] P} (h : ∀ i j, B (b₁ i) (b₂ j) = B' (b₁ i) (b₂ j)) : B = B' := b₁.ext fun i => b₂.ext fun j => h i j /-- Write out `B x y` as a sum over `B (b i) (b j)` if `b` is a basis. Version for semi-bilinear maps, see `sum_repr_mul_repr_mul` for the bilinear version. -/ theorem sum_repr_mul_repr_mulₛₗ {B : M →ₛₗ[ρ₁₂] N →ₛₗ[σ₁₂] P} (x y) : ((b₁.repr x).sum fun i xi => (b₂.repr y).sum fun j yj => ρ₁₂ xi • σ₁₂ yj • B (b₁ i) (b₂ j)) = B x y := by conv_rhs => rw [← b₁.linearCombination_repr x, ← b₂.linearCombination_repr y] simp_rw [Finsupp.linearCombination_apply, Finsupp.sum, map_sum₂, map_sum, LinearMap.map_smulₛₗ₂, LinearMap.map_smulₛₗ] /-- Write out `B x y` as a sum over `B (b i) (b j)` if `b` is a basis. Version for bilinear maps, see `sum_repr_mul_repr_mulₛₗ` for the semi-bilinear version. 
-/ theorem sum_repr_mul_repr_mul {B : Mₗ →ₗ[Rₗ] Nₗ →ₗ[Rₗ] Pₗ} (x y) : ((b₁'.repr x).sum fun i xi => (b₂'.repr y).sum fun j yj => xi • yj • B (b₁' i) (b₂' j)) = B x y := by conv_rhs => rw [← b₁'.linearCombination_repr x, ← b₂'.linearCombination_repr y] simp_rw [Finsupp.linearCombination_apply, Finsupp.sum, map_sum₂, map_sum, LinearMap.map_smul₂, LinearMap.map_smul] end AddCommMonoid end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/MulOpposite.lean
import Mathlib.LinearAlgebra.FiniteDimensional.Defs /-! # Basis of an opposite space This file defines the basis of an opposite space and shows that the opposite space is finite-dimensional and free when the original space is. -/ open Module MulOpposite variable {R H : Type*} namespace Module.Basis variable {ι : Type*} [Semiring R] [AddCommMonoid H] [Module R H] /-- The multiplicative opposite of a basis: `b.mulOpposite i ↦ op (b i)`. -/ noncomputable def mulOpposite (b : Basis ι R H) : Basis ι R Hᵐᵒᵖ := b.map (opLinearEquiv R) @[simp] theorem mulOpposite_apply (b : Basis ι R H) (i : ι) : b.mulOpposite i = op (b i) := rfl theorem mulOpposite_repr_eq (b : Basis ι R H) : b.mulOpposite.repr = (opLinearEquiv R).symm.trans b.repr := rfl @[simp] theorem repr_unop_eq_mulOpposite_repr (b : Basis ι R H) (x : Hᵐᵒᵖ) : b.repr (unop x) = b.mulOpposite.repr x := rfl @[simp] theorem mulOpposite_repr_op (b : Basis ι R H) (x : H) : b.mulOpposite.repr (op x) = b.repr x := rfl end Module.Basis namespace MulOpposite instance [DivisionRing R] [AddCommGroup H] [Module R H] [FiniteDimensional R H] : FiniteDimensional R Hᵐᵒᵖ := FiniteDimensional.of_finite_basis (Basis.ofVectorSpace R H).mulOpposite (Basis.ofVectorSpaceIndex R H).toFinite instance [Semiring R] [AddCommMonoid H] [Module R H] [Module.Free R H] : Module.Free R Hᵐᵒᵖ := let ⟨b⟩ := Module.Free.exists_basis (R := R) (M := H) Module.Free.of_basis b.2.mulOpposite theorem rank [Semiring R] [StrongRankCondition R] [AddCommMonoid H] [Module R H] [Module.Free R H] : Module.rank R Hᵐᵒᵖ = Module.rank R H := LinearEquiv.nonempty_equiv_iff_rank_eq.mp ⟨(opLinearEquiv R).symm⟩ theorem finrank [DivisionRing R] [AddCommGroup H] [Module R H] : Module.finrank R Hᵐᵒᵖ = Module.finrank R H := by let b := Basis.ofVectorSpace R H rw [Module.finrank_eq_nat_card_basis b, Module.finrank_eq_nat_card_basis b.mulOpposite] end MulOpposite
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Flag.lean
import Mathlib.Data.Fin.FlagRange import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.Dual.Basis import Mathlib.RingTheory.SimpleRing.Basic /-! # Flag of submodules defined by a basis In this file we define `Basis.flag b k`, where `b : Basis (Fin n) R M`, `k : Fin (n + 1)`, to be the subspace spanned by the first `k` vectors of the basis `b`. We also prove some lemmas about this definition. -/ open Set Submodule namespace Module.Basis section Semiring variable {R M : Type*} [Semiring R] [AddCommMonoid M] [Module R M] {n : ℕ} {b : Basis (Fin n) R M} {i j : Fin (n + 1)} /-- The subspace spanned by the first `k` vectors of the basis `b`. -/ def flag (b : Basis (Fin n) R M) (k : Fin (n + 1)) : Submodule R M := .span R <| b '' {i | i.castSucc < k} @[simp] theorem flag_zero (b : Basis (Fin n) R M) : b.flag 0 = ⊥ := by simp [flag] @[simp] theorem flag_last (b : Basis (Fin n) R M) : b.flag (.last n) = ⊤ := by simp [flag] theorem flag_le_iff (b : Basis (Fin n) R M) {k p} : b.flag k ≤ p ↔ ∀ i : Fin n, i.castSucc < k → b i ∈ p := span_le.trans forall_mem_image theorem flag_succ (b : Basis (Fin n) R M) (k : Fin n) : b.flag k.succ = (R ∙ b k) ⊔ b.flag k.castSucc := by simp only [flag, Fin.castSucc_lt_castSucc_iff] simp [Fin.castSucc_lt_iff_succ_le, le_iff_eq_or_lt, setOf_or, image_insert_eq, span_insert] theorem self_mem_flag (b : Basis (Fin n) R M) {i : Fin n} {k : Fin (n + 1)} (h : i.castSucc < k) : b i ∈ b.flag k := subset_span <| mem_image_of_mem _ h @[simp] theorem self_mem_flag_iff [Nontrivial R] (b : Basis (Fin n) R M) {i : Fin n} {k : Fin (n + 1)} : b i ∈ b.flag k ↔ i.castSucc < k := b.self_mem_span_image @[gcongr, mono] theorem flag_mono (b : Basis (Fin n) R M) : Monotone b.flag := Fin.monotone_iff_le_succ.2 fun k ↦ by rw [flag_succ]; exact le_sup_right theorem isChain_range_flag (b : Basis (Fin n) R M) : IsChain (· ≤ ·) (range b.flag) := b.flag_mono.isChain_range @[gcongr, mono] theorem flag_strictMono [Nontrivial R] (b : Basis (Fin n) R M) : 
StrictMono b.flag := Fin.strictMono_iff_lt_succ.2 fun _ ↦ by simp [flag_succ] @[deprecated flag_mono (since := "2025-10-20")] lemma flag_le_flag (hij : i ≤ j) : b.flag i ≤ b.flag j := flag_mono _ hij @[deprecated flag_strictMono (since := "2025-10-20")] lemma flag_lt_flag [Nontrivial R] (hij : i < j) : b.flag i < b.flag j := flag_strictMono _ hij end Semiring section CommRing variable {R M : Type*} [CommRing R] [AddCommGroup M] [Module R M] {n : ℕ} @[simp] theorem flag_le_ker_coord_iff [Nontrivial R] (b : Basis (Fin n) R M) {k : Fin (n + 1)} {l : Fin n} : b.flag k ≤ LinearMap.ker (b.coord l) ↔ k ≤ l.castSucc := by simp [flag_le_iff, Finsupp.single_apply_eq_zero, imp_false, imp_not_comm] theorem flag_le_ker_coord (b : Basis (Fin n) R M) {k : Fin (n + 1)} {l : Fin n} (h : k ≤ l.castSucc) : b.flag k ≤ LinearMap.ker (b.coord l) := by nontriviality R exact b.flag_le_ker_coord_iff.2 h theorem flag_le_ker_dual (b : Basis (Fin n) R M) (k : Fin n) : b.flag k.castSucc ≤ LinearMap.ker (b.dualBasis k) := by nontriviality R rw [coe_dualBasis, b.flag_le_ker_coord_iff] end CommRing section DivisionRing variable {K V : Type*} [DivisionRing K] [AddCommGroup V] [Module K V] {n : ℕ} theorem flag_covBy (b : Basis (Fin n) K V) (i : Fin n) : b.flag i.castSucc ⋖ b.flag i.succ := by rw [flag_succ] apply covBy_span_singleton_sup simp theorem flag_wcovBy (b : Basis (Fin n) K V) (i : Fin n) : b.flag i.castSucc ⩿ b.flag i.succ := (b.flag_covBy i).wcovBy /-- Range of `Basis.flag` as a `Flag`. -/ @[simps!] def toFlag (b : Basis (Fin n) K V) : Flag (Submodule K V) := .rangeFin b.flag b.flag_zero b.flag_last b.flag_wcovBy @[simp] theorem mem_toFlag (b : Basis (Fin n) K V) {p : Submodule K V} : p ∈ b.toFlag ↔ ∃ k, b.flag k = p := Iff.rfl theorem isMaxChain_range_flag (b : Basis (Fin n) K V) : IsMaxChain (· ≤ ·) (range b.flag) := b.toFlag.maxChain end DivisionRing end Module.Basis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Prod.lean
import Mathlib.LinearAlgebra.Prod import Mathlib.LinearAlgebra.Basis.Defs import Mathlib.LinearAlgebra.Finsupp.SumProd import Mathlib.LinearAlgebra.FreeModule.Basic /-! # Bases for the product of modules -/ assert_not_exists Ordinal noncomputable section universe u open Function Set Submodule Finsupp variable {ι : Type*} {ι' : Type*} {R : Type*} {R₂ : Type*} {M : Type*} {M' : Type*} namespace Module.Basis variable [Semiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid M'] [Module R M'] (b : Basis ι R M) section Prod variable (b' : Basis ι' R M') /-- `Basis.prod` maps an `ι`-indexed basis for `M` and an `ι'`-indexed basis for `M'` to an `ι ⊕ ι'`-index basis for `M × M'`. For the specific case of `R × R`, see also `Basis.finTwoProd`. -/ protected def prod : Basis (ι ⊕ ι') R (M × M') := ofRepr ((b.repr.prodCongr b'.repr).trans (Finsupp.sumFinsuppLEquivProdFinsupp R).symm) @[simp] theorem prod_repr_inl (x) (i) : (b.prod b').repr x (Sum.inl i) = b.repr x.1 i := rfl @[simp] theorem prod_repr_inr (x) (i) : (b.prod b').repr x (Sum.inr i) = b'.repr x.2 i := rfl theorem prod_apply_inl_fst (i) : (b.prod b' (Sum.inl i)).1 = b i := b.repr.injective <| by ext j simp only [Basis.prod, Basis.coe_ofRepr, LinearEquiv.symm_trans_apply, LinearEquiv.prodCongr_symm, LinearEquiv.prodCongr_apply, b.repr.apply_symm_apply, LinearEquiv.symm_symm, repr_self, Finsupp.fst_sumFinsuppLEquivProdFinsupp] apply Finsupp.single_apply_left Sum.inl_injective theorem prod_apply_inr_fst (i) : (b.prod b' (Sum.inr i)).1 = 0 := b.repr.injective <| by ext i simp only [Basis.prod, Basis.coe_ofRepr, LinearEquiv.symm_trans_apply, LinearEquiv.prodCongr_symm, LinearEquiv.prodCongr_apply, b.repr.apply_symm_apply, LinearEquiv.symm_symm, Finsupp.fst_sumFinsuppLEquivProdFinsupp, LinearEquiv.map_zero, Finsupp.zero_apply] apply Finsupp.single_eq_of_ne Sum.inl_ne_inr theorem prod_apply_inl_snd (i) : (b.prod b' (Sum.inl i)).2 = 0 := b'.repr.injective <| by ext j simp only [Basis.prod, Basis.coe_ofRepr, 
LinearEquiv.symm_trans_apply, LinearEquiv.prodCongr_symm, LinearEquiv.prodCongr_apply, b'.repr.apply_symm_apply, LinearEquiv.symm_symm, Finsupp.snd_sumFinsuppLEquivProdFinsupp, LinearEquiv.map_zero, Finsupp.zero_apply] apply Finsupp.single_eq_of_ne Sum.inr_ne_inl theorem prod_apply_inr_snd (i) : (b.prod b' (Sum.inr i)).2 = b' i := b'.repr.injective <| by ext i simp only [Basis.prod, Basis.coe_ofRepr, LinearEquiv.symm_trans_apply, LinearEquiv.prodCongr_symm, LinearEquiv.prodCongr_apply, b'.repr.apply_symm_apply, LinearEquiv.symm_symm, repr_self, Finsupp.snd_sumFinsuppLEquivProdFinsupp] apply Finsupp.single_apply_left Sum.inr_injective @[simp] theorem prod_apply (i) : b.prod b' i = Sum.elim (LinearMap.inl R M M' ∘ b) (LinearMap.inr R M M' ∘ b') i := by ext <;> cases i <;> simp only [prod_apply_inl_fst, Sum.elim_inl, LinearMap.inl_apply, prod_apply_inr_fst, Sum.elim_inr, LinearMap.inr_apply, prod_apply_inl_snd, prod_apply_inr_snd, Function.comp] end Prod end Basis namespace Free variable (R M N : Type*) [Semiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid N] [Module R N] instance prod [Module.Free R M] [Module.Free R N] : Module.Free R (M × N) := .of_basis <| (Module.Free.chooseBasis R M).prod (Module.Free.chooseBasis R N) end Free end Module
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/VectorSpace.lean
import Mathlib.LinearAlgebra.FreeModule.Basic import Mathlib.LinearAlgebra.LinearIndependent.Lemmas import Mathlib.LinearAlgebra.LinearPMap import Mathlib.LinearAlgebra.Projection /-! # Bases in a vector space This file provides results for bases of a vector space. Some of these results should be merged with the results on free modules. We state these results in a separate file to the results on modules to avoid an import cycle. ## Main statements * `Basis.ofVectorSpace` states that every vector space has a basis. * `Module.Free.of_divisionRing` states that every vector space is a free module. ## Tags basis, bases -/ open Function Module Set Submodule variable {ι : Type*} {ι' : Type*} {K : Type*} {V : Type*} {V' : Type*} section DivisionRing variable [DivisionRing K] [AddCommGroup V] [AddCommGroup V'] [Module K V] [Module K V'] variable {v : ι → V} {s t : Set V} {x y z : V} open Submodule namespace Module.Basis section ExistsBasis /-- If `s` is a linear independent set of vectors, we can extend it to a basis. -/ noncomputable def extend (hs : LinearIndepOn K id s) : Basis (hs.extend (subset_univ s)) K V := Basis.mk (hs.linearIndepOn_extend _).linearIndependent_restrict (SetLike.coe_subset_coe.mp <| by simpa using hs.subset_span_extend (subset_univ s)) theorem extend_apply_self (hs : LinearIndepOn K id s) (x : hs.extend _) : Basis.extend hs x = x := Basis.mk_apply _ _ _ @[simp] theorem coe_extend (hs : LinearIndepOn K id s) : ⇑(Basis.extend hs) = ((↑) : _ → _) := funext (extend_apply_self hs) theorem range_extend (hs : LinearIndepOn K id s) : range (Basis.extend hs) = hs.extend (subset_univ _) := by rw [coe_extend, Subtype.range_coe_subtype, setOf_mem_eq] /-- Auxiliary definition: the index for the new basis vectors in `Basis.sumExtend`. The specific value of this definition should be considered an implementation detail. 
-/ def sumExtendIndex (hs : LinearIndependent K v) : Set V := LinearIndepOn.extend hs.linearIndepOn_id (subset_univ _) \ range v /-- If `v` is a linear independent family of vectors, extend it to a basis indexed by a sum type. -/ noncomputable def sumExtend (hs : LinearIndependent K v) : Basis (ι ⊕ sumExtendIndex hs) K V := let s := Set.range v let e : ι ≃ s := Equiv.ofInjective v hs.injective let b := hs.linearIndepOn_id.extend (subset_univ (Set.range v)) (Basis.extend hs.linearIndepOn_id).reindex <| Equiv.symm <| calc ι ⊕ (b \ s : Set V) ≃ s ⊕ (b \ s : Set V) := Equiv.sumCongr e (Equiv.refl _) _ ≃ b := haveI := Classical.decPred (· ∈ s) Equiv.Set.sumDiffSubset (hs.linearIndepOn_id.subset_extend _) theorem subset_extend {s : Set V} (hs : LinearIndepOn K id s) : s ⊆ hs.extend (Set.subset_univ _) := hs.subset_extend _ /-- If `s` is a family of linearly independent vectors contained in a set `t` spanning `V`, then one can get a basis of `V` containing `s` and contained in `t`. -/ noncomputable def extendLe (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) : Basis (hs.extend hst) K V := Basis.mk ((hs.linearIndepOn_extend _).linearIndependent ..) 
(le_trans ht <| Submodule.span_le.2 <| by simpa using hs.subset_span_extend hst) theorem extendLe_apply_self (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) (x : hs.extend hst) : Basis.extendLe hs hst ht x = x := Basis.mk_apply _ _ _ @[simp] theorem coe_extendLe (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) : ⇑(Basis.extendLe hs hst ht) = ((↑) : _ → _) := funext (extendLe_apply_self hs hst ht) theorem range_extendLe (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) : range (Basis.extendLe hs hst ht) = hs.extend hst := by rw [coe_extendLe, Subtype.range_coe_subtype, setOf_mem_eq] theorem subset_extendLe (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) : s ⊆ range (Basis.extendLe hs hst ht) := (range_extendLe hs hst ht).symm ▸ hs.subset_extend hst theorem extendLe_subset (hs : LinearIndepOn K id s) (hst : s ⊆ t) (ht : ⊤ ≤ span K t) : range (Basis.extendLe hs hst ht) ⊆ t := (range_extendLe hs hst ht).symm ▸ hs.extend_subset hst /-- If a set `s` spans the space, this is a basis contained in `s`. -/ noncomputable def ofSpan (hs : ⊤ ≤ span K s) : Basis ((linearIndepOn_empty K id).extend (empty_subset s)) K V := extendLe (linearIndependent_empty K V) (empty_subset s) hs theorem ofSpan_apply_self (hs : ⊤ ≤ span K s) (x : (linearIndepOn_empty K id).extend (empty_subset s)) : Basis.ofSpan hs x = x := extendLe_apply_self (linearIndependent_empty K V) (empty_subset s) hs x @[simp] theorem coe_ofSpan (hs : ⊤ ≤ span K s) : ⇑(ofSpan hs) = ((↑) : _ → _) := funext (ofSpan_apply_self hs) theorem range_ofSpan (hs : ⊤ ≤ span K s) : range (ofSpan hs) = (linearIndepOn_empty K id).extend (empty_subset s) := by rw [coe_ofSpan, Subtype.range_coe_subtype, setOf_mem_eq] theorem ofSpan_subset (hs : ⊤ ≤ span K s) : range (ofSpan hs) ⊆ s := extendLe_subset (linearIndependent_empty K V) (empty_subset s) hs section variable (K V) /-- A set used to index `Basis.ofVectorSpace`. 
-/ noncomputable def ofVectorSpaceIndex : Set V := (linearIndepOn_empty K id).extend (subset_univ _) /-- Each vector space has a basis. -/ noncomputable def ofVectorSpace : Basis (ofVectorSpaceIndex K V) K V := Basis.extend (linearIndependent_empty K V) @[stacks 09FN "Generalized from fields to division rings."] instance (priority := 100) _root_.Module.Free.of_divisionRing : Module.Free K V := Module.Free.of_basis (ofVectorSpace K V) theorem ofVectorSpace_apply_self (x : ofVectorSpaceIndex K V) : ofVectorSpace K V x = x := by unfold ofVectorSpace exact Basis.mk_apply _ _ _ @[simp] theorem coe_ofVectorSpace : ⇑(ofVectorSpace K V) = ((↑) : _ → _) := funext fun x => ofVectorSpace_apply_self K V x theorem ofVectorSpaceIndex.linearIndependent : LinearIndependent K ((↑) : ofVectorSpaceIndex K V → V) := by convert (ofVectorSpace K V).linearIndependent ext x rw [ofVectorSpace_apply_self] theorem range_ofVectorSpace : range (ofVectorSpace K V) = ofVectorSpaceIndex K V := range_extend _ theorem exists_basis : ∃ s : Set V, Nonempty (Basis s K V) := ⟨ofVectorSpaceIndex K V, ⟨ofVectorSpace K V⟩⟩ end end ExistsBasis end Module.Basis open Fintype variable (K V) theorem VectorSpace.card_fintype [Fintype K] [Fintype V] : ∃ n : ℕ, card V = card K ^ n := by classical exact ⟨card (Basis.ofVectorSpaceIndex K V), Module.card_fintype (Basis.ofVectorSpace K V)⟩ section AtomsOfSubmoduleLattice variable {K V} /-- For a module over a division ring, the span of a nonzero element is an atom of the lattice of submodules. 
-/ theorem nonzero_span_atom (v : V) (hv : v ≠ 0) : IsAtom (span K {v} : Submodule K V) := by constructor · rw [Submodule.ne_bot_iff] exact ⟨v, ⟨mem_span_singleton_self v, hv⟩⟩ · intro T hT by_contra h apply hT.2 change span K {v} ≤ T simp_rw [span_singleton_le_iff_mem, ← Ne.eq_def, Submodule.ne_bot_iff] at * rcases h with ⟨s, ⟨hs, hz⟩⟩ rcases mem_span_singleton.1 (hT.1 hs) with ⟨a, rfl⟩ rcases eq_or_ne a 0 with rfl | h · simp only [zero_smul, ne_eq, not_true] at hz · rwa [T.smul_mem_iff h] at hs /-- The atoms of the lattice of submodules of a module over a division ring are the submodules equal to the span of a nonzero element of the module. -/ theorem atom_iff_nonzero_span (W : Submodule K V) : IsAtom W ↔ ∃ v ≠ 0, W = span K {v} := by refine ⟨fun h => ?_, fun h => ?_⟩ · obtain ⟨hbot, h⟩ := h rcases (Submodule.ne_bot_iff W).1 hbot with ⟨v, ⟨hW, hv⟩⟩ refine ⟨v, ⟨hv, ?_⟩⟩ by_contra heq specialize h (span K {v}) rw [span_singleton_eq_bot, lt_iff_le_and_ne] at h exact hv (h ⟨(span_singleton_le_iff_mem v W).2 hW, Ne.symm heq⟩) · rcases h with ⟨v, ⟨hv, rfl⟩⟩ exact nonzero_span_atom v hv /-- The lattice of submodules of a module over a division ring is atomistic. 
-/ instance : IsAtomistic (Submodule K V) := CompleteLattice.isAtomistic_iff.2 fun W => by refine ⟨_, submodule_eq_sSup_le_nonzero_spans W, ?_⟩ rintro _ ⟨w, ⟨_, ⟨hw, rfl⟩⟩⟩ exact nonzero_span_atom w hw end AtomsOfSubmoduleLattice variable {K V} theorem LinearMap.exists_leftInverse_of_injective (f : V →ₗ[K] V') (hf_inj : LinearMap.ker f = ⊥) : ∃ g : V' →ₗ[K] V, g.comp f = LinearMap.id := by let B := Basis.ofVectorSpaceIndex K V let hB := Basis.ofVectorSpace K V have hB₀ : _ := hB.linearIndependent.linearIndepOn_id have : LinearIndepOn K _root_.id (f '' B) := by have h₁ : LinearIndepOn K _root_.id (f '' Set.range (Basis.ofVectorSpace K V)) := LinearIndepOn.image (f := f) hB₀ (show Disjoint _ _ by simp [hf_inj]) rwa [Basis.range_ofVectorSpace K V] at h₁ let C := this.extend (subset_univ _) have BC := this.subset_extend (subset_univ _) let hC := Basis.extend this haveI Vinh : Inhabited V := ⟨0⟩ refine ⟨(hC.constr ℕ : _ → _) (C.restrict (invFun f)), hB.ext fun b => ?_⟩ rw [image_subset_iff] at BC have fb_eq : f b = hC ⟨f b, BC b.2⟩ := by change f b = Basis.extend this _ simp_rw [Basis.extend_apply_self] dsimp rw [Basis.ofVectorSpace_apply_self, fb_eq, hC.constr_basis] exact leftInverse_invFun (LinearMap.ker_eq_bot.1 hf_inj) _ theorem Submodule.exists_isCompl (p : Submodule K V) : ∃ q : Submodule K V, IsCompl p q := let ⟨f, hf⟩ := p.subtype.exists_leftInverse_of_injective p.ker_subtype ⟨LinearMap.ker f, LinearMap.isCompl_of_proj <| LinearMap.ext_iff.1 hf⟩ instance Submodule.complementedLattice : ComplementedLattice (Submodule K V) := ⟨Submodule.exists_isCompl⟩ /-- Any linear map `f : p →ₗ[K] V'` defined on a subspace `p` can be extended to the whole space. 
-/ theorem LinearMap.exists_extend {p : Submodule K V} (f : p →ₗ[K] V') : ∃ g : V →ₗ[K] V', g.comp p.subtype = f := let ⟨g, hg⟩ := p.subtype.exists_leftInverse_of_injective p.ker_subtype ⟨f.comp g, by rw [LinearMap.comp_assoc, hg, f.comp_id]⟩ theorem LinearMap.exists_extend_of_notMem {p : Submodule K V} {v : V} (f : p →ₗ[K] V') (hv : v ∉ p) (y : V') : ∃ g : V →ₗ[K] V', g.comp p.subtype = f ∧ g v = y := by rcases (LinearPMap.supSpanSingleton ⟨p, f⟩ v y hv).toFun.exists_extend with ⟨g, hg⟩ refine ⟨g, ?_, ?_⟩ · ext x have := LinearPMap.supSpanSingleton_apply_mk_of_mem ⟨p, f⟩ y hv x.2 simpa using congr($hg _).trans this · have := LinearPMap.supSpanSingleton_apply_self ⟨p, f⟩ y hv simpa using congr($hg _).trans this @[deprecated (since := "2025-05-23")] alias LinearMap.exists_extend_of_not_mem := LinearMap.exists_extend_of_notMem open Submodule LinearMap theorem Submodule.exists_le_ker_of_notMem {p : Submodule K V} {v : V} (hv : v ∉ p) : ∃ f : V →ₗ[K] K, f v ≠ 0 ∧ p ≤ ker f := by rcases LinearMap.exists_extend_of_notMem (0 : p →ₗ[K] K) hv 1 with ⟨f, hpf, hfv⟩ refine ⟨f, by simp [hfv], fun x hx ↦ ?_⟩ simpa using congr($hpf ⟨x, hx⟩) /-- If `V` and `V'` are nontrivial vector spaces over a field `K`, the space of `K`-linear maps between them is nontrivial. -/ instance [Nontrivial V] [Nontrivial V'] : Nontrivial (V →ₗ[K] V') := by obtain ⟨v, hv⟩ := exists_ne (0 : V) obtain ⟨w, hw⟩ := exists_ne (0 : V') have : v ∉ (⊥ : Submodule K V) := by simp only [mem_bot, hv, not_false_eq_true] obtain ⟨g, _, hg⟩ := LinearMap.exists_extend_of_notMem (K := K) 0 this w exact ⟨g, 0, DFunLike.ne_iff.mpr ⟨v, by simp_all⟩⟩ @[deprecated (since := "2025-05-23")] alias Submodule.exists_le_ker_of_not_mem := Submodule.exists_le_ker_of_notMem /-- If `p < ⊤` is a subspace of a vector space `V`, then there exists a nonzero linear map `f : V →ₗ[K] K` such that `p ≤ ker f`. 
-/ theorem Submodule.exists_le_ker_of_lt_top (p : Submodule K V) (hp : p < ⊤) : ∃ (f : V →ₗ[K] K), f ≠ 0 ∧ p ≤ ker f := by rcases SetLike.exists_of_lt hp with ⟨v, -, hpv⟩ rcases exists_le_ker_of_notMem hpv with ⟨f, hfv, hpf⟩ exact ⟨f, ne_of_apply_ne (· v) hfv, hpf⟩ theorem quotient_prod_linearEquiv (p : Submodule K V) : Nonempty (((V ⧸ p) × p) ≃ₗ[K] V) := let ⟨q, hq⟩ := p.exists_isCompl Nonempty.intro <| ((quotientEquivOfIsCompl p q hq).prodCongr (LinearEquiv.refl _ _)).trans (prodEquivOfIsCompl q p hq.symm) end DivisionRing
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Basic.lean
import Mathlib.LinearAlgebra.Basis.Defs import Mathlib.LinearAlgebra.LinearIndependent.Basic import Mathlib.LinearAlgebra.Span.Basic /-! # Basic results on bases The main goal of this file is to show the equivalence between bases and families of vectors that are linearly independent and whose span is the whole space. There are also various lemmas on bases on specific spaces (such as empty or singletons). ## Main results * `Basis.linearIndependent`: the basis vectors are linear independent. * `Basis.span_eq`: the basis vectors span the whole space. * `Basis.mk`: construct a basis out of `v : ι → M` such that `LinearIndependent v` and `span (range v) = ⊤`. -/ assert_not_exists Ordinal noncomputable section universe u open Function Set Submodule Finsupp variable {ι : Type*} {ι' : Type*} {R : Type*} {R₂ : Type*} {M : Type*} {M' : Type*} namespace Module.Basis variable [Semiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid M'] [Module R M'] (b : Basis ι R M) section Properties theorem repr_range : LinearMap.range (b.repr : M →ₗ[R] ι →₀ R) = Finsupp.supported R R univ := by rw [LinearEquiv.range, Finsupp.supported_univ] theorem mem_span_repr_support (m : M) : m ∈ span R (b '' (b.repr m).support) := (Finsupp.mem_span_image_iff_linearCombination _).2 ⟨b.repr m, by simp [Finsupp.mem_supported_support]⟩ theorem repr_support_subset_of_mem_span (s : Set ι) {m : M} (hm : m ∈ span R (b '' s)) : ↑(b.repr m).support ⊆ s := by rcases (Finsupp.mem_span_image_iff_linearCombination _).1 hm with ⟨l, hl, rfl⟩ rwa [repr_linearCombination, ← Finsupp.mem_supported R l] theorem mem_span_image {m : M} {s : Set ι} : m ∈ span R (b '' s) ↔ ↑(b.repr m).support ⊆ s := ⟨repr_support_subset_of_mem_span _ _, fun h ↦ span_mono (Set.image_mono h) (mem_span_repr_support b _)⟩ @[simp] theorem self_mem_span_image [Nontrivial R] {i : ι} {s : Set ι} : b i ∈ span R (b '' s) ↔ i ∈ s := by simp [mem_span_image, Finsupp.support_single_ne_zero] protected theorem mem_span (x : M) : x ∈ span R (range b) := 
span_mono (image_subset_range _ _) (mem_span_repr_support b x) @[simp] protected theorem span_eq : span R (range b) = ⊤ := eq_top_iff.mpr fun x _ => b.mem_span x theorem _root_.Submodule.eq_top_iff_forall_basis_mem {p : Submodule R M} : p = ⊤ ↔ ∀ i, b i ∈ p := by refine ⟨fun h ↦ by simp [h], fun h ↦ ?_⟩ replace h : range b ⊆ p := by rintro - ⟨i, rfl⟩; exact h i simpa using span_mono (R := R) h theorem index_nonempty (b : Basis ι R M) [Nontrivial M] : Nonempty ι := by obtain ⟨x, y, ne⟩ : ∃ x y : M, x ≠ y := Nontrivial.exists_pair_ne obtain ⟨i, _⟩ := not_forall.mp (mt b.ext_elem_iff.2 ne) exact ⟨i⟩ protected theorem linearIndependent : LinearIndependent R b := fun x y hxy => by rw [← b.repr_linearCombination x, hxy, b.repr_linearCombination y] protected theorem ne_zero [Nontrivial R] (i) : b i ≠ 0 := b.linearIndependent.ne_zero i end Properties variable {v : ι → M} {x y : M} section Mk variable (hli : LinearIndependent R v) (hsp : ⊤ ≤ span R (range v)) /-- A linear independent family of vectors spanning the whole module is a basis. -/ protected noncomputable def mk : Basis ι R M := .ofRepr { hli.repr.comp (LinearMap.id.codRestrict _ fun _ => hsp Submodule.mem_top) with invFun := Finsupp.linearCombination _ v left_inv := fun x => hli.linearCombination_repr ⟨x, _⟩ right_inv := fun _ => hli.repr_eq rfl } @[simp] theorem mk_repr : (Basis.mk hli hsp).repr x = hli.repr ⟨x, hsp Submodule.mem_top⟩ := rfl theorem mk_apply (i : ι) : Basis.mk hli hsp i = v i := show Finsupp.linearCombination _ v _ = v i by simp @[simp] theorem coe_mk : ⇑(Basis.mk hli hsp) = v := funext (mk_apply _ _) end Mk section Coord variable (hli : LinearIndependent R v) (hsp : ⊤ ≤ span R (range v)) variable {hli hsp} /-- Given a basis, the `i`th element of the dual basis evaluates to 1 on the `i`th element of the basis. 
-/ theorem mk_coord_apply_eq (i : ι) : (Basis.mk hli hsp).coord i (v i) = 1 := show hli.repr ⟨v i, Submodule.subset_span (mem_range_self i)⟩ i = 1 by simp [hli.repr_eq_single i] /-- Given a basis, the `i`th element of the dual basis evaluates to 0 on the `j`th element of the basis if `j ≠ i`. -/ theorem mk_coord_apply_ne {i j : ι} (h : j ≠ i) : (Basis.mk hli hsp).coord i (v j) = 0 := show hli.repr ⟨v j, Submodule.subset_span (mem_range_self j)⟩ i = 0 by simp [hli.repr_eq_single j, h] /-- Given a basis, the `i`th element of the dual basis evaluates to the Kronecker delta on the `j`th element of the basis. -/ theorem mk_coord_apply [DecidableEq ι] {i j : ι} : (Basis.mk hli hsp).coord i (v j) = if j = i then 1 else 0 := by rcases eq_or_ne j i with h | h · simp only [h, if_true, mk_coord_apply_eq i] · simp only [h, if_false, mk_coord_apply_ne h] end Coord section Span variable (hli : LinearIndependent R v) /-- A linear independent family of vectors is a basis for their span. -/ protected noncomputable def span : Basis ι R (span R (range v)) := Basis.mk (linearIndependent_span hli) <| by intro x _ have : ∀ i, v i ∈ span R (range v) := fun i ↦ subset_span (Set.mem_range_self _) have h₁ : (((↑) : span R (range v) → M) '' range fun i => ⟨v i, this i⟩) = range v := by simp only [← Set.range_comp] rfl have h₂ : map (Submodule.subtype (span R (range v))) (span R (range fun i => ⟨v i, this i⟩)) = span R (range v) := by rw [← span_image, Submodule.coe_subtype, h₁] have h₃ : (x : M) ∈ map (Submodule.subtype (span R (range v))) (span R (Set.range fun i => Subtype.mk (v i) (this i))) := by rw [h₂] apply Subtype.mem x rcases mem_map.1 h₃ with ⟨y, hy₁, hy₂⟩ have h_x_eq_y : x = y := by rw [Subtype.ext_iff, ← hy₂] simp rwa [h_x_eq_y] protected theorem span_apply (i : ι) : (Basis.span hli i : M) = v i := congr_arg ((↑) : span R (range v) → M) <| Basis.mk_apply _ _ _ end Span /-- Any basis is a maximal linear independent set. 
-/ theorem maximal [Nontrivial R] (b : Basis ι R M) : b.linearIndependent.Maximal := fun w hi h => by -- If `w` is strictly bigger than `range b`, apply le_antisymm h -- then choose some `x ∈ w \ range b`, intro x p by_contra q -- and write it in terms of the basis. have e := b.linearCombination_repr x -- This then expresses `x` as a linear combination -- of elements of `w` which are in the range of `b`, let u : ι ↪ w := ⟨fun i => ⟨b i, h ⟨i, rfl⟩⟩, fun i i' r => b.injective (by simpa only [Subtype.mk_eq_mk] using r)⟩ simp_rw [Finsupp.linearCombination_apply] at e change ((b.repr x).sum fun (i : ι) (a : R) ↦ a • (u i : M)) = ((⟨x, p⟩ : w) : M) at e rw [← Finsupp.sum_embDomain (f := u) (g := fun x r ↦ r • (x : M)), ← Finsupp.linearCombination_apply] at e -- Now we can contradict the linear independence of `hi` refine hi.linearCombination_ne_of_notMem_support _ ?_ e simp only [Finset.mem_map, Finsupp.support_embDomain] rintro ⟨j, -, W⟩ simp only [u, Embedding.coeFn_mk, Subtype.mk_eq_mk] at W apply q ⟨j, W⟩ instance uniqueBasis [Subsingleton R] : Unique (Basis ι R M) := ⟨⟨⟨default⟩⟩, fun ⟨b⟩ => by rw [Subsingleton.elim b]⟩ variable (b : Basis ι R M) section Singleton /-- `Basis.singleton ι R` is the basis sending the unique element of `ι` to `1 : R`. 
-/ protected def singleton (ι R : Type*) [Unique ι] [Semiring R] : Basis ι R R := ofRepr { toFun := fun x => Finsupp.single default x invFun := fun f => f default left_inv := fun x => by simp right_inv := fun f => Finsupp.unique_ext (by simp) map_add' := fun x y => by simp map_smul' := fun c x => by simp } @[simp] theorem singleton_apply (ι R : Type*) [Unique ι] [Semiring R] (i) : Basis.singleton ι R i = 1 := apply_eq_iff.mpr (by simp [Basis.singleton]) @[simp] theorem singleton_repr (ι R : Type*) [Unique ι] [Semiring R] (x i) : (Basis.singleton ι R).repr x i = x := by simp [Basis.singleton, Unique.eq_default i] @[simp] theorem coe_singleton {ι R : Type*} [Unique ι] [Semiring R] : ⇑(Basis.singleton ι R) = 1 := by ext; simp end Singleton section Empty variable (M) /-- If `M` is a subsingleton and `ι` is empty, this is the unique `ι`-indexed basis for `M`. -/ protected def empty [Subsingleton M] [IsEmpty ι] : Basis ι R M := ofRepr 0 instance emptyUnique [Subsingleton M] [IsEmpty ι] : Unique (Basis ι R M) where default := Basis.empty M uniq := fun _ => congr_arg ofRepr <| Subsingleton.elim _ _ end Empty section NoZeroSMulDivisors -- Can't be an instance because the basis can't be inferred. 
protected theorem noZeroSMulDivisors [NoZeroDivisors R] (b : Basis ι R M) : NoZeroSMulDivisors R M := ⟨fun {c x} hcx => by exact or_iff_not_imp_right.mpr fun hx => by rw [← b.linearCombination_repr x, ← LinearMap.map_smul, ← map_zero (linearCombination R b)] at hcx have := b.linearIndependent hcx rw [smul_eq_zero] at this exact this.resolve_right fun hr => hx (b.repr.map_eq_zero_iff.mp hr)⟩ protected theorem smul_eq_zero [NoZeroDivisors R] (b : Basis ι R M) {c : R} {x : M} : c • x = 0 ↔ c = 0 ∨ x = 0 := @smul_eq_zero _ _ _ _ _ b.noZeroSMulDivisors _ _ end NoZeroSMulDivisors section Singleton theorem basis_singleton_iff {R M : Type*} [Ring R] [Nontrivial R] [AddCommGroup M] [Module R M] [NoZeroSMulDivisors R M] (ι : Type*) [Unique ι] : Nonempty (Basis ι R M) ↔ ∃ x ≠ 0, ∀ y : M, ∃ r : R, r • x = y := by constructor · rintro ⟨b⟩ refine ⟨b default, b.linearIndependent.ne_zero _, ?_⟩ simpa [span_singleton_eq_top_iff, Set.range_unique] using b.span_eq · rintro ⟨x, nz, w⟩ refine ⟨ofRepr <| LinearEquiv.symm { toFun := fun f => f default • x invFun := fun y => Finsupp.single default (w y).choose left_inv := fun f => Finsupp.unique_ext ?_ right_inv := fun y => ?_ map_add' := fun y z => ?_ map_smul' := fun c y => ?_ }⟩ · simp [Finsupp.add_apply, add_smul] · simp only [Finsupp.coe_smul, Pi.smul_apply, RingHom.id_apply] rw [← smul_assoc] · refine smul_left_injective _ nz ?_ simp only [Finsupp.single_eq_same] exact (w (f default • x)).choose_spec · simp only [Finsupp.single_eq_same] exact (w y).choose_spec end Singleton end Module.Basis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Cardinality.lean
import Mathlib.LinearAlgebra.Basis.Defs import Mathlib.LinearAlgebra.LinearIndependent.Defs import Mathlib.LinearAlgebra.Span.Basic import Mathlib.SetTheory.Cardinal.Pigeonhole /-! # Results relating bases and cardinality. -/ section Finite open Module Basis Cardinal Set Submodule Finsupp universe u v w w' variable {R : Type u} {M : Type v} section Semiring variable [Semiring R] [AddCommMonoid M] [Module R M] lemma finite_of_span_finite_eq_top_finsupp [Nontrivial M] {ι : Type*} {s : Set (ι →₀ M)} (hs : s.Finite) (hsspan : span R s = ⊤) : Finite ι := suffices ⋃ i ∈ s, i.support = .univ from .of_finite_univ (this ▸ hs.biUnion fun _ _ ↦ by simp) have ⟨x, hx⟩ := exists_ne (0 : M) eq_univ_of_forall fun j ↦ (top_unique (hsspan.ge.trans (span_le_supported_biUnion_support R s)) ▸ mem_top (x := single j x)) ((mem_support_single ..).mpr ⟨rfl, hx⟩) -- One might hope that a finite spanning set implies that any linearly independent set is finite. -- While this is true over a division ring -- (simply because any linearly independent set can be extended to a basis), -- or more generally over a ring satisfying the strong rank condition -- (which covers all commutative rings; see `LinearIndependent.finite_of_le_span_finite`), -- this is not true in general. -- For example, the left ideal generated by the variables in a noncommutative polynomial ring -- (`FreeAlgebra R ι`) in infinitely many variables (indexed by `ι`) is free -- with an infinite basis (consisting of the variables). -- As another example, for any commutative ring R, the ring of column-finite matrices -- `Module.End R (ℕ →₀ R)` is isomorphic to `ℕ → Module.End R (ℕ →₀ R)` as a module over itself, -- which also clearly contains an infinite linearly independent set. /-- Over any nontrivial ring, the existence of a finite spanning set implies that any basis is finite. 
-/ lemma basis_finite_of_finite_spans [Nontrivial R] {s : Set M} (hs : s.Finite) (hsspan : span R s = ⊤) {ι : Type w} (b : Basis ι R M) : Finite ι := by have := congr(($hsspan).map b.repr) rw [← span_image, Submodule.map_top, LinearEquivClass.range] at this exact finite_of_span_finite_eq_top_finsupp (hs.image _) this end Semiring section Ring variable [Semiring R] [AddCommMonoid M] [Nontrivial R] [Module R M] -- From [Les familles libres maximales d'un module ont-elles le meme cardinal?][lazarus1973] /-- Over any ring `R`, if `b` is a basis for a module `M`, and `s` is a maximal linearly independent set, then the union of the supports of `x ∈ s` (when written out in the basis `b`) is all of `b`. -/ theorem union_support_maximal_linearIndependent_eq_range_basis {ι : Type w} (b : Basis ι R M) {κ : Type w'} (v : κ → M) (ind : LinearIndependent R v) (m : ind.Maximal) : ⋃ k, ((b.repr (v k)).support : Set ι) = Set.univ := by -- If that's not the case, by_contra h simp only [← Ne.eq_def, ne_univ_iff_exists_notMem, mem_iUnion, not_exists_not, Finsupp.mem_support_iff, Finset.mem_coe] at h -- We have some basis element `b i` which is not in the support of any of the `v k`. obtain ⟨i, w⟩ := h have repr_eq_zero (l) : b.repr (linearCombination R v l) i = 0 := by simp [linearCombination_apply, Finsupp.sum, w] -- Using this, we'll construct a linearly independent family strictly larger than `v`, -- by also using this `b i`. let v' (o : Option κ) : M := o.elim (b i) v have r : range v ⊆ range v' := by rintro - ⟨k, rfl⟩; exact ⟨some k, rfl⟩ have r' : b i ∉ range v := fun ⟨k, p⟩ ↦ by simpa [w] using congr(b.repr $p i) have r'' : range v ≠ range v' := (r' <| · ▸ ⟨none, rfl⟩) -- The key step in the proof is checking that this strictly larger family is linearly independent. 
have i' : LinearIndepOn R id (range v') := by apply LinearIndependent.linearIndepOn_id rw [linearIndependent_iffₛ] intro l l' z simp_rw [linearCombination_option, v', Option.elim] at z change _ + linearCombination R v l.some = _ + linearCombination R v l'.some at z -- We have some equality between linear combinations of `b i` and the `v k`, -- and want to show the coefficients are equal. ext (_ | a) -- We'll first show the coefficient of `b i` is zero, -- by expressing the `v k` in the basis `b`, and using that the `v k` have no `b i` term. · simpa [repr_eq_zero] using congr(b.repr $z i) -- All the other coefficients are also equal, because `v` is linear independent, -- by comparing the coefficients in the basis `b`. have l₁ : l.some = l'.some := ind <| b.repr.injective <| ext fun j ↦ by obtain rfl | ne := eq_or_ne i j · simp_rw [repr_eq_zero] classical simpa [single_apply, ne] using congr(b.repr $z j) exact DFunLike.congr_fun l₁ a exact r'' (m (range v') i' r) /-- Over any ring `R`, if `b` is an infinite basis for a module `M`, and `s` is a maximal linearly independent set, then the cardinality of `b` is bounded by the cardinality of `s`. -/ theorem infinite_basis_le_maximal_linearIndependent' {ι : Type w} (b : Basis ι R M) [Infinite ι] {κ : Type w'} (v : κ → M) (i : LinearIndependent R v) (m : i.Maximal) : Cardinal.lift.{w'} #ι ≤ Cardinal.lift.{w} #κ := by let Φ := fun k : κ => (b.repr (v k)).support have w₁ : #ι ≤ #(Set.range Φ) := by apply Cardinal.le_range_of_union_finset_eq_top exact union_support_maximal_linearIndependent_eq_range_basis b v i m have w₂ : Cardinal.lift.{w'} #(Set.range Φ) ≤ Cardinal.lift.{w} #κ := Cardinal.mk_range_le_lift exact (Cardinal.lift_le.mpr w₁).trans w₂ -- (See `infinite_basis_le_maximal_linearIndependent'` for the more general version -- where the index types can live in different universes.) 
/-- Over any ring `R`, if `b` is an infinite basis for a module `M`, and `s` is a maximal linearly independent set, then the cardinality of `b` is bounded by the cardinality of `s`. -/ theorem infinite_basis_le_maximal_linearIndependent {ι : Type w} (b : Basis ι R M) [Infinite ι] {κ : Type w} (v : κ → M) (i : LinearIndependent R v) (m : i.Maximal) : #ι ≤ #κ := Cardinal.lift_le.mp (infinite_basis_le_maximal_linearIndependent' b v i m) end Ring end Finite
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Defs.lean
import Mathlib.Data.Fintype.BigOperators import Mathlib.LinearAlgebra.Finsupp.LinearCombination /-! # Bases This file defines bases in a module or vector space. It is inspired by Isabelle/HOL's linear algebra, and hence indirectly by HOL Light. ## Main definitions All definitions are given for families of vectors, i.e. `v : ι → M` where `M` is the module or vector space and `ι : Type*` is an arbitrary indexing type. * `Basis ι R M` is the type of `ι`-indexed `R`-bases for a module `M`, represented by a linear equiv `M ≃ₗ[R] ι →₀ R`. * the basis vectors of a basis `b : Basis ι R M` are available as `b i`, where `i : ι` * `Basis.repr` is the isomorphism sending `x : M` to its coordinates `Basis.repr x : ι →₀ R`. The converse, turning this isomorphism into a basis, is called `Basis.ofRepr`. * If `ι` is finite, there is a variant of `repr` called `Basis.equivFun b : M ≃ₗ[R] ι → R` (saving you from having to work with `Finsupp`). The converse, turning this isomorphism into a basis, is called `Basis.ofEquivFun`. * `Basis.reindex` uses an equiv to map a basis to a different indexing set. * `Basis.map` uses a linear equiv to map a basis to a different module. * `Basis.constr`: given `b : Basis ι R M` and `f : ι → M`, construct a linear map `g` so that `g (b i) = f i`. * `Basis.coord`: `b.coord i x` is the `i`-th coordinate of a vector `x` with respect to the basis `b`. ## Main results * `Basis.ext` states that two linear maps are equal if they coincide on a basis. Similar results are available for linear equivs (if they coincide on the basis vectors), elements (if their coordinates coincide) and the functions `b.repr` and `⇑b`. ## Implementation notes We use families instead of sets because it allows us to say that two identical vectors are linearly dependent. For bases, this is useful as well because we can easily derive ordered bases by using an ordered index type `ι`. 
## Tags basis, bases -/ assert_not_exists LinearMap.pi LinearIndependent Cardinal -- TODO: assert_not_exists Submodule -- (should be possible after splitting `Mathlib/LinearAlgebra/Finsupp/LinearCombination.lean`) noncomputable section universe u open Function Set Submodule Finsupp variable {ι : Type*} {ι' : Type*} {R : Type*} {R₂ : Type*} {K : Type*} variable {M : Type*} {M' M'' : Type*} {V : Type u} {V' : Type*} namespace Module variable [Semiring R] variable [AddCommMonoid M] [Module R M] [AddCommMonoid M'] [Module R M'] variable (ι R M) in /-- A `Basis ι R M` for a module `M` is the type of `ι`-indexed `R`-bases of `M`. The basis vectors are available as `DFunLike.coe (b : Basis ι R M) : ι → M`. To turn a linear independent family of vectors spanning `M` into a basis, use `Basis.mk`. They are internally represented as linear equivs `M ≃ₗ[R] (ι →₀ R)`, available as `Basis.repr`. -/ structure Basis where /-- `Basis.ofRepr` constructs a basis given an assignment of coordinates to each vector. -/ ofRepr :: /-- `repr` is the linear equivalence sending a vector `x` to its coordinates: the `c`s such that `x = ∑ i, c i`. -/ repr : M ≃ₗ[R] ι →₀ R namespace Basis instance : Inhabited (Basis ι R (ι →₀ R)) := ⟨.ofRepr (LinearEquiv.refl _ _)⟩ variable (b b₁ : Basis ι R M) (i : ι) (c : R) (x : M) section repr theorem repr_injective : Injective (repr : Basis ι R M → M ≃ₗ[R] ι →₀ R) := fun f g h => by cases f; cases g; congr /-- `b i` is the `i`th basis vector. 
-/ instance instFunLike : FunLike (Basis ι R M) ι M where coe b i := b.repr.symm (Finsupp.single i 1) coe_injective' f g h := repr_injective <| LinearEquiv.symm_bijective.injective <| LinearEquiv.toLinearMap_injective <| by ext; exact congr_fun h _ @[simp] theorem coe_ofRepr (e : M ≃ₗ[R] ι →₀ R) : ⇑(ofRepr e) = fun i => e.symm (Finsupp.single i 1) := rfl protected theorem injective [Nontrivial R] : Injective b := b.repr.symm.injective.comp fun _ _ => (Finsupp.single_left_inj (one_ne_zero : (1 : R) ≠ 0)).mp theorem repr_symm_single_one : b.repr.symm (Finsupp.single i 1) = b i := rfl theorem repr_symm_single : b.repr.symm (Finsupp.single i c) = c • b i := calc b.repr.symm (Finsupp.single i c) = b.repr.symm (c • Finsupp.single i (1 : R)) := by { rw [Finsupp.smul_single', mul_one] } _ = c • b i := by rw [LinearEquiv.map_smul, repr_symm_single_one] @[simp] theorem repr_self : b.repr (b i) = Finsupp.single i 1 := LinearEquiv.apply_symm_apply _ _ theorem repr_self_apply (j) [Decidable (i = j)] : b.repr (b i) j = if i = j then 1 else 0 := by rw [repr_self, Finsupp.single_apply] @[simp] theorem repr_symm_apply (v) : b.repr.symm v = Finsupp.linearCombination R b v := calc b.repr.symm v = b.repr.symm (v.sum Finsupp.single) := by simp _ = v.sum fun i vi => b.repr.symm (Finsupp.single i vi) := map_finsuppSum .. _ = Finsupp.linearCombination R b v := by simp only [repr_symm_single, Finsupp.linearCombination_apply] @[simp] theorem coe_repr_symm : ↑b.repr.symm = Finsupp.linearCombination R b := LinearMap.ext fun v => b.repr_symm_apply v @[simp] theorem repr_linearCombination (v) : b.repr (Finsupp.linearCombination _ b v) = v := by rw [← b.coe_repr_symm] exact b.repr.apply_symm_apply v @[simp] theorem linearCombination_repr : Finsupp.linearCombination _ b (b.repr x) = x := by rw [← b.coe_repr_symm] exact b.repr.symm_apply_apply x end repr section Map variable (f : M ≃ₗ[R] M') /-- Apply the linear equivalence `f` to the basis vectors. 
-/ @[simps] protected def map : Basis ι R M' := ofRepr (f.symm.trans b.repr) @[simp] theorem map_apply (i) : b.map f i = f (b i) := rfl theorem coe_map : (b.map f : ι → M') = f ∘ b := rfl end Map section Reindex variable (b' : Basis ι' R M') variable (e : ι ≃ ι') /-- `b.reindex (e : ι ≃ ι')` is a basis indexed by `ι'` -/ def reindex : Basis ι' R M := .ofRepr (b.repr.trans (Finsupp.domLCongr e)) theorem reindex_apply (i' : ι') : b.reindex e i' = b (e.symm i') := show (b.repr.trans (Finsupp.domLCongr e)).symm (Finsupp.single i' 1) = b.repr.symm (Finsupp.single (e.symm i') 1) by rw [LinearEquiv.symm_trans_apply, Finsupp.domLCongr_symm, Finsupp.domLCongr_single] @[simp] theorem coe_reindex : (b.reindex e : ι' → M) = b ∘ e.symm := funext (b.reindex_apply e) theorem repr_reindex_apply (i' : ι') : (b.reindex e).repr x i' = b.repr x (e.symm i') := show (Finsupp.domLCongr e : _ ≃ₗ[R] _) (b.repr x) i' = _ by simp @[simp] theorem repr_reindex : (b.reindex e).repr x = (b.repr x).mapDomain e := DFunLike.ext _ _ <| by simp [repr_reindex_apply] @[simp] theorem reindex_refl : b.reindex (Equiv.refl ι) = b := by simp [reindex] /-- `simp` can prove this as `Basis.coe_reindex` + `EquivLike.range_comp` -/ theorem range_reindex : Set.range (b.reindex e) = Set.range b := by simp [coe_reindex, range_comp] end Reindex end Basis section Fintype open Basis open Fintype /-- A module over `R` with a finite basis is linearly equivalent to functions from its basis to `R`. -/ def Basis.equivFun [Finite ι] (b : Basis ι R M) : M ≃ₗ[R] ι → R := LinearEquiv.trans b.repr ({ Finsupp.equivFunOnFinite with toFun := (↑) map_add' := Finsupp.coe_add map_smul' := Finsupp.coe_smul } : (ι →₀ R) ≃ₗ[R] ι → R) /-- A module over a finite ring that admits a finite basis is finite. 
-/ def fintypeOfFintype [Fintype ι] (b : Basis ι R M) [Fintype R] : Fintype M := haveI := Classical.decEq ι Fintype.ofEquiv _ b.equivFun.toEquiv.symm theorem card_fintype [Fintype ι] (b : Basis ι R M) [Fintype R] [Fintype M] : card M = card R ^ card ι := by classical calc card M = card (ι → R) := card_congr b.equivFun.toEquiv _ = card R ^ card ι := card_fun /-- Given a basis `v` indexed by `ι`, the canonical linear equivalence between `ι → R` and `M` maps a function `x : ι → R` to the linear combination `∑_i x i • v i`. -/ @[simp] theorem Basis.equivFun_symm_apply [Fintype ι] (b : Basis ι R M) (x : ι → R) : b.equivFun.symm x = ∑ i, x i • b i := by simp [Basis.equivFun, Finsupp.linearCombination_apply, sum_fintype, equivFunOnFinite] @[simp] theorem Basis.equivFun_apply [Finite ι] (b : Basis ι R M) (u : M) : b.equivFun u = b.repr u := rfl @[simp] theorem Basis.map_equivFun [Finite ι] (b : Basis ι R M) (f : M ≃ₗ[R] M') : (b.map f).equivFun = f.symm.trans b.equivFun := rfl theorem Basis.sum_equivFun [Fintype ι] (b : Basis ι R M) (u : M) : ∑ i, b.equivFun u i • b i = u := by rw [← b.equivFun_symm_apply, b.equivFun.symm_apply_apply] @[simp] theorem Basis.sum_repr [Fintype ι] (b : Basis ι R M) (u : M) : ∑ i, b.repr u i • b i = u := b.sum_equivFun u @[simp] theorem Basis.equivFun_self [Finite ι] [DecidableEq ι] (b : Basis ι R M) (i j : ι) : b.equivFun (b i) j = if i = j then 1 else 0 := by rw [b.equivFun_apply, b.repr_self_apply] theorem Basis.repr_sum_self [Fintype ι] (b : Basis ι R M) (c : ι → R) : b.repr (∑ i, c i • b i) = c := by simp_rw [← b.equivFun_symm_apply, ← b.equivFun_apply, b.equivFun.apply_symm_apply] /-- Define a basis by mapping each vector `x : M` to its coordinates `e x : ι → R`, as long as `ι` is finite. 
-/ def Basis.ofEquivFun [Finite ι] (e : M ≃ₗ[R] ι → R) : Basis ι R M := .ofRepr <| e.trans <| LinearEquiv.symm <| Finsupp.linearEquivFunOnFinite R R ι @[simp] theorem Basis.ofEquivFun_repr_apply [Finite ι] (e : M ≃ₗ[R] ι → R) (x : M) (i : ι) : (Basis.ofEquivFun e).repr x i = e x i := rfl @[simp] theorem Basis.coe_ofEquivFun [Finite ι] [DecidableEq ι] (e : M ≃ₗ[R] ι → R) : (Basis.ofEquivFun e : ι → M) = fun i => e.symm (Pi.single i 1) := funext fun i => e.injective <| funext fun j => by simp [Basis.ofEquivFun, ← Finsupp.single_eq_pi_single] @[simp] theorem Basis.ofEquivFun_equivFun [Finite ι] (v : Basis ι R M) : Basis.ofEquivFun v.equivFun = v := Basis.repr_injective <| by ext; rfl @[simp] theorem Basis.equivFun_ofEquivFun [Finite ι] (e : M ≃ₗ[R] ι → R) : (Basis.ofEquivFun e).equivFun = e := by ext j simp_rw [Basis.equivFun_apply, Basis.ofEquivFun_repr_apply] end Fintype variable {ι R M : Type*} variable [Semiring R] [AddCommMonoid M] [Module R M] namespace Basis variable (b : Basis ι R M) section Ext variable {R₁ : Type*} [Semiring R₁] {σ : R →+* R₁} {σ' : R₁ →+* R} variable [RingHomInvPair σ σ'] [RingHomInvPair σ' σ] variable {M₁ : Type*} [AddCommMonoid M₁] [Module R₁ M₁] /-- Two linear maps are equal if they are equal on basis vectors. -/ theorem ext {f₁ f₂ : M →ₛₗ[σ] M₁} (h : ∀ i, f₁ (b i) = f₂ (b i)) : f₁ = f₂ := by ext x rw [← b.linearCombination_repr x, Finsupp.linearCombination_apply, Finsupp.sum] simp only [map_sum, LinearMap.map_smulₛₗ, h] /-- Two linear equivs are equal if they are equal on basis vectors. -/ theorem ext' {f₁ f₂ : M ≃ₛₗ[σ] M₁} (h : ∀ i, f₁ (b i) = f₂ (b i)) : f₁ = f₂ := by ext x rw [← b.linearCombination_repr x, Finsupp.linearCombination_apply, Finsupp.sum] simp only [map_sum, LinearEquiv.map_smulₛₗ, h] /-- Two elements are equal iff their coordinates are equal. 
-/ theorem ext_elem_iff {x y : M} : x = y ↔ ∀ i, b.repr x i = b.repr y i := by simp only [← DFunLike.ext_iff, EmbeddingLike.apply_eq_iff_eq] alias ⟨_, ext_elem⟩ := ext_elem_iff theorem repr_eq_iff {b : Basis ι R M} {f : M →ₗ[R] ι →₀ R} : ↑b.repr = f ↔ ∀ i, f (b i) = Finsupp.single i 1 := ⟨fun h i => h ▸ b.repr_self i, fun h => b.ext fun i => (b.repr_self i).trans (h i).symm⟩ theorem repr_eq_iff' {b : Basis ι R M} {f : M ≃ₗ[R] ι →₀ R} : b.repr = f ↔ ∀ i, f (b i) = Finsupp.single i 1 := ⟨fun h i => h ▸ b.repr_self i, fun h => b.ext' fun i => (b.repr_self i).trans (h i).symm⟩ theorem apply_eq_iff {b : Basis ι R M} {x : M} {i : ι} : b i = x ↔ b.repr x = Finsupp.single i 1 := ⟨fun h => h ▸ b.repr_self i, fun h => b.repr.injective ((b.repr_self i).trans h.symm)⟩ /-- An unbundled version of `repr_eq_iff` -/ theorem repr_apply_eq (f : M → ι → R) (hadd : ∀ x y, f (x + y) = f x + f y) (hsmul : ∀ (c : R) (x : M), f (c • x) = c • f x) (f_eq : ∀ i, f (b i) = Finsupp.single i 1) (x : M) (i : ι) : b.repr x i = f x i := by let f_i : M →ₗ[R] R := { toFun x := f x i map_add' _ _ := by rw [hadd, Pi.add_apply] map_smul' _ _ := by simp [hsmul, Pi.smul_apply] } have : Finsupp.lapply i ∘ₗ ↑b.repr = f_i := by refine b.ext fun j => ?_ change b.repr (b j) i = f (b j) i rw [b.repr_self, f_eq] calc b.repr x i = f_i x := by { rw [← this] rfl } _ = f x i := rfl /-- Two bases are equal if they assign the same coordinates. -/ theorem eq_ofRepr_eq_repr {b₁ b₂ : Basis ι R M} (h : ∀ x i, b₁.repr x i = b₂.repr x i) : b₁ = b₂ := repr_injective <| by ext; apply h /-- Two bases are equal if their basis vectors are the same. 
-/ @[ext] theorem eq_of_apply_eq {b₁ b₂ : Basis ι R M} : (∀ i, b₁ i = b₂ i) → b₁ = b₂ := DFunLike.ext _ _ end Ext section MapCoeffs variable {R' : Type*} [Semiring R'] [Module R' M] (f : R ≃+* R') attribute [local instance] SMul.comp.isScalarTower /-- If `R` and `R'` are isomorphic rings that act identically on a module `M`, then a basis for `M` as `R`-module is also a basis for `M` as `R'`-module. See also `Basis.algebraMapCoeffs` for the case where `f` is equal to `algebraMap`. -/ @[simps +simpRhs] def mapCoeffs (h : ∀ (c) (x : M), f c • x = c • x) : Basis ι R' M := by letI : Module R' R := Module.compHom R (↑f.symm : R' →+* R) haveI : IsScalarTower R' R M := { smul_assoc := fun x y z => by change (f.symm x * y) • z = x • (y • z) rw [mul_smul, ← h, f.apply_symm_apply] } exact ofRepr <| (b.repr.restrictScalars R').trans <| Finsupp.mapRange.linearEquiv (Module.compHom.toLinearEquiv f.symm).symm variable (h : ∀ (c) (x : M), f c • x = c • x) theorem mapCoeffs_apply (i : ι) : b.mapCoeffs f h i = b i := apply_eq_iff.mpr <| by simp @[simp] theorem coe_mapCoeffs : (b.mapCoeffs f h : ι → M) = b := funext <| b.mapCoeffs_apply f h end MapCoeffs section ReindexRange /-- `b.reindexRange` is a basis indexed by `range b`, the basis vectors themselves. 
-/ def reindexRange : Basis (range b) R M := haveI := Classical.dec (Nontrivial R) if h : Nontrivial R then b.reindex (Equiv.ofInjective b (Basis.injective b)) else letI : Subsingleton R := not_nontrivial_iff_subsingleton.mp h .ofRepr (Module.subsingletonEquiv R M (range b)) theorem reindexRange_self (i : ι) (h := Set.mem_range_self i) : b.reindexRange ⟨b i, h⟩ = b i := by cases subsingleton_or_nontrivial R · let := Module.subsingleton R M simp [reindexRange, eq_iff_true_of_subsingleton] · simp [*, reindexRange, reindex_apply] theorem reindexRange_repr_self (i : ι) : b.reindexRange.repr (b i) = Finsupp.single ⟨b i, mem_range_self i⟩ 1 := calc b.reindexRange.repr (b i) = b.reindexRange.repr (b.reindexRange ⟨b i, mem_range_self i⟩) := congr_arg _ (b.reindexRange_self _ _).symm _ = Finsupp.single ⟨b i, mem_range_self i⟩ 1 := b.reindexRange.repr_self _ @[simp] theorem reindexRange_apply (x : range b) : b.reindexRange x = x := by rcases x with ⟨bi, ⟨i, rfl⟩⟩ exact b.reindexRange_self i theorem reindexRange_repr' (x : M) {bi : M} {i : ι} (h : b i = bi) : b.reindexRange.repr x ⟨bi, ⟨i, h⟩⟩ = b.repr x i := by nontriviality subst h apply (b.repr_apply_eq (fun x i => b.reindexRange.repr x ⟨b i, _⟩) _ _ _ x i).symm · intro x y ext i simp only [Pi.add_apply, LinearEquiv.map_add, Finsupp.coe_add] · intro c x ext i simp only [Pi.smul_apply, LinearEquiv.map_smul, Finsupp.coe_smul] · intro i ext j simp only [reindexRange_repr_self] apply Finsupp.single_apply_left (f := fun i => (⟨b i, _⟩ : Set.range b)) exact fun i j h => b.injective (Subtype.mk.inj h) @[simp] theorem reindexRange_repr (x : M) (i : ι) (h := Set.mem_range_self i) : b.reindexRange.repr x ⟨b i, h⟩ = b.repr x i := b.reindexRange_repr' _ rfl section Fintype variable [Fintype ι] [DecidableEq M] /-- `b.reindexFinsetRange` is a basis indexed by `Finset.univ.image b`, the finite set of basis vectors themselves. 
-/ def reindexFinsetRange : Basis (Finset.univ.image b) R M := b.reindexRange.reindex ((Equiv.refl M).subtypeEquiv (by simp)) theorem reindexFinsetRange_self (i : ι) (h := Finset.mem_image_of_mem b (Finset.mem_univ i)) : b.reindexFinsetRange ⟨b i, h⟩ = b i := by rw [reindexFinsetRange, reindex_apply, reindexRange_apply] rfl @[simp] theorem reindexFinsetRange_apply (x : Finset.univ.image b) : b.reindexFinsetRange x = x := by rcases x with ⟨bi, hbi⟩ rcases Finset.mem_image.mp hbi with ⟨i, -, rfl⟩ exact b.reindexFinsetRange_self i theorem reindexFinsetRange_repr_self (i : ι) : b.reindexFinsetRange.repr (b i) = Finsupp.single ⟨b i, Finset.mem_image_of_mem b (Finset.mem_univ i)⟩ 1 := by ext ⟨bi, hbi⟩ rw [reindexFinsetRange, repr_reindex, Finsupp.mapDomain_equiv_apply, reindexRange_repr_self] simp [Finsupp.single_apply] @[simp] theorem reindexFinsetRange_repr (x : M) (i : ι) (h := Finset.mem_image_of_mem b (Finset.mem_univ i)) : b.reindexFinsetRange.repr x ⟨b i, h⟩ = b.repr x i := by simp [reindexFinsetRange] end Fintype end ReindexRange variable [Module R M'] section Constr variable (S : Type*) [Semiring S] [Module S M'] variable [SMulCommClass R S M'] /-- Construct a linear map given the value at the basis, called `Basis.constr b S f` where `b` is a basis, `f` is the value of the linear map over the elements of the basis, and `S` is an extra semiring (typically `S = R` or `S = ℕ`). This definition is parameterized over an extra `Semiring S`, such that `SMulCommClass R S M'` holds. If `R` is commutative, you can set `S := R`; if `R` is not commutative, you can recover an `AddEquiv` by setting `S := ℕ`. See library note [bundled maps over different rings]. 
-/ def constr : (ι → M') ≃ₗ[S] M →ₗ[R] M' where toFun f := (Finsupp.linearCombination R id).comp <| Finsupp.lmapDomain R R f ∘ₗ ↑b.repr invFun f i := f (b i) left_inv f := by ext simp right_inv f := by refine b.ext fun i => ?_ simp map_add' f g := by refine b.ext fun i => ?_ simp map_smul' c f := by refine b.ext fun i => ?_ simp theorem constr_def (f : ι → M') : constr (M' := M') b S f = linearCombination R id ∘ₗ Finsupp.lmapDomain R R f ∘ₗ ↑b.repr := rfl theorem constr_apply (f : ι → M') (x : M) : constr (M' := M') b S f x = (b.repr x).sum fun b a => a • f b := by simp only [constr_def, LinearMap.comp_apply, lmapDomain_apply, linearCombination_apply] rw [Finsupp.sum_mapDomain_index] <;> simp [add_smul] @[simp] theorem constr_basis (f : ι → M') (i : ι) : (constr (M' := M') b S f : M → M') (b i) = f i := by simp [Basis.constr_apply, b.repr_self] theorem constr_eq {g : ι → M'} {f : M →ₗ[R] M'} (h : ∀ i, g i = f (b i)) : constr (M' := M') b S g = f := b.ext fun i => (b.constr_basis S g i).trans (h i) theorem constr_self (f : M →ₗ[R] M') : (constr (M' := M') b S fun i => f (b i)) = f := b.constr_eq S fun _ => rfl theorem constr_range {f : ι → M'} : LinearMap.range (constr (M' := M') b S f) = span R (range f) := by rw [b.constr_def S f, LinearMap.range_comp, LinearMap.range_comp, LinearEquiv.range, ← Finsupp.supported_univ, Finsupp.lmapDomain_supported, ← Set.image_univ, ← Finsupp.span_image_eq_map_linearCombination, Set.image_id] @[simp] theorem constr_comp (f : M' →ₗ[R] M') (v : ι → M') : constr (M' := M') b S (f ∘ v) = f.comp (constr (M' := M') b S v) := b.ext fun i => by simp only [Basis.constr_basis, LinearMap.comp_apply, Function.comp] variable (S : Type*) [Semiring S] [Module S M'] variable [SMulCommClass R S M'] @[simp] theorem constr_apply_fintype [Fintype ι] (b : Basis ι R M) (f : ι → M') (x : M) : (constr (M' := M') b S f : M → M') x = ∑ i, b.equivFun x i • f i := by simp [b.constr_apply, b.equivFun_apply, Finsupp.sum_fintype] end Constr section Equiv 
variable (i : ι) variable {M'' : Type*} (b' : Basis ι' R M') (e : ι ≃ ι') variable [AddCommMonoid M''] [Module R M''] /-- If `b` is a basis for `M` and `b'` a basis for `M'`, and the index types are equivalent, `b.equiv b' e` is a linear equivalence `M ≃ₗ[R] M'`, mapping `b i` to `b' (e i)`. -/ protected def equiv : M ≃ₗ[R] M' := b.repr.trans (b'.reindex e.symm).repr.symm @[simp] theorem equiv_apply : b.equiv b' e (b i) = b' (e i) := by simp [Basis.equiv] @[simp] theorem equiv_refl : b.equiv b (Equiv.refl ι) = LinearEquiv.refl R M := b.ext' fun i => by simp @[simp] theorem equiv_symm : (b.equiv b' e).symm = b'.equiv b e.symm := b'.ext' fun i => (b.equiv b' e).injective (by simp) @[simp] theorem equiv_trans {ι'' : Type*} (b'' : Basis ι'' R M'') (e : ι ≃ ι') (e' : ι' ≃ ι'') : (b.equiv b' e).trans (b'.equiv b'' e') = b.equiv b'' (e.trans e') := b.ext' fun i => by simp @[simp] theorem map_equiv (b : Basis ι R M) (b' : Basis ι' R M') (e : ι ≃ ι') : b.map (b.equiv b' e) = b'.reindex e.symm := by ext i simp section CommSemiring variable {R M M' : Type*} [CommSemiring R] variable [AddCommMonoid M] [Module R M] [AddCommMonoid M'] [Module R M'] variable (b : Basis ι R M) (b' : Basis ι' R M') variable [SMulCommClass R R M'] /-- If `b` is a basis for `M` and `b'` a basis for `M'`, and `f`, `g` form a bijection between the basis vectors, `b.equiv' b' f g hf hg hgf hfg` is a linear equivalence `M ≃ₗ[R] M'`, mapping `b i` to `f (b i)`. 
-/ def equiv' (f : M → M') (g : M' → M) (hf : ∀ i, f (b i) ∈ range b') (hg : ∀ i, g (b' i) ∈ range b) (hgf : ∀ i, g (f (b i)) = b i) (hfg : ∀ i, f (g (b' i)) = b' i) : M ≃ₗ[R] M' := { constr (M' := M') b R (f ∘ b) with invFun := constr (M' := M) b' R (g ∘ b') left_inv := have : (constr (M' := M) b' R (g ∘ b')).comp (constr (M' := M') b R (f ∘ b)) = LinearMap.id := b.ext fun i => Exists.elim (hf i) fun i' hi' => by rw [LinearMap.comp_apply, b.constr_basis, Function.comp_apply, ← hi', b'.constr_basis, Function.comp_apply, hi', hgf, LinearMap.id_apply] fun x => congr_arg (fun h : M →ₗ[R] M => h x) this right_inv := have : (constr (M' := M') b R (f ∘ b)).comp (constr (M' := M) b' R (g ∘ b')) = LinearMap.id := b'.ext fun i => Exists.elim (hg i) fun i' hi' => by rw [LinearMap.comp_apply, b'.constr_basis, Function.comp_apply, ← hi', b.constr_basis, Function.comp_apply, hi', hfg, LinearMap.id_apply] fun x => congr_arg (fun h : M' →ₗ[R] M' => h x) this } @[simp] theorem equiv'_apply (f : M → M') (g : M' → M) (hf hg hgf hfg) (i : ι) : b.equiv' b' f g hf hg hgf hfg (b i) = f (b i) := b.constr_basis R _ _ @[simp] theorem equiv'_symm_apply (f : M → M') (g : M' → M) (hf hg hgf hfg) (i : ι') : (b.equiv' b' f g hf hg hgf hfg).symm (b' i) = g (b' i) := b'.constr_basis R _ _ theorem sum_repr_mul_repr {ι'} [Fintype ι'] (b' : Basis ι' R M) (x : M) (i : ι) : (∑ j : ι', b.repr (b' j) i * b'.repr x j) = b.repr x i := by conv_rhs => rw [← b'.sum_repr x] simp_rw [map_sum, map_smul, Finset.sum_apply'] refine Finset.sum_congr rfl fun j _ => ?_ rw [Finsupp.smul_apply, smul_eq_mul, mul_comm] end CommSemiring end Equiv section Coord variable (i : ι) /-- `b.coord i` is the linear function giving the `i`-th coordinate of a vector with respect to the basis `b`. `b.coord i` is an element of the dual space. In particular, for finite-dimensional spaces it is the `ι`th basis vector of the dual space. -/ @[simps!] 
def coord : M →ₗ[R] R := Finsupp.lapply i ∘ₗ ↑b.repr theorem forall_coord_eq_zero_iff {x : M} : (∀ i, b.coord i x = 0) ↔ x = 0 := Iff.trans (by simp only [b.coord_apply, DFunLike.ext_iff, Finsupp.zero_apply]) b.repr.map_eq_zero_iff /-- The sum of the coordinates of an element `m : M` with respect to a basis. -/ noncomputable def sumCoords : M →ₗ[R] R := (Finsupp.lsum ℕ fun _ => LinearMap.id) ∘ₗ (b.repr : M →ₗ[R] ι →₀ R) @[simp] theorem coe_sumCoords : (b.sumCoords : M → R) = fun m => (b.repr m).sum fun _ => id := rfl @[simp high] theorem coe_sumCoords_of_fintype [Fintype ι] : (b.sumCoords : M → R) = ∑ i, b.coord i := by ext m simp only [sumCoords, Finsupp.sum_fintype, LinearMap.id_coe, LinearEquiv.coe_coe, coord_apply, id, Fintype.sum_apply, imp_true_iff, Finsupp.coe_lsum, LinearMap.coe_comp, comp_apply, LinearMap.coeFn_sum] @[simp] theorem sumCoords_self_apply : b.sumCoords (b i) = 1 := by simp only [Basis.sumCoords, LinearMap.id_coe, LinearEquiv.coe_coe, id, Basis.repr_self, Function.comp_apply, Finsupp.coe_lsum, LinearMap.coe_comp, Finsupp.sum_single_index] theorem dvd_coord_smul (i : ι) (m : M) (r : R) : r ∣ b.coord i (r • m) := ⟨b.coord i m, by simp⟩ theorem coord_repr_symm (b : Basis ι R M) (i : ι) (f : ι →₀ R) : b.coord i (b.repr.symm f) = f i := by simp only [repr_symm_apply, coord_apply, repr_linearCombination] theorem coe_sumCoords_eq_finsum : (b.sumCoords : M → R) = fun m => ∑ᶠ i, b.coord i m := by ext m simp only [Basis.sumCoords, Basis.coord, Finsupp.lapply_apply, LinearMap.id_coe, LinearEquiv.coe_coe, Function.comp_apply, Finsupp.coe_lsum, LinearMap.coe_comp, finsum_eq_sum _ (b.repr m).finite_support, Finsupp.sum, Finset.finite_toSet_toFinset, id, Finsupp.fun_support_eq] variable (e : ι ≃ ι') @[simp] theorem sumCoords_reindex : (b.reindex e).sumCoords = b.sumCoords := by ext x simp only [coe_sumCoords, repr_reindex] exact Finsupp.sum_mapDomain_index (fun _ => rfl) fun _ _ _ => rfl variable (S : Type*) [Semiring S] [Module S M'] variable [SMulCommClass 
R S M'] theorem coord_equivFun_symm [Finite ι] (b : Basis ι R M) (i : ι) (f : ι → R) : b.coord i (b.equivFun.symm f) = f i := b.coord_repr_symm i (Finsupp.equivFunOnFinite.symm f) end Coord end Basis end Module
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Exact.lean
import Mathlib.Algebra.Exact import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.Projection /-! # Basis from a split exact sequence Let `0 → K → M → P → 0` be a split exact sequence of `R`-modules, let `s : M → K` be a retraction of `f` and `v` be a basis of `M` indexed by `κ ⊕ σ`. Then if `s vᵢ = 0` for `i : κ` and `(s vⱼ)ⱼ` is linear independent for `j : σ`, then the images of `vᵢ` for `i : κ` form a basis of `P`. We treat linear independence and the span condition separately. For convenience this is stated not for `κ ⊕ σ`, but for an arbitrary type `ι` with two maps `κ → ι` and `σ → ι`. -/ variable {R M K P : Type*} [Ring R] [AddCommGroup M] [AddCommGroup K] [AddCommGroup P] variable [Module R M] [Module R K] [Module R P] variable {f : K →ₗ[R] M} {g : M →ₗ[R] P} {s : M →ₗ[R] K} variable (hs : s ∘ₗ f = LinearMap.id) (hfg : Function.Exact f g) variable {ι κ σ : Type*} {v : ι → M} {a : κ → ι} {b : σ → ι} section include hs hfg lemma LinearIndependent.linearIndependent_of_exact_of_retraction (hainj : Function.Injective a) (hsa : ∀ i, s (v (a i)) = 0) (hli : LinearIndependent R v) : LinearIndependent R (g ∘ v ∘ a) := by apply (LinearIndependent.comp hli a hainj).map rw [Submodule.disjoint_def, hfg.linearMap_ker_eq] rintro - hy ⟨y, rfl⟩ have hz : s (f y) = 0 := by revert hy generalize f y = x intro hy induction hy using Submodule.span_induction with | mem m hm => obtain ⟨i, rfl⟩ := hm; apply hsa | zero => simp_all | add => simp_all | smul => simp_all replace hs := DFunLike.congr_fun hs y simp only [LinearMap.coe_comp, Function.comp_apply, LinearMap.id_coe, id_eq] at hs rw [← hs, hz, map_zero] private lemma top_le_span_of_aux (v : κ ⊕ σ → M) (hg : Function.Surjective g) (hslzero : ∀ i, s (v (.inl i)) = 0) (hli : LinearIndependent R (s ∘ v ∘ .inr)) (hsp : ⊤ ≤ Submodule.span R (Set.range v)) : ⊤ ≤ Submodule.span R (Set.range <| g ∘ v ∘ .inl) := by rintro p - obtain ⟨m, rfl⟩ := hg p wlog h : m ∈ LinearMap.ker s · let x : M := f (s m) rw [show g m = g (m 
- f (s m)) by simp [hfg.apply_apply_eq_zero]] apply this hs hfg v hg hslzero hli hsp replace hs := DFunLike.congr_fun hs (s m) simp only [LinearMap.coe_comp, Function.comp_apply, LinearMap.id_coe, id_eq] at hs simp [hs] have : m ∈ Submodule.span R (Set.range v) := hsp Submodule.mem_top obtain ⟨c, rfl⟩ := Finsupp.mem_span_range_iff_exists_finsupp.mp this simp only [LinearMap.mem_ker, Finsupp.sum, map_sum, map_smul, Finset.sum_sum_eq_sum_toLeft_add_sum_toRight, map_add, hslzero, smul_zero, Finset.sum_const_zero, zero_add] at h replace hli := (linearIndependent_iff'.mp hli) c.support.toRight (c ∘ .inr) h simp only [Finset.mem_toRight, Finsupp.mem_support_iff, Function.comp_apply, not_imp_self] at hli simp only [Finsupp.sum, Finset.sum_sum_eq_sum_toLeft_add_sum_toRight, hli, zero_smul, Finset.sum_const_zero, add_zero, map_sum, map_smul] exact Submodule.sum_mem _ (fun i hi ↦ Submodule.smul_mem _ _ <| Submodule.subset_span ⟨i, rfl⟩) lemma Submodule.top_le_span_of_exact_of_retraction (hg : Function.Surjective g) (hsa : ∀ i, s (v (a i)) = 0) (hlib : LinearIndependent R (s ∘ v ∘ b)) (hab : Codisjoint (Set.range a) (Set.range b)) (hsp : ⊤ ≤ Submodule.span R (Set.range v)) : ⊤ ≤ Submodule.span R (Set.range <| g ∘ v ∘ a) := by apply top_le_span_of_aux hs hfg (Sum.elim (v ∘ a) (v ∘ b)) hg hsa hlib simp only [codisjoint_iff, Set.sup_eq_union, Set.top_eq_univ] at hab rwa [Set.Sum.elim_range, Set.range_comp, Set.range_comp, ← Set.image_union, hab, Set.image_univ] /-- Let `0 → K → M → P → 0` be a split exact sequence of `R`-modules, let `s : M → K` be a retraction of `f` and `v` be a basis of `M` indexed by `κ ⊕ σ`. Then if `s vᵢ = 0` for `i : κ` and `(s vⱼ)ⱼ` is linear independent for `j : σ`, then the images of `vᵢ` for `i : κ` form a basis of `P`. For convenience this is stated for an arbitrary type `ι` with two maps `κ → ι` and `σ → ι`. 
-/ noncomputable def Module.Basis.ofSplitExact (hg : Function.Surjective g) (v : Basis ι R M) (hainj : Function.Injective a) (hsa : ∀ i, s (v (a i)) = 0) (hlib : LinearIndependent R (s ∘ v ∘ b)) (hab : Codisjoint (Set.range a) (Set.range b)) : Basis κ R P := .mk (v.linearIndependent.linearIndependent_of_exact_of_retraction hs hfg hainj hsa) (Submodule.top_le_span_of_exact_of_retraction hs hfg hg hsa hlib hab (by rw [v.span_eq])) end section include hfg lemma Submodule.linearProjOfIsCompl_comp_surjective_of_exact {p q : Submodule R M} (hpq : IsCompl p q) (hmap : Submodule.map g q = ⊤) : Function.Surjective (Submodule.linearProjOfIsCompl p q hpq ∘ₗ f) := by rw [← Set.surjOn_univ, LinearMap.coe_comp, Set.surjOn_comp_iff, Set.image_univ] rw [← LinearMap.coe_range, ← Submodule.top_coe (R := R), surjOn_iff_le_map, ← hfg.linearMap_ker_eq] intro x triv obtain ⟨a, haq, ha⟩ : g x.val ∈ q.map g := by rwa [hmap] exact ⟨x - a, by simp [← ha], by simpa⟩ lemma Submodule.linearProjOfIsCompl_comp_bijective_of_exact (hf : Function.Injective f) {p q : Submodule R M} (hpq : IsCompl p q) (hker : Disjoint (LinearMap.ker g) q) (hmap : Submodule.map g q = ⊤) : Function.Bijective (Submodule.linearProjOfIsCompl p q hpq ∘ₗ f) := by refine ⟨?_, Submodule.linearProjOfIsCompl_comp_surjective_of_exact hfg _ hmap⟩ rwa [LinearMap.coe_comp, Set.InjOn.injective_iff ↑(LinearMap.range f) _ subset_rfl] simpa [← LinearMap.disjoint_ker_iff_injOn, ← hfg.linearMap_ker_eq] lemma LinearMap.linearProjOfIsCompl_comp_bijective_of_exact (hf : Function.Injective f) {q : Submodule R M} {E : Type*} [AddCommGroup E] [Module R E] {i : E →ₗ[R] M} (hi : Function.Injective i) (h : IsCompl (LinearMap.range i) q) (hker : Disjoint (LinearMap.ker g) q) (hmap : Submodule.map g q = ⊤) : Function.Bijective (LinearMap.linearProjOfIsCompl q i hi h ∘ₗ f) := by rw [LinearMap.linearProjOfIsCompl, LinearMap.comp_assoc, LinearMap.coe_comp, Function.Bijective.of_comp_iff] · exact (LinearEquiv.ofInjective i hi).symm.bijective · exact 
Submodule.linearProjOfIsCompl_comp_bijective_of_exact hfg hf h hker hmap end
.lake/packages/mathlib/Mathlib/LinearAlgebra/Basis/Submodule.lean
import Mathlib.Algebra.Algebra.Basic import Mathlib.LinearAlgebra.Basis.Basic /-! # Bases of submodules -/ open Function Set Submodule Finsupp Module assert_not_exists Ordinal noncomputable section universe u variable {ι ι' R R₂ M M' : Type*} namespace Module.Basis variable [Semiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid M'] [Module R M'] variable (b : Basis ι R M) /-- If the submodule `P` has a basis, `x ∈ P` iff it is a linear combination of basis vectors. -/ theorem mem_submodule_iff {P : Submodule R M} (b : Basis ι R P) {x : M} : x ∈ P ↔ ∃ c : ι →₀ R, x = Finsupp.sum c fun i x => x • (b i : M) := by conv_lhs => rw [← P.range_subtype, ← Submodule.map_top, ← b.span_eq, Submodule.map_span, ← Set.range_comp, ← Finsupp.range_linearCombination] simp [@eq_comm _ x, Function.comp, Finsupp.linearCombination_apply] /-- If the submodule `P` has a finite basis, `x ∈ P` iff it is a linear combination of basis vectors. -/ theorem mem_submodule_iff' [Fintype ι] {P : Submodule R M} (b : Basis ι R P) {x : M} : x ∈ P ↔ ∃ c : ι → R, x = ∑ i, c i • (b i : M) := b.mem_submodule_iff.trans <| Finsupp.equivFunOnFinite.exists_congr_left.trans <| exists_congr fun c => by simp [Finsupp.sum_fintype, Finsupp.equivFunOnFinite] end Basis open LinearMap variable {v : ι → M} variable [Ring R] [CommRing R₂] [AddCommGroup M] variable [Module R M] [Module R₂ M] variable {x y : M} variable (b : Basis ι R M) theorem Basis.eq_bot_of_rank_eq_zero [NoZeroDivisors R] (b : Basis ι R M) (N : Submodule R M) (rank_eq : ∀ {m : ℕ} (v : Fin m → N), LinearIndependent R ((↑) ∘ v : Fin m → M) → m = 0) : N = ⊥ := by rw [Submodule.eq_bot_iff] intro x hx contrapose! 
rank_eq with x_ne refine ⟨1, fun _ => ⟨x, hx⟩, ?_, one_ne_zero⟩ rw [Fintype.linearIndependent_iff] rintro g sum_eq i obtain ⟨_, hi⟩ := i simp only [Fin.default_eq_zero, Finset.univ_unique, Finset.sum_singleton] at sum_eq convert (b.smul_eq_zero.mp sum_eq).resolve_right x_ne end Module section Induction variable [Ring R] [IsDomain R] variable [AddCommGroup M] [Module R M] {b : ι → M} /-- If `N` is a submodule with finite rank, do induction on adjoining a linear independent element to a submodule. -/ def Submodule.inductionOnRankAux (b : Basis ι R M) (P : Submodule R M → Sort*) (ih : ∀ N : Submodule R M, (∀ N' ≤ N, ∀ x ∈ N, (∀ (c : R), ∀ y ∈ N', c • x + y = (0 : M) → c = 0) → P N') → P N) (n : ℕ) (N : Submodule R M) (rank_le : ∀ {m : ℕ} (v : Fin m → N), LinearIndependent R ((↑) ∘ v : Fin m → M) → m ≤ n) : P N := by haveI : DecidableEq M := Classical.decEq M have Pbot : P ⊥ := by apply ih intro N _ x x_mem x_ortho exfalso rw [mem_bot] at x_mem simpa [x_mem] using x_ortho 1 0 N.zero_mem induction n generalizing N with | zero => suffices N = ⊥ by rwa [this] apply Basis.eq_bot_of_rank_eq_zero b _ fun m hv => Nat.le_zero.mp (rank_le _ hv) | succ n rank_ih => apply ih intro N' N'_le x x_mem x_ortho apply rank_ih intro m v hli refine Nat.succ_le_succ_iff.mp (rank_le (Fin.cons ⟨x, x_mem⟩ fun i => ⟨v i, N'_le (v i).2⟩) ?_) convert hli.fin_cons' x _ ?_ · ext i refine Fin.cases ?_ ?_ i <;> simp · intro c y hcy refine x_ortho c y (Submodule.span_le.mpr ?_ y.2) hcy rintro _ ⟨z, rfl⟩ exact (v z).2 end Induction namespace Module.Basis /-- An element of a non-unital-non-associative algebra is in the center exactly when it commutes with the basis elements. 
-/ lemma mem_center_iff {A} [Semiring R] [NonUnitalNonAssocSemiring A] [Module R A] [SMulCommClass R A A] [SMulCommClass R R A] [IsScalarTower R A A] (b : Basis ι R A) {z : A} : z ∈ Set.center A ↔ (∀ i, Commute (b i) z) ∧ ∀ i j, z * (b i * b j) = (z * b i) * b j ∧ (b i * b j) * z = b i * (b j * z) := by constructor · intro h constructor · intro i apply (h.1 (b i)).symm · intros exact ⟨h.2 _ _, h.3 _ _⟩ · intro h rw [center, mem_setOf_eq] constructor case comm => intro y rw [← b.linearCombination_repr y, linearCombination_apply, sum, commute_iff_eq, Finset.sum_mul, Finset.mul_sum] simp_rw [mul_smul_comm, smul_mul_assoc, (h.1 _).eq] case left_assoc => intro c d rw [← b.linearCombination_repr c, ← b.linearCombination_repr d, linearCombination_apply, linearCombination_apply, sum, sum, Finset.sum_mul, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum] simp_rw [smul_mul_assoc, Finset.mul_sum, Finset.sum_mul, mul_smul_comm, Finset.mul_sum, Finset.smul_sum, smul_mul_assoc, mul_smul_comm, (h.2 _ _).1, (@SMulCommClass.smul_comm R R A)] rw [Finset.sum_comm] case right_assoc => intro c d rw [← b.linearCombination_repr c, ← b.linearCombination_repr d, linearCombination_apply, linearCombination_apply, sum, Finsupp.sum, Finset.sum_mul] simp_rw [smul_mul_assoc, Finset.mul_sum, Finset.sum_mul, mul_smul_comm, Finset.mul_sum, Finset.smul_sum, smul_mul_assoc, mul_smul_comm, Finset.sum_mul, smul_mul_assoc, (h.2 _ _).2] section RestrictScalars variable {S : Type*} [CommRing R] [Ring S] [Nontrivial S] [AddCommGroup M] variable [Algebra R S] [Module S M] [Module R M] variable [IsScalarTower R S M] [NoZeroSMulDivisors R S] (b : Basis ι S M) variable (R) open Submodule /-- Let `b` be an `S`-basis of `M`. Let `R` be a CommRing such that `Algebra R S` has no zero smul divisors, then the submodule of `M` spanned by `b` over `R` admits `b` as an `R`-basis. 
-/ noncomputable def restrictScalars : Basis ι R (span R (Set.range b)) := Basis.span (b.linearIndependent.restrict_scalars (smul_left_injective R one_ne_zero)) @[simp] theorem restrictScalars_apply (i : ι) : (b.restrictScalars R i : M) = b i := by simp only [Basis.restrictScalars, Basis.span_apply] @[simp] theorem restrictScalars_repr_apply (m : span R (Set.range b)) (i : ι) : algebraMap R S ((b.restrictScalars R).repr m i) = b.repr m i := by suffices Finsupp.mapRange.linearMap (Algebra.linearMap R S) ∘ₗ (b.restrictScalars R).repr.toLinearMap = ((b.repr : M →ₗ[S] ι →₀ S).restrictScalars R).domRestrict _ by exact DFunLike.congr_fun (LinearMap.congr_fun this m) i refine Basis.ext (b.restrictScalars R) fun _ => ?_ simp only [LinearMap.coe_comp, LinearEquiv.coe_toLinearMap, Function.comp_apply, map_one, Basis.repr_self, Finsupp.mapRange.linearMap_apply, Finsupp.mapRange_single, Algebra.linearMap_apply, LinearMap.domRestrict_apply, Basis.restrictScalars_apply, LinearMap.coe_restrictScalars] /-- Let `b` be an `S`-basis of `M`. Then `m : M` lies in the `R`-module spanned by `b` iff all the coordinates of `m` on the basis `b` are in `R` (see `Basis.mem_span` for the case `R = S`). -/ theorem mem_span_iff_repr_mem (m : M) : m ∈ span R (Set.range b) ↔ ∀ i, b.repr m i ∈ Set.range (algebraMap R S) := by refine ⟨fun hm i => ⟨(b.restrictScalars R).repr ⟨m, hm⟩ i, b.restrictScalars_repr_apply R ⟨m, hm⟩ i⟩, fun h => ?_⟩ rw [← b.linearCombination_repr m, Finsupp.linearCombination_apply S _] refine sum_mem fun i _ => ?_ obtain ⟨_, h⟩ := h i simp_rw [← h, algebraMap_smul] exact smul_mem _ _ (subset_span (Set.mem_range_self i)) end RestrictScalars section AddSubgroup variable {M R : Type*} [Ring R] [Nontrivial R] [IsAddTorsionFree R] [AddCommGroup M] [Module R M] (A : AddSubgroup M) {ι : Type*} (b : Basis ι R M) /-- Let `A` be an subgroup of an additive commutative group `M` that is also an `R`-module. 
Construct a basis of `A` as a `ℤ`-basis from a `R`-basis of `E` that generates `A`. -/ noncomputable def addSubgroupOfClosure (h : A = .closure (Set.range b)) : Basis ι ℤ A.toIntSubmodule := (b.restrictScalars ℤ).map <| LinearEquiv.ofEq _ _ (by rw [h, ← Submodule.span_int_eq_addSubgroupClosure, toAddSubgroup_toIntSubmodule]) @[simp] theorem addSubgroupOfClosure_apply (h : A = .closure (Set.range b)) (i : ι) : b.addSubgroupOfClosure A h i = b i := by simp [addSubgroupOfClosure] @[simp] theorem addSubgroupOfClosure_repr_apply (h : A = .closure (Set.range b)) (x : A) (i : ι) : (b.addSubgroupOfClosure A h).repr x i = b.repr x i := by suffices Finsupp.mapRange.linearMap (Algebra.linearMap ℤ R) ∘ₗ (b.addSubgroupOfClosure A h).repr.toLinearMap = ((b.repr : M →ₗ[R] ι →₀ R).restrictScalars ℤ).domRestrict A.toIntSubmodule by exact DFunLike.congr_fun (LinearMap.congr_fun this x) i exact (b.addSubgroupOfClosure A h).ext fun _ ↦ by simp end AddSubgroup end Module.Basis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Finite.lean
import Mathlib.LinearAlgebra.Dimension.Constructions import Mathlib.LinearAlgebra.Dimension.StrongRankCondition import Mathlib.LinearAlgebra.Dimension.Subsingleton import Mathlib.LinearAlgebra.FreeModule.Finite.Basic import Mathlib.SetTheory.Cardinal.Cofinality /-! # Conditions for rank to be finite Also contains characterization for when rank equals zero or rank equals one. -/ noncomputable section universe u v v' w variable {R : Type u} {M M₁ : Type v} {M' : Type v'} {ι : Type w} variable [Ring R] [AddCommGroup M] [AddCommGroup M'] [AddCommGroup M₁] variable [Module R M] [Module R M'] [Module R M₁] attribute [local instance] nontrivial_of_invariantBasisNumber open Basis Cardinal Function Module Set Submodule /-- If every finite set of linearly independent vectors has cardinality at most `n`, then the same is true for arbitrary sets of linearly independent vectors. -/ theorem linearIndependent_bounded_of_finset_linearIndependent_bounded {n : ℕ} (H : ∀ s : Finset M, (LinearIndependent R fun i : s => (i : M)) → s.card ≤ n) : ∀ s : Set M, LinearIndependent R ((↑) : s → M) → #s ≤ n := by intro s li apply Cardinal.card_le_of intro t rw [← Finset.card_map (Embedding.subtype s)] apply H apply linearIndependent_finset_map_embedding_subtype _ li theorem rank_le {n : ℕ} (H : ∀ s : Finset M, (LinearIndependent R fun i : s => (i : M)) → s.card ≤ n) : Module.rank R M ≤ n := by rw [Module.rank_def] apply ciSup_le' rintro ⟨s, li⟩ exact linearIndependent_bounded_of_finset_linearIndependent_bounded H _ li section RankZero /-- See `rank_zero_iff` for a stronger version with `NoZeroSMulDivisor R M`. -/ lemma rank_eq_zero_iff : Module.rank R M = 0 ↔ ∀ x : M, ∃ a : R, a ≠ 0 ∧ a • x = 0 := by nontriviality R constructor · contrapose! 
rintro ⟨x, hx⟩ rw [← Cardinal.one_le_iff_ne_zero] have : LinearIndependent R (fun _ : Unit ↦ x) := linearIndependent_iff.mpr (fun l hl ↦ Finsupp.unique_ext <| not_not.mp fun H ↦ hx _ H ((Finsupp.linearCombination_unique _ _ _).symm.trans hl)) simpa using this.cardinal_lift_le_rank · intro h rw [← le_zero_iff, Module.rank_def] apply ciSup_le' intro ⟨s, hs⟩ rw [nonpos_iff_eq_zero, Cardinal.mk_eq_zero_iff, ← not_nonempty_iff] rintro ⟨i : s⟩ obtain ⟨a, ha, ha'⟩ := h i apply ha simpa using DFunLike.congr_fun (linearIndependent_iff.mp hs (Finsupp.single i a) (by simpa)) i theorem rank_pos_of_free [Module.Free R M] [Nontrivial M] : 0 < Module.rank R M := have := Module.nontrivial R M (pos_of_ne_zero <| Cardinal.mk_ne_zero _).trans_le (Free.chooseBasis R M).linearIndependent.cardinal_le_rank variable [Nontrivial R] section variable [NoZeroSMulDivisors R M] theorem rank_zero_iff_forall_zero : Module.rank R M = 0 ↔ ∀ x : M, x = 0 := by simp_rw [rank_eq_zero_iff, smul_eq_zero, and_or_left, not_and_self_iff, false_or, exists_and_right, and_iff_right (exists_ne (0 : R))] /-- See `rank_subsingleton` for the reason that `Nontrivial R` is needed. Also see `rank_eq_zero_iff` for the version without `NoZeroSMulDivisor R M`. 
-/ theorem rank_zero_iff : Module.rank R M = 0 ↔ Subsingleton M := rank_zero_iff_forall_zero.trans (subsingleton_iff_forall_eq 0).symm theorem rank_pos_iff_exists_ne_zero : 0 < Module.rank R M ↔ ∃ x : M, x ≠ 0 := by rw [← not_iff_not] simpa using rank_zero_iff_forall_zero theorem rank_pos_iff_nontrivial : 0 < Module.rank R M ↔ Nontrivial M := rank_pos_iff_exists_ne_zero.trans (nontrivial_iff_exists_ne 0).symm theorem rank_pos [Nontrivial M] : 0 < Module.rank R M := rank_pos_iff_nontrivial.mpr ‹_› end theorem exists_mem_ne_zero_of_rank_pos {s : Submodule R M} (h : 0 < Module.rank R s) : ∃ b : M, b ∈ s ∧ b ≠ 0 := exists_mem_ne_zero_of_ne_bot fun eq => by rw [eq, rank_bot] at h; exact lt_irrefl _ h end RankZero section Finite theorem Module.finite_of_rank_eq_nat [Module.Free R M] {n : ℕ} (h : Module.rank R M = n) : Module.Finite R M := by nontriviality R obtain ⟨⟨ι, b⟩⟩ := Module.Free.exists_basis (R := R) (M := M) have := mk_lt_aleph0_iff.mp <| b.linearIndependent.cardinal_le_rank |>.trans_eq h |>.trans_lt <| nat_lt_aleph0 n exact Module.Finite.of_basis b theorem Module.finite_of_rank_eq_zero [NoZeroSMulDivisors R M] (h : Module.rank R M = 0) : Module.Finite R M := by nontriviality R rw [rank_zero_iff] at h infer_instance theorem Module.finite_of_rank_eq_one [Module.Free R M] (h : Module.rank R M = 1) : Module.Finite R M := Module.finite_of_rank_eq_nat <| h.trans Nat.cast_one.symm section variable [StrongRankCondition R] /-- If a module has a finite dimension, all bases are indexed by a finite type. -/ theorem Module.Basis.nonempty_fintype_index_of_rank_lt_aleph0 {ι : Type*} (b : Basis ι R M) (h : Module.rank R M < ℵ₀) : Nonempty (Fintype ι) := by rwa [← Cardinal.lift_lt, ← b.mk_eq_rank, Cardinal.lift_aleph0, Cardinal.lift_lt_aleph0, Cardinal.lt_aleph0_iff_fintype] at h /-- If a module has a finite dimension, all bases are indexed by a finite type. 
-/ noncomputable def Module.Basis.fintypeIndexOfRankLtAleph0 {ι : Type*} (b : Basis ι R M) (h : Module.rank R M < ℵ₀) : Fintype ι := Classical.choice (b.nonempty_fintype_index_of_rank_lt_aleph0 h) /-- If a module has a finite dimension, all bases are indexed by a finite set. -/ theorem Module.Basis.finite_index_of_rank_lt_aleph0 {ι : Type*} {s : Set ι} (b : Basis s R M) (h : Module.rank R M < ℵ₀) : s.Finite := Set.finite_def.2 (b.nonempty_fintype_index_of_rank_lt_aleph0 h) end namespace LinearIndependent variable [StrongRankCondition R] theorem cardinalMk_le_finrank [Module.Finite R M] {ι : Type w} {b : ι → M} (h : LinearIndependent R b) : #ι ≤ finrank R M := by rw [← lift_le.{max v w}] simpa only [← finrank_eq_rank, lift_natCast, lift_le_nat_iff] using h.cardinal_lift_le_rank theorem fintype_card_le_finrank [Module.Finite R M] {ι : Type*} [Fintype ι] {b : ι → M} (h : LinearIndependent R b) : Fintype.card ι ≤ finrank R M := by simpa using h.cardinalMk_le_finrank theorem finset_card_le_finrank [Module.Finite R M] {b : Finset M} (h : LinearIndependent R (fun x => x : b → M)) : b.card ≤ finrank R M := by rw [← Fintype.card_coe] exact h.fintype_card_le_finrank theorem lt_aleph0_of_finite {ι : Type w} [Module.Finite R M] {v : ι → M} (h : LinearIndependent R v) : #ι < ℵ₀ := by apply Cardinal.lift_lt.1 apply lt_of_le_of_lt · apply h.cardinal_lift_le_rank · rw [← finrank_eq_rank, Cardinal.lift_aleph0, Cardinal.lift_natCast] apply Cardinal.nat_lt_aleph0 theorem finite [Module.Finite R M] {ι : Type*} {f : ι → M} (h : LinearIndependent R f) : Finite ι := Cardinal.lt_aleph0_iff_finite.1 <| h.lt_aleph0_of_finite theorem setFinite [Module.Finite R M] {b : Set M} (h : LinearIndependent R fun x : b => (x : M)) : b.Finite := Cardinal.lt_aleph0_iff_set_finite.mp h.lt_aleph0_of_finite end LinearIndependent lemma exists_set_linearIndependent_of_lt_rank {n : Cardinal} (hn : n < Module.rank R M) : ∃ s : Set M, #s = n ∧ LinearIndepOn R id s := by obtain ⟨⟨s, hs⟩, hs'⟩ := 
exists_lt_of_lt_ciSup' (hn.trans_eq (Module.rank_def R M)) obtain ⟨t, ht, ht'⟩ := le_mk_iff_exists_subset.mp hs'.le exact ⟨t, ht', hs.mono ht⟩ lemma exists_finset_linearIndependent_of_le_rank {n : ℕ} (hn : n ≤ Module.rank R M) : ∃ s : Finset M, s.card = n ∧ LinearIndepOn R id (s : Set M) := by rcases hn.eq_or_lt with h | h · obtain ⟨⟨s, hs⟩, hs'⟩ := Cardinal.exists_eq_natCast_of_iSup_eq _ (Cardinal.bddAbove_range _) _ (h.trans (Module.rank_def R M)).symm have : Finite s := lt_aleph0_iff_finite.mp (hs' ▸ nat_lt_aleph0 n) cases nonempty_fintype s refine ⟨s.toFinset, by simpa using hs', by simpa⟩ · obtain ⟨s, hs, hs'⟩ := exists_set_linearIndependent_of_lt_rank h have : Finite s := lt_aleph0_iff_finite.mp (hs ▸ nat_lt_aleph0 n) cases nonempty_fintype s exact ⟨s.toFinset, by simpa using hs, by simpa⟩ lemma exists_linearIndependent_of_le_rank {n : ℕ} (hn : n ≤ Module.rank R M) : ∃ f : Fin n → M, LinearIndependent R f := have ⟨_, hs, hs'⟩ := exists_finset_linearIndependent_of_le_rank hn ⟨_, (linearIndependent_equiv (Finset.equivFinOfCardEq hs).symm).mpr hs'⟩ lemma natCast_le_rank_iff [Nontrivial R] {n : ℕ} : n ≤ Module.rank R M ↔ ∃ f : Fin n → M, LinearIndependent R f := ⟨exists_linearIndependent_of_le_rank, fun H ↦ by simpa using H.choose_spec.cardinal_lift_le_rank⟩ lemma natCast_le_rank_iff_finset [Nontrivial R] {n : ℕ} : n ≤ Module.rank R M ↔ ∃ s : Finset M, s.card = n ∧ LinearIndependent R ((↑) : s → M) := ⟨exists_finset_linearIndependent_of_le_rank, fun ⟨s, h₁, h₂⟩ ↦ by simpa [h₁] using h₂.cardinal_le_rank⟩ lemma exists_finset_linearIndependent_of_le_finrank {n : ℕ} (hn : n ≤ finrank R M) : ∃ s : Finset M, s.card = n ∧ LinearIndependent R ((↑) : s → M) := by by_cases h : finrank R M = 0 · rw [le_zero_iff.mp (hn.trans_eq h)] exact ⟨∅, rfl, by convert linearIndependent_empty R M using 2 <;> aesop⟩ exact exists_finset_linearIndependent_of_le_rank ((Nat.cast_le.mpr hn).trans_eq (cast_toNat_of_lt_aleph0 (toNat_ne_zero.mp h).2)) lemma exists_linearIndependent_of_le_finrank 
{n : ℕ} (hn : n ≤ finrank R M) : ∃ f : Fin n → M, LinearIndependent R f := have ⟨_, hs, hs'⟩ := exists_finset_linearIndependent_of_le_finrank hn ⟨_, (linearIndependent_equiv (Finset.equivFinOfCardEq hs).symm).mpr hs'⟩ variable [Module.Finite R M] [StrongRankCondition R] in theorem Module.Finite.not_linearIndependent_of_infinite {ι : Type*} [Infinite ι] (v : ι → M) : ¬LinearIndependent R v := mt LinearIndependent.finite <| @not_finite _ _ section variable [NoZeroSMulDivisors R M] theorem iSupIndep.subtype_ne_bot_le_rank [Nontrivial R] {V : ι → Submodule R M} (hV : iSupIndep V) : Cardinal.lift.{v} #{ i : ι // V i ≠ ⊥ } ≤ Cardinal.lift.{w} (Module.rank R M) := by set I := { i : ι // V i ≠ ⊥ } have hI : ∀ i : I, ∃ v ∈ V i, v ≠ (0 : M) := by intro i rw [← Submodule.ne_bot_iff] exact i.prop choose v hvV hv using hI have : LinearIndependent R v := (hV.comp Subtype.coe_injective).linearIndependent _ hvV hv exact this.cardinal_lift_le_rank variable [Module.Finite R M] [StrongRankCondition R] theorem iSupIndep.subtype_ne_bot_le_finrank_aux {p : ι → Submodule R M} (hp : iSupIndep p) : #{ i // p i ≠ ⊥ } ≤ (finrank R M : Cardinal.{w}) := by suffices Cardinal.lift.{v} #{ i // p i ≠ ⊥ } ≤ Cardinal.lift.{v} (finrank R M : Cardinal.{w}) by rwa [Cardinal.lift_le] at this calc Cardinal.lift.{v} #{ i // p i ≠ ⊥ } ≤ Cardinal.lift.{w} (Module.rank R M) := hp.subtype_ne_bot_le_rank _ = Cardinal.lift.{w} (finrank R M : Cardinal.{v}) := by rw [finrank_eq_rank] _ = Cardinal.lift.{v} (finrank R M : Cardinal.{w}) := by simp /-- If `p` is an independent family of submodules of a `R`-finite module `M`, then the number of nontrivial subspaces in the family `p` is finite. 
-/ noncomputable def iSupIndep.fintypeNeBotOfFiniteDimensional {p : ι → Submodule R M} (hp : iSupIndep p) : Fintype { i : ι // p i ≠ ⊥ } := by suffices #{ i // p i ≠ ⊥ } < (ℵ₀ : Cardinal.{w}) by rw [Cardinal.lt_aleph0_iff_fintype] at this exact this.some refine lt_of_le_of_lt hp.subtype_ne_bot_le_finrank_aux ?_ simp [Cardinal.nat_lt_aleph0] /-- If `p` is an independent family of submodules of a `R`-finite module `M`, then the number of nontrivial subspaces in the family `p` is bounded above by the dimension of `M`. Note that the `Fintype` hypothesis required here can be provided by `iSupIndep.fintypeNeBotOfFiniteDimensional`. -/ theorem iSupIndep.subtype_ne_bot_le_finrank {p : ι → Submodule R M} (hp : iSupIndep p) [Fintype { i // p i ≠ ⊥ }] : Fintype.card { i // p i ≠ ⊥ } ≤ finrank R M := by simpa using hp.subtype_ne_bot_le_finrank_aux end variable [Module.Finite R M] [StrongRankCondition R] section open Finset /-- If a finset has cardinality larger than the rank of a module, then there is a nontrivial linear relation amongst its elements. -/ theorem Module.exists_nontrivial_relation_of_finrank_lt_card {t : Finset M} (h : finrank R M < t.card) : ∃ f : M → R, ∑ e ∈ t, f e • e = 0 ∧ ∃ x ∈ t, f x ≠ 0 := by obtain ⟨g, sum, z, nonzero⟩ := Fintype.not_linearIndependent_iff.mp (mt LinearIndependent.finset_card_le_finrank h.not_ge) refine ⟨Subtype.val.extend g 0, ?_, z, z.2, by rwa [Subtype.val_injective.extend_apply]⟩ rw [← Finset.sum_finset_coe]; convert sum; apply Subtype.val_injective.extend_apply /-- If a finset has cardinality larger than `finrank + 1`, then there is a nontrivial linear relation amongst its elements, such that the coefficients of the relation sum to zero. 
-/ theorem Module.exists_nontrivial_relation_sum_zero_of_finrank_succ_lt_card {t : Finset M} (h : finrank R M + 1 < t.card) : ∃ f : M → R, ∑ e ∈ t, f e • e = 0 ∧ ∑ e ∈ t, f e = 0 ∧ ∃ x ∈ t, f x ≠ 0 := by -- Pick an element x₀ ∈ t, obtain ⟨x₀, x₀_mem⟩ := card_pos.1 ((Nat.succ_pos _).trans h) -- and apply the previous lemma to the {xᵢ - x₀} let shift : M ↪ M := ⟨(· - x₀), sub_left_injective⟩ classical let t' := (t.erase x₀).map shift have h' : finrank R M < t'.card := by rw [card_map, card_erase_of_mem x₀_mem] exact Nat.lt_pred_iff.mpr h -- to obtain a function `g`. obtain ⟨g, gsum, x₁, x₁_mem, nz⟩ := exists_nontrivial_relation_of_finrank_lt_card h' -- Then obtain `f` by translating back by `x₀`, -- and setting the value of `f` at `x₀` to ensure `∑ e ∈ t, f e = 0`. let f : M → R := fun z ↦ if z = x₀ then -∑ z ∈ t.erase x₀, g (z - x₀) else g (z - x₀) refine ⟨f, ?_, ?_, ?_⟩ -- After this, it's a matter of verifying the properties, -- based on the corresponding properties for `g`. · rw [sum_map, Embedding.coeFn_mk] at gsum simp_rw [f, ← t.sum_erase_add _ x₀_mem, if_pos, neg_smul, sum_smul, ← sub_eq_add_neg, ← sum_sub_distrib, ← gsum, smul_sub] refine sum_congr rfl fun x x_mem ↦ ?_ rw [if_neg (mem_erase.mp x_mem).1] · simp_rw [f, ← t.sum_erase_add _ x₀_mem, if_pos, add_neg_eq_zero] exact sum_congr rfl fun x x_mem ↦ if_neg (mem_erase.mp x_mem).1 · obtain ⟨x₁, x₁_mem', rfl⟩ := Finset.mem_map.mp x₁_mem have := mem_erase.mp x₁_mem' exact ⟨x₁, by simpa only [f, Embedding.coeFn_mk, sub_add_cancel, this.2, true_and, if_neg this.1]⟩ end end Finite section FinrankZero section variable [Nontrivial R] /-- A (finite-dimensional) space that is a subsingleton has zero `finrank`. 
-/ @[nontriviality] theorem Module.finrank_zero_of_subsingleton [Subsingleton M] : finrank R M = 0 := by rw [finrank, rank_subsingleton', map_zero] lemma LinearIndependent.finrank_eq_zero_of_infinite {ι} [Infinite ι] {v : ι → M} (hv : LinearIndependent R v) : finrank R M = 0 := toNat_eq_zero.mpr <| .inr hv.aleph0_le_rank section variable [NoZeroSMulDivisors R M] /-- A finite-dimensional space is nontrivial if it has positive `finrank`. -/ theorem Module.nontrivial_of_finrank_pos (h : 0 < finrank R M) : Nontrivial M := rank_pos_iff_nontrivial.mp (lt_rank_of_lt_finrank h) /-- A finite-dimensional space is nontrivial if it has `finrank` equal to the successor of a natural number. -/ theorem Module.nontrivial_of_finrank_eq_succ {n : ℕ} (hn : finrank R M = n.succ) : Nontrivial M := nontrivial_of_finrank_pos (R := R) (by rw [hn]; exact n.succ_pos) end variable (R M) @[simp] theorem finrank_bot : finrank R (⊥ : Submodule R M) = 0 := finrank_eq_of_rank_eq (rank_bot _ _) end section StrongRankCondition variable [StrongRankCondition R] [Module.Finite R M] /-- A finite rank torsion-free module has positive `finrank` iff it has a nonzero element. -/ theorem Module.finrank_pos_iff_exists_ne_zero [NoZeroSMulDivisors R M] : 0 < finrank R M ↔ ∃ x : M, x ≠ 0 := by rw [← @rank_pos_iff_exists_ne_zero R M, ← finrank_eq_rank] norm_cast /-- An `R`-finite torsion-free module has positive `finrank` iff it is nontrivial. -/ theorem Module.finrank_pos_iff [NoZeroSMulDivisors R M] : 0 < finrank R M ↔ Nontrivial M := by rw [← rank_pos_iff_nontrivial (R := R), ← finrank_eq_rank] norm_cast /-- A nontrivial finite-dimensional space has positive `finrank`. -/ theorem Module.finrank_pos [NoZeroSMulDivisors R M] [h : Nontrivial M] : 0 < finrank R M := finrank_pos_iff.mpr h /-- See `Module.finrank_zero_iff` for the stronger version with `NoZeroSMulDivisors R M`. 
-/ theorem Module.finrank_eq_zero_iff : finrank R M = 0 ↔ ∀ x : M, ∃ a : R, a ≠ 0 ∧ a • x = 0 := by rw [← rank_eq_zero_iff (R := R), ← finrank_eq_rank] norm_cast /-- A finite-dimensional space has zero `finrank` iff it is a subsingleton. This is the `finrank` version of `rank_zero_iff`. -/ theorem Module.finrank_zero_iff [NoZeroSMulDivisors R M] : finrank R M = 0 ↔ Subsingleton M := by rw [← rank_zero_iff (R := R), ← finrank_eq_rank] norm_cast /-- Similar to `rank_quotient_add_rank_le` but for `finrank` and a finite `M`. -/ lemma Module.finrank_quotient_add_finrank_le (N : Submodule R M) : finrank R (M ⧸ N) + finrank R N ≤ finrank R M := by haveI := nontrivial_of_invariantBasisNumber R have := rank_quotient_add_rank_le N rw [← finrank_eq_rank R M, ← finrank_eq_rank R, ← N.finrank_eq_rank] at this exact mod_cast this end StrongRankCondition theorem Module.finrank_eq_zero_of_rank_eq_zero (h : Module.rank R M = 0) : finrank R M = 0 := by delta finrank rw [h, zero_toNat] theorem Submodule.bot_eq_top_of_rank_eq_zero [NoZeroSMulDivisors R M] (h : Module.rank R M = 0) : (⊥ : Submodule R M) = ⊤ := by nontriviality R rw [rank_zero_iff] at h subsingleton /-- See `rank_subsingleton` for the reason that `Nontrivial R` is needed. 
-/ @[simp] theorem Submodule.rank_eq_zero [Nontrivial R] [NoZeroSMulDivisors R M] {S : Submodule R M} : Module.rank R S = 0 ↔ S = ⊥ := ⟨fun h => (Submodule.eq_bot_iff _).2 fun x hx => congr_arg Subtype.val <| ((Submodule.eq_bot_iff _).1 <| Eq.symm <| Submodule.bot_eq_top_of_rank_eq_zero h) ⟨x, hx⟩ Submodule.mem_top, fun h => by rw [h, rank_bot]⟩ @[simp] theorem Submodule.finrank_eq_zero [StrongRankCondition R] [NoZeroSMulDivisors R M] {S : Submodule R M} [Module.Finite R S] : finrank R S = 0 ↔ S = ⊥ := by rw [← Submodule.rank_eq_zero, ← finrank_eq_rank, ← @Nat.cast_zero Cardinal, Nat.cast_inj] @[simp] lemma Submodule.one_le_finrank_iff [StrongRankCondition R] [NoZeroSMulDivisors R M] {S : Submodule R M} [Module.Finite R S] : 1 ≤ finrank R S ↔ S ≠ ⊥ := by simp [← not_iff_not] @[simp] theorem Set.finrank_empty [Nontrivial R] : Set.finrank R (∅ : Set M) = 0 := by rw [Set.finrank, span_empty, finrank_bot] variable [Module.Free R M] theorem finrank_eq_zero_of_basis_imp_not_finite (h : ∀ s : Set M, Basis.{v} (s : Set M) R M → ¬s.Finite) : finrank R M = 0 := by cases subsingleton_or_nontrivial R · have := Module.subsingleton R M exact (h ∅ ⟨LinearEquiv.ofSubsingleton _ _⟩ Set.finite_empty).elim obtain ⟨_, ⟨b⟩⟩ := (Module.free_iff_set R M).mp ‹_› have := Set.Infinite.to_subtype (h _ b) exact b.linearIndependent.finrank_eq_zero_of_infinite theorem finrank_eq_zero_of_basis_imp_false (h : ∀ s : Finset M, Basis.{v} (s : Set M) R M → False) : finrank R M = 0 := finrank_eq_zero_of_basis_imp_not_finite fun s b hs => h hs.toFinset (by convert b simp) theorem finrank_eq_zero_of_not_exists_basis (h : ¬∃ s : Finset M, Nonempty (Basis (s : Set M) R M)) : finrank R M = 0 := finrank_eq_zero_of_basis_imp_false fun s b => h ⟨s, ⟨b⟩⟩ theorem finrank_eq_zero_of_not_exists_basis_finite (h : ¬∃ (s : Set M) (_ : Basis.{v} (s : Set M) R M), s.Finite) : finrank R M = 0 := finrank_eq_zero_of_basis_imp_not_finite fun s b hs => h ⟨s, b, hs⟩ theorem finrank_eq_zero_of_not_exists_basis_finset (h : ¬∃ 
s : Finset M, Nonempty (Basis s R M)) : finrank R M = 0 := finrank_eq_zero_of_basis_imp_false fun s b => h ⟨s, ⟨b⟩⟩ end FinrankZero section RankOne variable [NoZeroSMulDivisors R M] [StrongRankCondition R] /-- If there is a nonzero vector and every other vector is a multiple of it, then the module has dimension one. -/ theorem rank_eq_one (v : M) (n : v ≠ 0) (h : ∀ w : M, ∃ c : R, c • v = w) : Module.rank R M = 1 := by haveI := nontrivial_of_invariantBasisNumber R obtain ⟨b⟩ := (Basis.basis_singleton_iff.{_, _, u} PUnit).mpr ⟨v, n, h⟩ rw [rank_eq_card_basis b, Fintype.card_punit, Nat.cast_one] /-- If there is a nonzero vector and every other vector is a multiple of it, then the module has dimension one. -/ theorem finrank_eq_one (v : M) (n : v ≠ 0) (h : ∀ w : M, ∃ c : R, c • v = w) : finrank R M = 1 := finrank_eq_of_rank_eq (rank_eq_one v n h) /-- If every vector is a multiple of some `v : M`, then `M` has dimension at most one. -/ theorem finrank_le_one (v : M) (h : ∀ w : M, ∃ c : R, c • v = w) : finrank R M ≤ 1 := by haveI := nontrivial_of_invariantBasisNumber R rcases eq_or_ne v 0 with (rfl | hn) · haveI := _root_.subsingleton_of_forall_eq (0 : M) fun w => by obtain ⟨c, rfl⟩ := h w simp rw [finrank_zero_of_subsingleton] exact zero_le_one · exact (finrank_eq_one v hn h).le end RankOne namespace Module variable {ι : Type*} @[simp] lemma finite_finsupp_iff : Module.Finite R (ι →₀ M) ↔ IsEmpty ι ∨ Subsingleton M ∨ Module.Finite R M ∧ Finite ι where mp := by simp only [or_iff_not_imp_left, not_subsingleton_iff_nontrivial, not_isEmpty_iff] rintro h ⟨i⟩ _ obtain ⟨s, hs⟩ := id h exact ⟨.of_surjective (Finsupp.lapply (R := R) (M := M) i) (Finsupp.apply_surjective i), finite_of_span_finite_eq_top_finsupp s.finite_toSet hs⟩ mpr | .inl _ => inferInstance | .inr <| .inl h => inferInstance | .inr <| .inr h => by cases h; infer_instance @[simp high] lemma finite_finsupp_self_iff : Module.Finite R (ι →₀ R) ↔ Subsingleton R ∨ Finite ι := by simp only [finite_finsupp_iff, 
Finite.self, true_and, or_iff_right_iff_imp] exact fun _ ↦ .inr inferInstance end Module
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Finrank.lean
import Mathlib.LinearAlgebra.Dimension.Subsingleton import Mathlib.SetTheory.Cardinal.ToNat /-! # Finite dimension of vector spaces Definition of the rank of a module, or dimension of a vector space, as a natural number. ## Main definitions Defined is `Module.finrank`, the dimension of a finite-dimensional space, returning a `Nat`, as opposed to `Module.rank`, which returns a `Cardinal`. When the space has infinite dimension, its `finrank` is by convention set to `0`. The definition of `finrank` does not assume a `FiniteDimensional` instance, but lemmas might. Import `LinearAlgebra.FiniteDimensional` to get access to these additional lemmas. Formulas for the dimension are given for linear equivs, in `LinearEquiv.finrank_eq`. ## Implementation notes Most results are deduced from the corresponding results for the general dimension (as a cardinal), in `Dimension.lean`. Not all results have been ported yet. You should not assume that there has been any effort to state lemmas as generally as possible. -/ universe u v w open Cardinal Submodule Module Function variable {R : Type u} {M : Type v} {N : Type w} variable [Semiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid N] [Module R N] namespace Module section Semiring /-- The rank of a module as a natural number. For a finite-dimensional vector space `V` over a field `k`, `Module.finrank k V` is equal to the dimension of `V` over `k`. For a general module `M` over a ring `R`, `Module.finrank R M` is defined to be the supremum of the cardinalities of the `R`-linearly independent subsets of `M`, if this supremum is finite. It is defined by convention to be `0` if this supremum is infinite. See `Module.rank` for a cardinal-valued version where infinite rank modules have rank an infinite cardinal. Note that if `R` is not a field then there can exist modules `M` with `¬(Module.Finite R M)` but `finrank R M ≠ 0`. 
For example `ℚ` has `finrank` equal to `1` over `ℤ`, because the nonempty `ℤ`-linearly independent subsets of `ℚ` are precisely the nonzero singletons. -/ noncomputable def finrank (R M : Type*) [Semiring R] [AddCommMonoid M] [Module R M] : ℕ := Cardinal.toNat (Module.rank R M) @[simp] theorem finrank_subsingleton [Subsingleton R] : finrank R M = 1 := by rw [finrank, rank_subsingleton, map_one] theorem finrank_eq_of_rank_eq {n : ℕ} (h : Module.rank R M = ↑n) : finrank R M = n := by simp [finrank, h] lemma rank_eq_one_iff_finrank_eq_one : Module.rank R M = 1 ↔ finrank R M = 1 := Cardinal.toNat_eq_one.symm /-- This is like `rank_eq_one_iff_finrank_eq_one` but works for `2`, `3`, `4`, ... -/ lemma rank_eq_ofNat_iff_finrank_eq_ofNat (n : ℕ) [Nat.AtLeastTwo n] : Module.rank R M = OfNat.ofNat n ↔ finrank R M = OfNat.ofNat n := Cardinal.toNat_eq_ofNat.symm theorem finrank_le_of_rank_le {n : ℕ} (h : Module.rank R M ≤ ↑n) : finrank R M ≤ n := by rwa [← Cardinal.toNat_le_iff_le_of_lt_aleph0, toNat_natCast] at h · exact h.trans_lt (nat_lt_aleph0 n) · exact nat_lt_aleph0 n theorem finrank_lt_of_rank_lt {n : ℕ} (h : Module.rank R M < ↑n) : finrank R M < n := by rwa [← Cardinal.toNat_lt_iff_lt_of_lt_aleph0, toNat_natCast] at h · exact h.trans (nat_lt_aleph0 n) · exact nat_lt_aleph0 n theorem lt_rank_of_lt_finrank {n : ℕ} (h : n < finrank R M) : ↑n < Module.rank R M := by rwa [← Cardinal.toNat_lt_iff_lt_of_lt_aleph0, toNat_natCast] · exact nat_lt_aleph0 n · contrapose! 
h rw [finrank, Cardinal.toNat_apply_of_aleph0_le h] exact n.zero_le theorem one_lt_rank_of_one_lt_finrank (h : 1 < finrank R M) : 1 < Module.rank R M := by simpa using lt_rank_of_lt_finrank h theorem finrank_le_finrank_of_rank_le_rank (h : lift.{w} (Module.rank R M) ≤ Cardinal.lift.{v} (Module.rank R N)) (h' : Module.rank R N < ℵ₀) : finrank R M ≤ finrank R N := by simpa only [toNat_lift] using toNat_le_toNat h (lift_lt_aleph0.mpr h') end Semiring end Module open Module namespace LinearEquiv /-- The dimension of a finite-dimensional space is preserved under linear equivalence. -/ theorem finrank_eq (f : M ≃ₗ[R] N) : finrank R M = finrank R N := by unfold finrank rw [← Cardinal.toNat_lift, f.lift_rank_eq, Cardinal.toNat_lift] /-- Pushforwards of finite-dimensional submodules along a `LinearEquiv` have the same finrank. -/ theorem finrank_map_eq (f : M ≃ₗ[R] N) (p : Submodule R M) : finrank R (p.map (f : M →ₗ[R] N)) = finrank R p := (f.submoduleMap p).finrank_eq.symm end LinearEquiv /-- The dimensions of the domain and range of an injective linear map are equal. -/ theorem LinearMap.finrank_range_of_inj {f : M →ₗ[R] N} (hf : Function.Injective f) : finrank R (LinearMap.range f) = finrank R M := by rw [(LinearEquiv.ofInjective f hf).finrank_eq] @[simp] theorem Submodule.finrank_map_subtype_eq (p : Submodule R M) (q : Submodule R p) : finrank R (q.map p.subtype) = finrank R q := (Submodule.equivSubtypeMap p q).symm.finrank_eq variable (R M) @[simp] theorem finrank_top : finrank R (⊤ : Submodule R M) = finrank R M := by unfold finrank simp namespace Algebra /-- If `S₀ / R₀` and `S₁ / R₁` are algebras, `i : R₀ ≃+* R₁` and `j : S₀ ≃+* S₁` are ring isomorphisms, such that `R₀ → R₁ → S₁` and `R₀ → S₀ → S₁` commute, then the `finrank` of `S₀ / R₀` is equal to the finrank of `S₁ / R₁`. 
-/ theorem finrank_eq_of_equiv_equiv {R₀ S₀ : Type*} [CommSemiring R₀] [Semiring S₀] [Algebra R₀ S₀] {R₁ S₁ : Type*} [CommSemiring R₁] [Semiring S₁] [Algebra R₁ S₁] (i : R₀ ≃+* R₁) (j : S₀ ≃+* S₁) (hc : (algebraMap R₁ S₁).comp i.toRingHom = j.toRingHom.comp (algebraMap R₀ S₀)) : Module.finrank R₀ S₀ = Module.finrank R₁ S₁ := by simpa using (congr_arg Cardinal.toNat (lift_rank_eq_of_equiv_equiv i j hc)) end Algebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/StrongRankCondition.lean
import Mathlib.LinearAlgebra.Basis.Basic import Mathlib.LinearAlgebra.Basis.Submodule import Mathlib.LinearAlgebra.Dimension.Finrank import Mathlib.LinearAlgebra.InvariantBasisNumber /-! # Lemmas about rank and `finrank` in rings satisfying strong rank condition. ## Main statements For modules over rings satisfying the rank condition * `Basis.le_span`: the cardinality of a basis is bounded by the cardinality of any spanning set For modules over rings satisfying the strong rank condition * `linearIndependent_le_span`: For any linearly independent family `v : ι → M` and any finite spanning set `w : Set M`, the cardinality of `ι` is bounded by the cardinality of `w`. * `linearIndependent_le_basis`: If `b` is a basis for a module `M`, and `s` is a linearly independent set, then the cardinality of `s` is bounded by the cardinality of `b`. For modules over rings with invariant basis number (including all commutative rings and all Noetherian rings) * `mk_eq_mk_of_basis`: the dimension theorem, any two bases of the same vector space have the same cardinality. ## Additional definition * `Algebra.IsQuadraticExtension`: An extension of rings `R ⊆ S` is quadratic if `S` is a free `R`-algebra of rank `2`. -/ noncomputable section universe u v w w' variable {R : Type u} {S : Type*} {M : Type v} [Semiring R] [AddCommMonoid M] [Module R M] variable {ι : Type w} {ι' : Type w'} open Cardinal Basis Submodule Function Set Module attribute [local instance] nontrivial_of_invariantBasisNumber section InvariantBasisNumber variable [InvariantBasisNumber R] /-- The dimension theorem: if `v` and `v'` are two bases, their index types have the same cardinalities. -/ theorem mk_eq_mk_of_basis (v : Basis ι R M) (v' : Basis ι' R M) : Cardinal.lift.{w'} #ι = Cardinal.lift.{w} #ι' := by classical haveI := nontrivial_of_invariantBasisNumber R cases fintypeOrInfinite ι · -- `v` is a finite basis, so by `basis_finite_of_finite_spans` so is `v'`. 
-- haveI : Finite (range v) := Set.finite_range v haveI := basis_finite_of_finite_spans (Set.finite_range v) v.span_eq v' cases nonempty_fintype ι' -- We clean up a little: rw [Cardinal.mk_fintype, Cardinal.mk_fintype] simp only [Cardinal.lift_natCast, Nat.cast_inj] -- Now we can use invariant basis number to show they have the same cardinality. apply card_eq_of_linearEquiv R exact (Finsupp.linearEquivFunOnFinite R R ι).symm.trans v.repr.symm ≪≫ₗ v'.repr ≪≫ₗ Finsupp.linearEquivFunOnFinite R R ι' · -- `v` is an infinite basis, -- so by `infinite_basis_le_maximal_linearIndependent`, `v'` is at least as big, -- and then applying `infinite_basis_le_maximal_linearIndependent` again -- we see they have the same cardinality. have w₁ := infinite_basis_le_maximal_linearIndependent' v _ v'.linearIndependent v'.maximal rcases Cardinal.lift_mk_le'.mp w₁ with ⟨f⟩ haveI : Infinite ι' := Infinite.of_injective f f.2 have w₂ := infinite_basis_le_maximal_linearIndependent' v' _ v.linearIndependent v.maximal exact le_antisymm w₁ w₂ /-- Given two bases indexed by `ι` and `ι'` of an `R`-module, where `R` satisfies the invariant basis number property, an equiv `ι ≃ ι'`. -/ def Module.Basis.indexEquiv (v : Basis ι R M) (v' : Basis ι' R M) : ι ≃ ι' := (Cardinal.lift_mk_eq'.1 <| mk_eq_mk_of_basis v v').some theorem mk_eq_mk_of_basis' {ι' : Type w} (v : Basis ι R M) (v' : Basis ι' R M) : #ι = #ι' := Cardinal.lift_inj.1 <| mk_eq_mk_of_basis v v' end InvariantBasisNumber section RankCondition variable [RankCondition R] /-- An auxiliary lemma for `Basis.le_span`. If `R` satisfies the rank condition, then for any finite basis `b : Basis ι R M`, and any finite spanning set `w : Set M`, the cardinality of `ι` is bounded by the cardinality of `w`. 
-/ theorem Basis.le_span'' {ι : Type*} [Fintype ι] (b : Basis ι R M) {w : Set M} [Fintype w] (s : span R w = ⊤) : Fintype.card ι ≤ Fintype.card w := by -- We construct a surjective linear map `(w → R) →ₗ[R] (ι → R)`, -- by expressing a linear combination in `w` as a linear combination in `ι`. fapply card_le_of_surjective' R · exact b.repr.toLinearMap.comp (Finsupp.linearCombination R (↑)) · apply Surjective.comp (g := b.repr.toLinearMap) · apply LinearEquiv.surjective rw [← LinearMap.range_eq_top, Finsupp.range_linearCombination] simpa using s /-- Another auxiliary lemma for `Basis.le_span`, which does not require assuming the basis is finite, but still assumes we have a finite spanning set. -/ theorem basis_le_span' {ι : Type*} (b : Basis ι R M) {w : Set M} [Fintype w] (s : span R w = ⊤) : #ι ≤ Fintype.card w := by haveI := nontrivial_of_invariantBasisNumber R haveI := basis_finite_of_finite_spans w.toFinite s b cases nonempty_fintype ι rw [Cardinal.mk_fintype ι] simp only [Nat.cast_le] exact Basis.le_span'' b s -- Note that if `R` satisfies the strong rank condition, -- this also follows from `linearIndependent_le_span` below. /-- If `R` satisfies the rank condition, then the cardinality of any basis is bounded by the cardinality of any spanning set. 
-/ theorem Module.Basis.le_span {J : Set M} (v : Basis ι R M) (hJ : span R J = ⊤) : #(range v) ≤ #J := by haveI := nontrivial_of_invariantBasisNumber R cases fintypeOrInfinite J · rw [← Cardinal.lift_le, Cardinal.mk_range_eq_of_injective v.injective, Cardinal.mk_fintype J] convert Cardinal.lift_le.{v}.2 (basis_le_span' v hJ) simp · let S : J → Set ι := fun j => ↑(v.repr j).support let S' : J → Set M := fun j => v '' S j have hs : range v ⊆ ⋃ j, S' j := by intro b hb rcases mem_range.1 hb with ⟨i, hi⟩ have : span R J ≤ comap v.repr.toLinearMap (Finsupp.supported R R (⋃ j, S j)) := span_le.2 fun j hj x hx => ⟨_, ⟨⟨j, hj⟩, rfl⟩, hx⟩ rw [hJ] at this replace : v.repr (v i) ∈ Finsupp.supported R R (⋃ j, S j) := this trivial rw [v.repr_self, Finsupp.mem_supported, Finsupp.support_single_ne_zero _ one_ne_zero] at this · subst b rcases mem_iUnion.1 (this (Finset.mem_singleton_self _)) with ⟨j, hj⟩ exact mem_iUnion.2 ⟨j, (mem_image _ _ _).2 ⟨i, hj, rfl⟩⟩ refine le_of_not_gt fun IJ => ?_ suffices #(⋃ j, S' j) < #(range v) by exact not_le_of_gt this ⟨Set.embeddingOfSubset _ _ hs⟩ refine lt_of_le_of_lt (le_trans Cardinal.mk_iUnion_le_sum_mk (Cardinal.sum_le_sum _ (fun _ => ℵ₀) ?_)) ?_ · exact fun j => (Cardinal.lt_aleph0_of_finite _).le · simpa end RankCondition section StrongRankCondition variable [StrongRankCondition R] open Submodule Finsupp -- An auxiliary lemma for `linearIndependent_le_span'`, -- with the additional assumption that the linearly independent family is finite. theorem linearIndependent_le_span_aux' {ι : Type*} [Fintype ι] (v : ι → M) (i : LinearIndependent R v) (w : Set M) [Fintype w] (s : range v ≤ span R w) : Fintype.card ι ≤ Fintype.card w := by -- We construct an injective linear map `(ι → R) →ₗ[R] (w → R)`, -- by thinking of `f : ι → R` as a linear combination of the finite family `v`, -- and expressing that (using the axiom of choice) as a linear combination over `w`. -- We can do this linearly by constructing the map on a basis. 
fapply card_le_of_injective' R · apply Finsupp.linearCombination exact fun i => Span.repr R w ⟨v i, s (mem_range_self i)⟩ · intro f g h apply_fun linearCombination R ((↑) : w → M) at h simp only [linearCombination_linearCombination, Span.finsupp_linearCombination_repr] at h exact i h /-- If `R` satisfies the strong rank condition, then any linearly independent family `v : ι → M` contained in the span of some finite `w : Set M`, is itself finite. -/ lemma LinearIndependent.finite_of_le_span_finite {ι : Type*} (v : ι → M) (i : LinearIndependent R v) (w : Set M) [Finite w] (s : range v ≤ span R w) : Finite ι := letI := Fintype.ofFinite w Fintype.finite <| fintypeOfFinsetCardLe (Fintype.card w) fun t => by let v' := fun x : (t : Set ι) => v x have i' : LinearIndependent R v' := i.comp _ Subtype.val_injective have s' : range v' ≤ span R w := (range_comp_subset_range _ _).trans s simpa using linearIndependent_le_span_aux' v' i' w s' /-- If `R` satisfies the strong rank condition, then for any linearly independent family `v : ι → M` contained in the span of some finite `w : Set M`, the cardinality of `ι` is bounded by the cardinality of `w`. -/ theorem linearIndependent_le_span' {ι : Type*} (v : ι → M) (i : LinearIndependent R v) (w : Set M) [Fintype w] (s : range v ≤ span R w) : #ι ≤ Fintype.card w := by haveI : Finite ι := i.finite_of_le_span_finite v w s letI := Fintype.ofFinite ι rw [Cardinal.mk_fintype] simp only [Nat.cast_le] exact linearIndependent_le_span_aux' v i w s /-- If `R` satisfies the strong rank condition, then for any linearly independent family `v : ι → M` and any finite spanning set `w : Set M`, the cardinality of `ι` is bounded by the cardinality of `w`. -/ theorem linearIndependent_le_span {ι : Type*} (v : ι → M) (i : LinearIndependent R v) (w : Set M) [Fintype w] (s : span R w = ⊤) : #ι ≤ Fintype.card w := by apply linearIndependent_le_span' v i w rw [s] exact le_top /-- A version of `linearIndependent_le_span` for `Finset`. 
-/ theorem linearIndependent_le_span_finset {ι : Type*} (v : ι → M) (i : LinearIndependent R v) (w : Finset M) (s : span R (w : Set M) = ⊤) : #ι ≤ w.card := by simpa only [Finset.coe_sort_coe, Fintype.card_coe] using linearIndependent_le_span v i w s /-- An auxiliary lemma for `linearIndependent_le_basis`: we handle the case where the basis `b` is infinite. -/ theorem linearIndependent_le_infinite_basis {ι : Type w} (b : Basis ι R M) [Infinite ι] {κ : Type w} (v : κ → M) (i : LinearIndependent R v) : #κ ≤ #ι := by classical by_contra h rw [not_le, ← Cardinal.mk_finset_of_infinite ι] at h let Φ := fun k : κ => (b.repr (v k)).support obtain ⟨s, w : Infinite ↑(Φ ⁻¹' {s})⟩ := Cardinal.exists_infinite_fiber Φ h (by infer_instance) let v' := fun k : Φ ⁻¹' {s} => v k have i' : LinearIndependent R v' := i.comp _ Subtype.val_injective have w' : Finite (Φ ⁻¹' {s}) := by apply i'.finite_of_le_span_finite v' (s.image b) rintro m ⟨⟨p, ⟨rfl⟩⟩, rfl⟩ simp only [SetLike.mem_coe, Finset.coe_image] apply Basis.mem_span_repr_support exact w.false /-- Over any ring `R` satisfying the strong rank condition, if `b` is a basis for a module `M`, and `s` is a linearly independent set, then the cardinality of `s` is bounded by the cardinality of `b`. -/ theorem linearIndependent_le_basis {ι : Type w} (b : Basis ι R M) {κ : Type w} (v : κ → M) (i : LinearIndependent R v) : #κ ≤ #ι := by classical -- We split into cases depending on whether `ι` is infinite. cases fintypeOrInfinite ι · rw [Cardinal.mk_fintype ι] -- When `ι` is finite, we have `linearIndependent_le_span`, haveI : Nontrivial R := nontrivial_of_invariantBasisNumber R rw [Fintype.card_congr (Equiv.ofInjective b b.injective)] exact linearIndependent_le_span v i (range b) b.span_eq · -- and otherwise we have `linearIndependent_le_infinite_basis`. 
exact linearIndependent_le_infinite_basis b v i /-- `StrongRankCondition` implies that if there is an injective linear map `(α →₀ R) →ₗ[R] β →₀ R`, then the cardinal of `α` is smaller than or equal to the cardinal of `β`. -/ theorem card_le_of_injective'' {α : Type v} {β : Type v} (f : (α →₀ R) →ₗ[R] β →₀ R) (i : Injective f) : #α ≤ #β := by let b : Basis β R (β →₀ R) := ⟨1⟩ apply linearIndependent_le_basis b (fun (i : α) ↦ f (Finsupp.single i 1)) rw [LinearIndependent] have : (linearCombination R fun i ↦ f (Finsupp.single i 1)) = f := by ext a b; simp exact this.symm ▸ i /-- If `R` satisfies the strong rank condition, then for any linearly independent family `v : ι → M` and spanning set `w : Set M`, the cardinality of `ι` is bounded by the cardinality of `w`. -/ theorem linearIndependent_le_span'' {ι : Type v} {v : ι → M} (i : LinearIndependent R v) (w : Set M) (s : span R w = ⊤) : #ι ≤ #w := by fapply card_le_of_injective'' (R := R) · apply Finsupp.linearCombination exact fun i ↦ Span.repr R w ⟨v i, s ▸ trivial⟩ · intro f g h apply_fun linearCombination R ((↑) : w → M) at h simp only [linearCombination_linearCombination, Span.finsupp_linearCombination_repr] at h exact i h /-- Let `R` satisfy the strong rank condition. If `m` elements of a free rank `n` `R`-module are linearly independent, then `m ≤ n`. -/ theorem Basis.card_le_card_of_linearIndependent_aux {R : Type*} [Semiring R] [StrongRankCondition R] (n : ℕ) {m : ℕ} (v : Fin m → Fin n → R) : LinearIndependent R v → m ≤ n := fun h => by simpa using linearIndependent_le_basis (Pi.basisFun R (Fin n)) v h -- When the basis is not infinite this need not be true! /-- Over any ring `R` satisfying the strong rank condition, if `b` is an infinite basis for a module `M`, then every maximal linearly independent set has the same cardinality as `b`. 
This proof (along with some of the lemmas above) comes from [Les familles libres maximales d'un module ont-elles le meme cardinal?][lazarus1973] -/ theorem maximal_linearIndependent_eq_infinite_basis {ι : Type w} (b : Basis ι R M) [Infinite ι] {κ : Type w} (v : κ → M) (i : LinearIndependent R v) (m : i.Maximal) : #κ = #ι := by apply le_antisymm · exact linearIndependent_le_basis b v i · haveI : Nontrivial R := nontrivial_of_invariantBasisNumber R exact infinite_basis_le_maximal_linearIndependent b v i m theorem Module.Basis.mk_eq_rank'' {ι : Type v} (v : Basis ι R M) : #ι = Module.rank R M := by haveI := nontrivial_of_invariantBasisNumber R rw [Module.rank_def] apply le_antisymm · trans swap · apply le_ciSup (Cardinal.bddAbove_range _) exact ⟨Set.range v, by rw [LinearIndepOn] convert v.reindexRange.linearIndependent simp⟩ · exact (Cardinal.mk_range_eq v v.injective).ge · apply ciSup_le' rintro ⟨s, li⟩ apply linearIndependent_le_basis v _ li theorem Module.Basis.mk_range_eq_rank (v : Basis ι R M) : #(range v) = Module.rank R M := v.reindexRange.mk_eq_rank'' /-- If a vector space has a finite basis, then its dimension (seen as a cardinal) is equal to the cardinality of the basis. 
-/ theorem rank_eq_card_basis {ι : Type w} [Fintype ι] (h : Basis ι R M) : Module.rank R M = Fintype.card ι := by classical haveI := nontrivial_of_invariantBasisNumber R rw [← h.mk_range_eq_rank, Cardinal.mk_fintype, Set.card_range_of_injective h.injective] namespace Module.Basis theorem card_le_card_of_linearIndependent {ι : Type*} [Fintype ι] (b : Basis ι R M) {ι' : Type*} [Fintype ι'] {v : ι' → M} (hv : LinearIndependent R v) : Fintype.card ι' ≤ Fintype.card ι := by letI := nontrivial_of_invariantBasisNumber R simpa [rank_eq_card_basis b, Cardinal.mk_fintype] using hv.cardinal_lift_le_rank theorem card_le_card_of_submodule (N : Submodule R M) [Fintype ι] (b : Basis ι R M) [Fintype ι'] (b' : Basis ι' R N) : Fintype.card ι' ≤ Fintype.card ι := b.card_le_card_of_linearIndependent (b'.linearIndependent.map_injOn N.subtype N.injective_subtype.injOn) theorem card_le_card_of_le {N O : Submodule R M} (hNO : N ≤ O) [Fintype ι] (b : Basis ι R O) [Fintype ι'] (b' : Basis ι' R N) : Fintype.card ι' ≤ Fintype.card ι := b.card_le_card_of_linearIndependent (b'.linearIndependent.map_injOn (inclusion hNO) (N.inclusion_injective _).injOn) theorem mk_eq_rank (v : Basis ι R M) : Cardinal.lift.{v} #ι = Cardinal.lift.{w} (Module.rank R M) := by haveI := nontrivial_of_invariantBasisNumber R rw [← v.mk_range_eq_rank, Cardinal.mk_range_eq_of_injective v.injective] theorem mk_eq_rank'.{m} (v : Basis ι R M) : Cardinal.lift.{max v m} #ι = Cardinal.lift.{max w m} (Module.rank R M) := Cardinal.lift_umax_eq.{w, v, m}.mpr v.mk_eq_rank end Module.Basis theorem rank_span {v : ι → M} (hv : LinearIndependent R v) : Module.rank R ↑(span R (range v)) = #(range v) := by haveI := nontrivial_of_invariantBasisNumber R rw [← Cardinal.lift_inj, ← (Basis.span hv).mk_eq_rank, Cardinal.mk_range_eq_of_injective (@LinearIndependent.injective ι R M v _ _ _ _ hv)] theorem rank_span_set {s : Set M} (hs : LinearIndepOn R id s) : Module.rank R ↑(span R s) = #s := by rw [← @setOf_mem_eq _ s, ← 
Subtype.range_coe_subtype] exact rank_span hs theorem toENat_rank_span_set {v : ι → M} {s : Set ι} (hs : LinearIndepOn R v s) : (Module.rank R <| span R <| v '' s).toENat = s.encard := by rw [image_eq_range, ← hs.injOn.encard_image, ← toENat_cardinalMk, image_eq_range, ← rank_span hs.linearIndependent] /-- An induction (and recursion) principle for proving results about all submodules of a fixed finite free module `M`. A property is true for all submodules of `M` if it satisfies the following "inductive step": the property is true for a submodule `N` if it's true for all submodules `N'` of `N` with the property that there exists `0 ≠ x ∈ N` such that the sum `N' + Rx` is direct. -/ def Submodule.inductionOnRank {R M} [Ring R] [StrongRankCondition R] [AddCommGroup M] [Module R M] [IsDomain R] [Finite ι] (b : Basis ι R M) (P : Submodule R M → Sort*) (ih : ∀ N : Submodule R M, (∀ N' ≤ N, ∀ x ∈ N, (∀ (c : R), ∀ y ∈ N', c • x + y = (0 : M) → c = 0) → P N') → P N) (N : Submodule R M) : P N := letI := Fintype.ofFinite ι Submodule.inductionOnRankAux b P ih (Fintype.card ι) N fun hs hli => by simpa using b.card_le_card_of_linearIndependent hli /-- If `S` a module-finite free `R`-algebra, then the `R`-rank of a nonzero `R`-free ideal `I` of `S` is the same as the rank of `S`. 
-/ theorem Ideal.rank_eq {R S : Type*} [CommRing R] [StrongRankCondition R] [Ring S] [IsDomain S] [Algebra R S] {n m : Type*} [Fintype n] [Fintype m] (b : Basis n R S) {I : Ideal S} (hI : I ≠ ⊥) (c : Basis m R I) : Fintype.card m = Fintype.card n := by obtain ⟨a, ha⟩ := Submodule.nonzero_mem_of_bot_lt (bot_lt_iff_ne_bot.mpr hI) have : LinearIndependent R fun i => b i • a := by have hb := b.linearIndependent rw [Fintype.linearIndependent_iff] at hb ⊢ intro g hg apply hb g simp only [← smul_assoc, ← Finset.sum_smul, smul_eq_zero] at hg exact hg.resolve_right ha exact le_antisymm (b.card_le_card_of_linearIndependent (c.linearIndependent.map' (Submodule.subtype I) ((LinearMap.ker_eq_bot (f := (Submodule.subtype I : I →ₗ[R] S))).mpr Subtype.coe_injective))) (c.card_le_card_of_linearIndependent this) namespace Module theorem finrank_eq_nat_card_basis (h : Basis ι R M) : finrank R M = Nat.card ι := by rw [Nat.card, ← toNat_lift.{v}, h.mk_eq_rank, toNat_lift, finrank] /-- If a vector space (or module) has a finite basis, then its dimension (or rank) is equal to the cardinality of the basis. -/ theorem finrank_eq_card_basis {ι : Type w} [Fintype ι] (h : Basis ι R M) : finrank R M = Fintype.card ι := finrank_eq_of_rank_eq (rank_eq_card_basis h) /-- If a free module is of finite rank, then the cardinality of any basis is equal to its `finrank`. -/ theorem mk_finrank_eq_card_basis [Module.Finite R M] {ι : Type w} (h : Basis ι R M) : (finrank R M : Cardinal.{w}) = #ι := by cases @nonempty_fintype _ (Module.Finite.finite_basis h) rw [Cardinal.mk_fintype, finrank_eq_card_basis h] /-- If a vector space (or module) has a finite basis, then its dimension (or rank) is equal to the cardinality of the basis. This lemma uses a `Finset` instead of indexed types. 
-/ theorem finrank_eq_card_finset_basis {ι : Type w} {b : Finset ι} (h : Basis b R M) : finrank R M = Finset.card b := by rw [finrank_eq_card_basis h, Fintype.card_coe] variable (R) @[simp] theorem rank_self : Module.rank R R = 1 := by rw [← Cardinal.lift_inj, ← (Basis.singleton PUnit R).mk_eq_rank, Cardinal.mk_punit] /-- A ring satisfying `StrongRankCondition` (such as a `DivisionRing`) is one-dimensional as a module over itself. -/ @[simp] theorem finrank_self : finrank R R = 1 := finrank_eq_of_rank_eq (by simp) /-- Given a basis of a ring over itself indexed by a type `ι`, then `ι` is `Unique`. -/ noncomputable def _root_.Module.Basis.unique {ι : Type*} (b : Basis ι R R) : Unique ι := by have : Cardinal.mk ι = ↑(Module.finrank R R) := (Module.mk_finrank_eq_card_basis b).symm have : Subsingleton ι ∧ Nonempty ι := by simpa [Cardinal.eq_one_iff_unique] exact Nonempty.some ((unique_iff_subsingleton_and_nonempty _).2 this) variable (M) /-- The rank of a finite module is finite. -/ theorem rank_lt_aleph0 [Module.Finite R M] : Module.rank R M < ℵ₀ := by simp only [Module.rank_def] obtain ⟨S, hS⟩ := Module.finite_def.mp ‹_› refine (ciSup_le' fun i => ?_).trans_lt (nat_lt_aleph0 S.card) exact linearIndependent_le_span_finset _ i.prop S hS noncomputable instance {R M : Type*} [DivisionRing R] [AddCommGroup M] [Module R M] {s t : Set M} [Module.Finite R (span R t)] (hs : LinearIndepOn R id s) (hst : s ⊆ t) : Fintype (hs.extend hst) := by refine Classical.choice (Cardinal.lt_aleph0_iff_fintype.1 ?_) rw [← rank_span_set (hs.linearIndepOn_extend hst), hs.span_extend_eq_span] exact Module.rank_lt_aleph0 .. /-- If `M` is finite, `finrank M = rank M`. -/ @[simp] theorem finrank_eq_rank [Module.Finite R M] : ↑(finrank R M) = Module.rank R M := by rw [Module.finrank, cast_toNat_of_lt_aleph0 (rank_lt_aleph0 R M)] /-- If `M` is finite, then `finrank N = rank N` for all `N : Submodule M`. Note that such an `N` need not be finitely generated. 
-/ protected theorem _root_.Submodule.finrank_eq_rank [Module.Finite R M] (N : Submodule R M) : finrank R N = Module.rank R N := by rw [finrank, Cardinal.cast_toNat_of_lt_aleph0] exact lt_of_le_of_lt (Submodule.rank_le N) (rank_lt_aleph0 R M) end Module variable {M'} [AddCommMonoid M'] [Module R M'] theorem LinearMap.finrank_le_finrank_of_injective [Module.Finite R M'] {f : M →ₗ[R] M'} (hf : Function.Injective f) : finrank R M ≤ finrank R M' := finrank_le_finrank_of_rank_le_rank (LinearMap.lift_rank_le_of_injective _ hf) (rank_lt_aleph0 _ _) theorem LinearMap.finrank_range_le [Module.Finite R M] (f : M →ₗ[R] M') : finrank R (LinearMap.range f) ≤ finrank R M := finrank_le_finrank_of_rank_le_rank (lift_rank_range_le f) (rank_lt_aleph0 _ _) theorem LinearMap.finrank_le_of_isSMulRegular {S : Type*} [CommSemiring S] [Algebra S R] [Module S M] [IsScalarTower S R M] (L L' : Submodule R M) [Module.Finite R L'] {s : S} (hr : IsSMulRegular M s) (h : ∀ x ∈ L, s • x ∈ L') : Module.finrank R L ≤ Module.finrank R L' := by refine finrank_le_finrank_of_rank_le_rank (lift_le.mpr <| rank_le_of_isSMulRegular L L' hr h) ?_ rw [← Module.finrank_eq_rank R L'] exact nat_lt_aleph0 (finrank R ↥L') variable (R S M) in /-- Also see `Module.finrank_top_le_finrank_of_isScalarTower_of_free` for a version with different typeclass constraints. -/ lemma Module.finrank_top_le_finrank_of_isScalarTower [Module.Finite R M] [Semiring S] [Module S M] [Module R S] [IsScalarTower R S S] [FaithfulSMul R S] [IsScalarTower R S M] : finrank S M ≤ finrank R M := by rw [finrank, finrank, Cardinal.toNat_le_iff_le_of_lt_aleph0] · exact rank_top_le_rank_of_isScalarTower R S M · exact lt_of_le_of_lt (rank_top_le_rank_of_isScalarTower R S M) (Module.rank_lt_aleph0 R M) · exact Module.rank_lt_aleph0 _ _ variable (R) in /-- Also see `Module.finrank_bot_le_finrank_of_isScalarTower_of_free` for a version with different typeclass constraints. 
-/ lemma Module.finrank_bot_le_finrank_of_isScalarTower (S T : Type*) [Semiring S] [Semiring T] [Module R T] [Module S T] [Module R S] [IsScalarTower R S T] [IsScalarTower S T T] [FaithfulSMul S T] [Module.Finite R T] : finrank R S ≤ finrank R T := finrank_le_finrank_of_rank_le_rank (lift_rank_bot_le_lift_rank_of_isScalarTower R S T) (Module.rank_lt_aleph0 _ _) end StrongRankCondition namespace Submodule variable {K M : Type*} [DivisionRing K] [AddCommGroup M] [Module K M] {s : Set M} {x : M} [Module.Finite K (span K s)] variable (K s) in /-- This is a version of `exists_linearIndependent` with an upper estimate on the size of the finite set we choose. -/ theorem exists_finset_span_eq_linearIndepOn : ∃ t : Finset M, ↑t ⊆ s ∧ t.card = finrank K (span K s) ∧ span K t = span K s ∧ LinearIndepOn K id (t : Set M) := by rcases exists_linearIndependent K s with ⟨t, ht_sub, ht_span, ht_indep⟩ obtain ⟨t, rfl, ht_card⟩ : ∃ u : Finset M, ↑u = t ∧ u.card = finrank K (span K s) := by rw [← Cardinal.mk_set_eq_nat_iff_finset, finrank_eq_rank, ← ht_span, rank_span_set ht_indep] exact ⟨t, ht_sub, ht_card, ht_span, ht_indep⟩ variable (K s) in theorem exists_fun_fin_finrank_span_eq : ∃ f : Fin (finrank K (span K s)) → M, (∀ i, f i ∈ s) ∧ span K (range f) = span K s ∧ LinearIndependent K f := by rcases exists_finset_span_eq_linearIndepOn K s with ⟨t, hts, ht_card, ht_span, ht_indep⟩ set e := (Finset.equivFinOfCardEq ht_card).symm exact ⟨(↑) ∘ e, fun i ↦ hts (e i).2, by simpa, ht_indep.comp _ e.injective⟩ /-- This is a version of `mem_span_set` with an estimate on the number of terms in the sum. 
-/ theorem mem_span_set_iff_exists_finsupp_le_finrank : x ∈ span K s ↔ ∃ c : M →₀ K, c.support.card ≤ finrank K (span K s) ∧ ↑c.support ⊆ s ∧ c.sum (fun mi r ↦ r • mi) = x := by constructor · intro h rcases exists_finset_span_eq_linearIndepOn K s with ⟨t, ht_sub, ht_card, ht_span, ht_indep⟩ rcases mem_span_set.mp (ht_span ▸ h) with ⟨c, hct, hx⟩ refine ⟨c, ?_, hct.trans ht_sub, hx⟩ exact ht_card ▸ Finset.card_mono hct · rintro ⟨c, -, hcs, hx⟩ exact mem_span_set.mpr ⟨c, hcs, hx⟩ end Submodule namespace Algebra /-- An extension of rings `R ⊆ S` is quadratic if `S` is a free `R`-algebra of rank `2`. -/ -- TODO. use this in connection with `NumberTheory.Zsqrtd` class IsQuadraticExtension (R S : Type*) [CommSemiring R] [StrongRankCondition R] [Semiring S] [Algebra R S] extends Module.Free R S where finrank_eq_two' : Module.finrank R S = 2 theorem IsQuadraticExtension.finrank_eq_two (R S : Type*) [CommSemiring R] [StrongRankCondition R] [Semiring S] [Algebra R S] [IsQuadraticExtension R S] : Module.finrank R S = 2 := finrank_eq_two' end Algebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Basic.lean
import Mathlib.Algebra.Algebra.Tower import Mathlib.LinearAlgebra.LinearIndependent.Basic import Mathlib.Data.Set.Card /-! # Dimension of modules and vector spaces ## Main definitions * The rank of a module is defined as `Module.rank : Cardinal`. This is defined as the supremum of the cardinalities of linearly independent subsets. ## Main statements * `LinearMap.rank_le_of_injective`: the source of an injective linear map has dimension at most that of the target. * `LinearMap.rank_le_of_surjective`: the target of a surjective linear map has dimension at most that of that source. ## Implementation notes Many theorems in this file are not universe-generic when they relate dimensions in different universes. They should be as general as they can be without inserting `lift`s. The types `M`, `M'`, ... all live in different universes, and `M₁`, `M₂`, ... all live in the same universe. -/ noncomputable section universe w w' u u' v v' variable {R : Type u} {R' : Type u'} {M M₁ : Type v} {M' : Type v'} open Cardinal Submodule Function Set section Module section variable [Semiring R] [AddCommMonoid M] [Module R M] variable (R M) /-- The rank of a module, defined as a term of type `Cardinal`. We define this as the supremum of the cardinalities of linearly independent subsets. The supremum may not be attained, see https://mathoverflow.net/a/263053. For a free module over any ring satisfying the strong rank condition (e.g. left-Noetherian rings, commutative rings, and in particular division rings and fields), this is the same as the dimension of the space (i.e. the cardinality of any basis). In particular this agrees with the usual notion of the dimension of a vector space. See also `Module.finrank` for a `ℕ`-valued function which returns the correct value for a finite-dimensional vector space (but 0 for an infinite-dimensional vector space). 
-/ @[stacks 09G3 "first part"] protected irreducible_def Module.rank : Cardinal := ⨆ ι : { s : Set M // LinearIndepOn R id s }, (#ι.1) theorem rank_le_card : Module.rank R M ≤ #M := (Module.rank_def _ _).trans_le (ciSup_le' fun _ ↦ mk_set_le _) instance nonempty_linearIndependent_set : Nonempty {s : Set M // LinearIndepOn R id s} := ⟨⟨∅, linearIndepOn_empty _ _⟩⟩ end namespace LinearIndependent variable [Semiring R] [AddCommMonoid M] [Module R M] variable [Nontrivial R] theorem cardinal_lift_le_rank {ι : Type w} {v : ι → M} (hv : LinearIndependent R v) : Cardinal.lift.{v} #ι ≤ Cardinal.lift.{w} (Module.rank R M) := by rw [Module.rank] refine le_trans ?_ (lift_le.mpr <| le_ciSup (bddAbove_range _) ⟨_, hv.linearIndepOn_id⟩) exact lift_mk_le'.mpr ⟨(Equiv.ofInjective _ hv.injective).toEmbedding⟩ lemma aleph0_le_rank {ι : Type w} [Infinite ι] {v : ι → M} (hv : LinearIndependent R v) : ℵ₀ ≤ Module.rank R M := aleph0_le_lift.mp <| (aleph0_le_lift.mpr <| aleph0_le_mk ι).trans hv.cardinal_lift_le_rank theorem cardinal_le_rank {ι : Type v} {v : ι → M} (hv : LinearIndependent R v) : #ι ≤ Module.rank R M := by simpa using hv.cardinal_lift_le_rank theorem cardinal_le_rank' {s : Set M} (hs : LinearIndependent R (fun x => x : s → M)) : #s ≤ Module.rank R M := hs.cardinal_le_rank theorem _root_.LinearIndepOn.encard_le_toENat_rank {ι : Type*} {v : ι → M} {s : Set ι} (hs : LinearIndepOn R v s) : s.encard ≤ (Module.rank R M).toENat := by simpa using OrderHom.mono (β := ℕ∞) Cardinal.toENat hs.linearIndependent.cardinal_lift_le_rank end LinearIndependent section SurjectiveInjective section Semiring variable [Semiring R] [AddCommMonoid M] [Module R M] [Semiring R'] section variable [AddCommMonoid M'] [Module R' M'] /-- If `M / R` and `M' / R'` are modules, `i : R' → R` is an injective map non-zero elements, `j : M →+ M'` is an injective monoid homomorphism, such that the scalar multiplications on `M` and `M'` are compatible, then the rank of `M / R` is smaller than or equal to the rank 
of `M' / R'`. As a special case, taking `R = R'` it is `LinearMap.lift_rank_le_of_injective`. -/ theorem lift_rank_le_of_injective_injectiveₛ (i : R' → R) (j : M →+ M') (hi : Injective i) (hj : Injective j) (hc : ∀ (r : R') (m : M), j (i r • m) = r • j m) : lift.{v'} (Module.rank R M) ≤ lift.{v} (Module.rank R' M') := by simp_rw [Module.rank, lift_iSup (bddAbove_range _)] exact ciSup_mono' (bddAbove_range _) fun ⟨s, h⟩ ↦ ⟨⟨j '' s, LinearIndepOn.id_image (h.linearIndependent.map_of_injective_injectiveₛ i j hi hj hc)⟩, lift_mk_le'.mpr ⟨(Equiv.Set.image j s hj).toEmbedding⟩⟩ /-- If `M / R` and `M' / R'` are modules, `i : R → R'` is a surjective map, and `j : M →+ M'` is an injective monoid homomorphism, such that the scalar multiplications on `M` and `M'` are compatible, then the rank of `M / R` is smaller than or equal to the rank of `M' / R'`. As a special case, taking `R = R'` it is `LinearMap.lift_rank_le_of_injective`. -/ theorem lift_rank_le_of_surjective_injective (i : R → R') (j : M →+ M') (hi : Surjective i) (hj : Injective j) (hc : ∀ (r : R) (m : M), j (r • m) = i r • j m) : lift.{v'} (Module.rank R M) ≤ lift.{v} (Module.rank R' M') := by obtain ⟨i', hi'⟩ := hi.hasRightInverse refine lift_rank_le_of_injective_injectiveₛ i' j (fun _ _ h ↦ ?_) hj fun r m ↦ ?_ · apply_fun i at h rwa [hi', hi'] at h rw [hc (i' r) m, hi'] /-- If `M / R` and `M' / R'` are modules, `i : R → R'` is a bijective map which maps zero to zero, `j : M ≃+ M'` is a group isomorphism, such that the scalar multiplications on `M` and `M'` are compatible, then the rank of `M / R` is equal to the rank of `M' / R'`. As a special case, taking `R = R'` it is `LinearEquiv.lift_rank_eq`. 
-/ theorem lift_rank_eq_of_equiv_equiv (i : R → R') (j : M ≃+ M') (hi : Bijective i) (hc : ∀ (r : R) (m : M), j (r • m) = i r • j m) : lift.{v'} (Module.rank R M) = lift.{v} (Module.rank R' M') := (lift_rank_le_of_surjective_injective i j hi.2 j.injective hc).antisymm <| lift_rank_le_of_injective_injectiveₛ i j.symm hi.1 j.symm.injective fun _ _ ↦ j.symm_apply_eq.2 <| by simp_all end section variable [AddCommMonoid M₁] [Module R' M₁] /-- The same-universe version of `lift_rank_le_of_injective_injective`. -/ theorem rank_le_of_injective_injectiveₛ (i : R' → R) (j : M →+ M₁) (hi : Injective i) (hj : Injective j) (hc : ∀ (r : R') (m : M), j (i r • m) = r • j m) : Module.rank R M ≤ Module.rank R' M₁ := by simpa only [lift_id] using lift_rank_le_of_injective_injectiveₛ i j hi hj hc /-- The same-universe version of `lift_rank_le_of_surjective_injective`. -/ theorem rank_le_of_surjective_injective (i : R → R') (j : M →+ M₁) (hi : Surjective i) (hj : Injective j) (hc : ∀ (r : R) (m : M), j (r • m) = i r • j m) : Module.rank R M ≤ Module.rank R' M₁ := by simpa only [lift_id] using lift_rank_le_of_surjective_injective i j hi hj hc /-- The same-universe version of `lift_rank_eq_of_equiv_equiv`. -/ theorem rank_eq_of_equiv_equiv (i : R → R') (j : M ≃+ M₁) (hi : Bijective i) (hc : ∀ (r : R) (m : M), j (r • m) = i r • j m) : Module.rank R M = Module.rank R' M₁ := by simpa only [lift_id] using lift_rank_eq_of_equiv_equiv i j hi hc end end Semiring section Ring variable [Ring R] [AddCommGroup M] [Module R M] [Ring R'] /-- If `M / R` and `M' / R'` are modules, `i : R' → R` is a map which sends non-zero elements to non-zero elements, `j : M →+ M'` is an injective group homomorphism, such that the scalar multiplications on `M` and `M'` are compatible, then the rank of `M / R` is smaller than or equal to the rank of `M' / R'`. As a special case, taking `R = R'` it is `LinearMap.lift_rank_le_of_injective`. 
-/ theorem lift_rank_le_of_injective_injective [AddCommGroup M'] [Module R' M'] (i : R' → R) (j : M →+ M') (hi : ∀ r, i r = 0 → r = 0) (hj : Injective j) (hc : ∀ (r : R') (m : M), j (i r • m) = r • j m) : lift.{v'} (Module.rank R M) ≤ lift.{v} (Module.rank R' M') := by simp_rw [Module.rank, lift_iSup (bddAbove_range _)] exact ciSup_mono' (bddAbove_range _) fun ⟨s, h⟩ ↦ ⟨⟨j '' s, LinearIndepOn.id_image <| h.linearIndependent.map_of_injective_injective i j hi (fun _ _ ↦ hj <| by rwa [j.map_zero]) hc⟩, lift_mk_le'.mpr ⟨(Equiv.Set.image j s hj).toEmbedding⟩⟩ /-- The same-universe version of `lift_rank_le_of_injective_injective`. -/ theorem rank_le_of_injective_injective [AddCommGroup M₁] [Module R' M₁] (i : R' → R) (j : M →+ M₁) (hi : ∀ r, i r = 0 → r = 0) (hj : Injective j) (hc : ∀ (r : R') (m : M), j (i r • m) = r • j m) : Module.rank R M ≤ Module.rank R' M₁ := by simpa only [lift_id] using lift_rank_le_of_injective_injective i j hi hj hc end Ring namespace Algebra variable {R : Type w} {S : Type v} [CommSemiring R] [Semiring S] [Algebra R S] {R' : Type w'} {S' : Type v'} [CommSemiring R'] [Semiring S'] [Algebra R' S'] /-- If `S / R` and `S' / R'` are algebras, `i : R' →+* R` and `j : S →+* S'` are injective ring homomorphisms, such that `R' → R → S → S'` and `R' → S'` commute, then the rank of `S / R` is smaller than or equal to the rank of `S' / R'`. 
-/ theorem lift_rank_le_of_injective_injective (i : R' →+* R) (j : S →+* S') (hi : Injective i) (hj : Injective j) (hc : (j.comp (algebraMap R S)).comp i = algebraMap R' S') : lift.{v'} (Module.rank R S) ≤ lift.{v} (Module.rank R' S') := by refine _root_.lift_rank_le_of_injective_injectiveₛ i j hi hj fun r _ ↦ ?_ have := congr($hc r) simp only [RingHom.coe_comp, comp_apply] at this simp_rw [smul_def, AddMonoidHom.coe_coe, map_mul, this] /-- If `S / R` and `S' / R'` are algebras, `i : R →+* R'` is a surjective ring homomorphism, `j : S →+* S'` is an injective ring homomorphism, such that `R → R' → S'` and `R → S → S'` commute, then the rank of `S / R` is smaller than or equal to the rank of `S' / R'`. -/ theorem lift_rank_le_of_surjective_injective (i : R →+* R') (j : S →+* S') (hi : Surjective i) (hj : Injective j) (hc : (algebraMap R' S').comp i = j.comp (algebraMap R S)) : lift.{v'} (Module.rank R S) ≤ lift.{v} (Module.rank R' S') := by refine _root_.lift_rank_le_of_surjective_injective i j hi hj fun r _ ↦ ?_ have := congr($hc r) simp only [RingHom.coe_comp, comp_apply] at this simp only [smul_def, AddMonoidHom.coe_coe, map_mul, this] /-- If `S / R` and `S' / R'` are algebras, `i : R ≃+* R'` and `j : S ≃+* S'` are ring isomorphisms, such that `R → R' → S'` and `R → S → S'` commute, then the rank of `S / R` is equal to the rank of `S' / R'`. -/ theorem lift_rank_eq_of_equiv_equiv (i : R ≃+* R') (j : S ≃+* S') (hc : (algebraMap R' S').comp i.toRingHom = j.toRingHom.comp (algebraMap R S)) : lift.{v'} (Module.rank R S) = lift.{v} (Module.rank R' S') := by refine _root_.lift_rank_eq_of_equiv_equiv i j i.bijective fun r _ ↦ ?_ have := congr($hc r) simp only [RingEquiv.toRingHom_eq_coe, RingHom.coe_comp, RingHom.coe_coe, comp_apply] at this simp only [smul_def, RingEquiv.coe_toAddEquiv, map_mul, this] variable {S' : Type v} [Semiring S'] [Algebra R' S'] /-- The same-universe version of `Algebra.lift_rank_le_of_injective_injective`. 
-/ theorem rank_le_of_injective_injective (i : R' →+* R) (j : S →+* S') (hi : Injective i) (hj : Injective j) (hc : (j.comp (algebraMap R S)).comp i = algebraMap R' S') : Module.rank R S ≤ Module.rank R' S' := by simpa only [lift_id] using lift_rank_le_of_injective_injective i j hi hj hc /-- The same-universe version of `Algebra.lift_rank_le_of_surjective_injective`. -/ theorem rank_le_of_surjective_injective (i : R →+* R') (j : S →+* S') (hi : Surjective i) (hj : Injective j) (hc : (algebraMap R' S').comp i = j.comp (algebraMap R S)) : Module.rank R S ≤ Module.rank R' S' := by simpa only [lift_id] using lift_rank_le_of_surjective_injective i j hi hj hc /-- The same-universe version of `Algebra.lift_rank_eq_of_equiv_equiv`. -/ theorem rank_eq_of_equiv_equiv (i : R ≃+* R') (j : S ≃+* S') (hc : (algebraMap R' S').comp i.toRingHom = j.toRingHom.comp (algebraMap R S)) : Module.rank R S = Module.rank R' S' := by simpa only [lift_id] using lift_rank_eq_of_equiv_equiv i j hc end Algebra end SurjectiveInjective variable [Semiring R] [AddCommMonoid M] [Module R M] [Semiring R'] [AddCommMonoid M'] [AddCommMonoid M₁] [Module R M'] [Module R M₁] [Module R' M'] [Module R' M₁] section theorem LinearMap.lift_rank_le_of_injective (f : M →ₗ[R] M') (i : Injective f) : Cardinal.lift.{v'} (Module.rank R M) ≤ Cardinal.lift.{v} (Module.rank R M') := lift_rank_le_of_injective_injectiveₛ (RingHom.id R) f (fun _ _ h ↦ h) i f.map_smul theorem LinearMap.rank_le_of_injective (f : M →ₗ[R] M₁) (i : Injective f) : Module.rank R M ≤ Module.rank R M₁ := Cardinal.lift_le.1 (f.lift_rank_le_of_injective i) /-- The rank of the range of a linear map is at most the rank of the source. -/ -- The proof is: a free submodule of the range lifts to a free submodule of the -- source, by arbitrarily lifting a basis. 
theorem lift_rank_range_le (f : M →ₗ[R] M') : Cardinal.lift.{v} (Module.rank R (LinearMap.range f)) ≤ Cardinal.lift.{v'} (Module.rank R M) := by simp only [Module.rank_def] rw [Cardinal.lift_iSup (Cardinal.bddAbove_range _)] apply ciSup_le' rintro ⟨s, li⟩ apply le_trans swap · apply Cardinal.lift_le.mpr refine le_ciSup (Cardinal.bddAbove_range _) ⟨rangeSplitting f '' s, ?_⟩ apply LinearIndependent.of_comp f.rangeRestrict convert li.comp (Equiv.Set.rangeSplittingImageEquiv f s) (Equiv.injective _) using 1 · exact (Cardinal.lift_mk_eq'.mpr ⟨Equiv.Set.rangeSplittingImageEquiv f s⟩).ge theorem rank_range_le (f : M →ₗ[R] M₁) : Module.rank R (LinearMap.range f) ≤ Module.rank R M := by simpa using lift_rank_range_le f theorem lift_rank_map_le (f : M →ₗ[R] M') (p : Submodule R M) : Cardinal.lift.{v} (Module.rank R (p.map f)) ≤ Cardinal.lift.{v'} (Module.rank R p) := by have h := lift_rank_range_le (f.comp (Submodule.subtype p)) rwa [LinearMap.range_comp, range_subtype] at h theorem rank_map_le (f : M →ₗ[R] M₁) (p : Submodule R M) : Module.rank R (p.map f) ≤ Module.rank R p := by simpa using lift_rank_map_le f p lemma Submodule.rank_mono {s t : Submodule R M} (h : s ≤ t) : Module.rank R s ≤ Module.rank R t := (Submodule.inclusion h).rank_le_of_injective fun ⟨x, _⟩ ⟨y, _⟩ eq => Subtype.eq <| show x = y from Subtype.ext_iff.1 eq /-- Two linearly equivalent vector spaces have the same dimension, a version with different universes. -/ theorem LinearEquiv.lift_rank_eq (f : M ≃ₗ[R] M') : Cardinal.lift.{v'} (Module.rank R M) = Cardinal.lift.{v} (Module.rank R M') := by apply le_antisymm · exact f.toLinearMap.lift_rank_le_of_injective f.injective · exact f.symm.toLinearMap.lift_rank_le_of_injective f.symm.injective /-- Two linearly equivalent vector spaces have the same dimension. 
-/ theorem LinearEquiv.rank_eq (f : M ≃ₗ[R] M₁) : Module.rank R M = Module.rank R M₁ := Cardinal.lift_inj.1 f.lift_rank_eq theorem lift_rank_range_of_injective (f : M →ₗ[R] M') (h : Injective f) : lift.{v} (Module.rank R (LinearMap.range f)) = lift.{v'} (Module.rank R M) := (LinearEquiv.ofInjective f h).lift_rank_eq.symm theorem rank_range_of_injective (f : M →ₗ[R] M₁) (h : Injective f) : Module.rank R (LinearMap.range f) = Module.rank R M := (LinearEquiv.ofInjective f h).rank_eq.symm theorem LinearEquiv.lift_rank_map_eq (f : M ≃ₗ[R] M') (p : Submodule R M) : lift.{v} (Module.rank R (p.map (f : M →ₗ[R] M'))) = lift.{v'} (Module.rank R p) := (f.submoduleMap p).lift_rank_eq.symm /-- Pushforwards of submodules along a `LinearEquiv` have the same dimension. -/ theorem LinearEquiv.rank_map_eq (f : M ≃ₗ[R] M₁) (p : Submodule R M) : Module.rank R (p.map (f : M →ₗ[R] M₁)) = Module.rank R p := (f.submoduleMap p).rank_eq.symm variable (R M) @[simp] theorem rank_top : Module.rank R (⊤ : Submodule R M) = Module.rank R M := (LinearEquiv.ofTop ⊤ rfl).rank_eq variable {R M} theorem rank_range_of_surjective (f : M →ₗ[R] M') (h : Surjective f) : Module.rank R (LinearMap.range f) = Module.rank R M' := by rw [LinearMap.range_eq_top.2 h, rank_top] theorem Submodule.rank_le (s : Submodule R M) : Module.rank R s ≤ Module.rank R M := by rw [← rank_top R M] exact rank_mono le_top theorem LinearMap.lift_rank_le_of_surjective (f : M →ₗ[R] M') (h : Surjective f) : lift.{v} (Module.rank R M') ≤ lift.{v'} (Module.rank R M) := by rw [← rank_range_of_surjective f h] apply lift_rank_range_le theorem LinearMap.rank_le_of_surjective (f : M →ₗ[R] M₁) (h : Surjective f) : Module.rank R M₁ ≤ Module.rank R M := by rw [← rank_range_of_surjective f h] apply rank_range_le lemma rank_le_of_isSMulRegular {S : Type*} [CommSemiring S] [Algebra S R] [Module S M] [IsScalarTower S R M] (L L' : Submodule R M) {s : S} (hr : IsSMulRegular M s) (h : ∀ x ∈ L, s • x ∈ L') : Module.rank R L ≤ Module.rank R L' := 
((Algebra.lsmul S R M s).restrict h).rank_le_of_injective <| fun _ _ h ↦ by simpa using hr (Subtype.ext_iff.mp h) variable (R R' M) in lemma Module.rank_top_le_rank_of_isScalarTower [Module R' M] [SMulWithZero R R'] [IsScalarTower R R' M] [FaithfulSMul R R'] [IsScalarTower R R' R'] : Module.rank R' M ≤ Module.rank R M := by rw [Module.rank, Module.rank] exact ciSup_le' fun ⟨s, hs⟩ ↦ le_ciSup_of_le (Cardinal.bddAbove_range _) ⟨s, hs.restrict_scalars (by simpa [← faithfulSMul_iff_injective_smul_one])⟩ le_rfl variable (R R') in lemma Module.lift_rank_bot_le_lift_rank_of_isScalarTower (T : Type w) [Module R R'] [NonAssocSemiring T] [Module R T] [Module R' T] [IsScalarTower R' T T] [FaithfulSMul R' T] [IsScalarTower R R' T] : Cardinal.lift.{w} (Module.rank R R') ≤ Cardinal.lift (Module.rank R T) := LinearMap.lift_rank_le_of_injective ((LinearMap.toSpanSingleton R' T 1).restrictScalars R) <| (faithfulSMul_iff_injective_smul_one R' T).mp ‹_› variable (R R') in lemma Module.rank_bot_le_rank_of_isScalarTower (T : Type u') [Module R R'] [NonAssocSemiring T] [Module R T] [Module R' T] [IsScalarTower R' T T] [FaithfulSMul R' T] [IsScalarTower R R' T] : Module.rank R R' ≤ Module.rank R T := by simpa using Module.lift_rank_bot_le_lift_rank_of_isScalarTower R R' T end end Module
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Constructions.lean
import Mathlib.Algebra.Algebra.Subalgebra.Lattice import Mathlib.LinearAlgebra.Basis.Prod import Mathlib.LinearAlgebra.Dimension.Free import Mathlib.LinearAlgebra.TensorProduct.Basis /-! # Rank of various constructions ## Main statements - `rank_quotient_add_rank_le` : `rank M/N + rank N ≤ rank M`. - `lift_rank_add_lift_rank_le_rank_prod`: `rank M × N ≤ rank M + rank N`. - `rank_span_le_of_finite`: `rank (span s) ≤ #s` for finite `s`. For free modules, we have - `rank_prod` : `rank M × N = rank M + rank N`. - `rank_finsupp` : `rank (ι →₀ M) = #ι * rank M` - `rank_directSum`: `rank (⨁ Mᵢ) = ∑ rank Mᵢ` - `rank_tensorProduct`: `rank (M ⊗ N) = rank M * rank N`. Lemmas for ranks of submodules and subalgebras are also provided. We have `finrank` variants for most lemmas as well. -/ noncomputable section universe u u' v v' u₁' w w' variable {R : Type u} {S : Type u'} {M : Type v} {M' : Type v'} {M₁ : Type v} variable {ι : Type w} {ι' : Type w'} {η : Type u₁'} {φ : η → Type*} open Basis Cardinal DirectSum Function Module Set Submodule section Quotient variable [Ring R] [CommRing S] [AddCommGroup M] [AddCommGroup M'] [AddCommGroup M₁] variable [Module R M] theorem LinearIndependent.sumElim_of_quotient {M' : Submodule R M} {ι₁ ι₂} {f : ι₁ → M'} (hf : LinearIndependent R f) (g : ι₂ → M) (hg : LinearIndependent R (Submodule.Quotient.mk (p := M') ∘ g)) : LinearIndependent R (Sum.elim (f · : ι₁ → M) g) := by refine .sum_type (hf.map' M'.subtype M'.ker_subtype) (.of_comp M'.mkQ hg) ?_ refine disjoint_def.mpr fun x h₁ h₂ ↦ ?_ have : x ∈ M' := span_le.mpr (Set.range_subset_iff.mpr fun i ↦ (f i).prop) h₁ obtain ⟨c, rfl⟩ := Finsupp.mem_span_range_iff_exists_finsupp.mp h₂ simp_rw [← Quotient.mk_eq_zero, ← mkQ_apply, map_finsuppSum, map_smul, mkQ_apply] at this rw [linearIndependent_iff.mp hg _ this, Finsupp.sum_zero_index] theorem LinearIndepOn.union_of_quotient {s t : Set ι} {f : ι → M} (hs : LinearIndepOn R f s) (ht : LinearIndepOn R (mkQ (span R (f '' s)) ∘ f) t) : LinearIndepOn R 
f (s ∪ t) := by apply hs.union ht.of_comp convert (Submodule.range_ker_disjoint ht).symm · simp aesop theorem LinearIndepOn.union_id_of_quotient {M' : Submodule R M} {s : Set M} (hs : s ⊆ M') (hs' : LinearIndepOn R id s) {t : Set M} (ht : LinearIndepOn R (mkQ M') t) : LinearIndepOn R id (s ∪ t) := hs'.union_of_quotient <| by rw [image_id] exact ht.of_comp ((span R s).mapQ M' (LinearMap.id) (span_le.2 hs)) theorem linearIndepOn_union_iff_quotient {s t : Set ι} {f : ι → M} (hst : Disjoint s t) : LinearIndepOn R f (s ∪ t) ↔ LinearIndepOn R f s ∧ LinearIndepOn R (mkQ (span R (f '' s)) ∘ f) t := by refine ⟨fun h ↦ ⟨?_, ?_⟩, fun h ↦ h.1.union_of_quotient h.2⟩ · exact h.mono subset_union_left apply (h.mono subset_union_right).map simpa [← image_eq_range] using ((linearIndepOn_union_iff hst).1 h).2.2.symm theorem LinearIndepOn.quotient_iff_union {s t : Set ι} {f : ι → M} (hs : LinearIndepOn R f s) (hst : Disjoint s t) : LinearIndepOn R (mkQ (span R (f '' s)) ∘ f) t ↔ LinearIndepOn R f (s ∪ t) := by rw [linearIndepOn_union_iff_quotient hst, and_iff_right hs] theorem rank_quotient_add_rank_le [Nontrivial R] (M' : Submodule R M) : Module.rank R (M ⧸ M') + Module.rank R M' ≤ Module.rank R M := by conv_lhs => simp only [Module.rank_def] rw [Cardinal.ciSup_add_ciSup _ (bddAbove_range _) _ (bddAbove_range _)] refine ciSup_le fun ⟨s, hs⟩ ↦ ciSup_le fun ⟨t, ht⟩ ↦ ?_ choose f hf using Submodule.Quotient.mk_surjective M' simpa [add_comm] using (LinearIndependent.sumElim_of_quotient ht (fun (i : s) ↦ f i) (by simpa [Function.comp_def, hf] using hs)).cardinal_le_rank theorem rank_quotient_le (p : Submodule R M) : Module.rank R (M ⧸ p) ≤ Module.rank R M := (mkQ p).rank_le_of_surjective Quot.mk_surjective /-- The dimension of a quotient is bounded by the dimension of the ambient space. 
-/ theorem Submodule.finrank_quotient_le [StrongRankCondition R] [Module.Finite R M] (s : Submodule R M) : finrank R (M ⧸ s) ≤ finrank R M := toNat_le_toNat ((Submodule.mkQ s).rank_le_of_surjective Quot.mk_surjective) (rank_lt_aleph0 _ _) end Quotient variable [Semiring R] [CommSemiring S] [AddCommMonoid M] [AddCommMonoid M'] [AddCommMonoid M₁] variable [Module R M] section ULift @[simp] theorem rank_ulift : Module.rank R (ULift.{w} M) = Cardinal.lift.{w} (Module.rank R M) := Cardinal.lift_injective.{v} <| Eq.symm <| (lift_lift _).trans ULift.moduleEquiv.symm.lift_rank_eq @[simp] theorem finrank_ulift : finrank R (ULift M) = finrank R M := by simp_rw [finrank, rank_ulift, toNat_lift] end ULift section Prod variable (R M M') variable [Module R M₁] [Module R M'] theorem rank_add_rank_le_rank_prod [Nontrivial R] : Module.rank R M + Module.rank R M₁ ≤ Module.rank R (M × M₁) := by conv_lhs => simp only [Module.rank_def] rw [Cardinal.ciSup_add_ciSup _ (bddAbove_range _) _ (bddAbove_range _)] exact ciSup_le fun ⟨s, hs⟩ ↦ ciSup_le fun ⟨t, ht⟩ ↦ (linearIndependent_inl_union_inr' hs ht).cardinal_le_rank theorem lift_rank_add_lift_rank_le_rank_prod [Nontrivial R] : lift.{v'} (Module.rank R M) + lift.{v} (Module.rank R M') ≤ Module.rank R (M × M') := by rw [← rank_ulift, ← rank_ulift] exact (rank_add_rank_le_rank_prod R _).trans_eq (ULift.moduleEquiv.prodCongr ULift.moduleEquiv).rank_eq variable {R M M'} variable [StrongRankCondition R] [Module.Free R M] [Module.Free R M'] [Module.Free R M₁] open Module.Free /-- If `M` and `M'` are free, then the rank of `M × M'` is `(Module.rank R M).lift + (Module.rank R M').lift`. 
-/ @[simp] theorem rank_prod : Module.rank R (M × M') = Cardinal.lift.{v'} (Module.rank R M) + Cardinal.lift.{v, v'} (Module.rank R M') := by simpa [rank_eq_card_chooseBasisIndex R M, rank_eq_card_chooseBasisIndex R M', lift_umax] using ((chooseBasis R M).prod (chooseBasis R M')).mk_eq_rank.symm /-- If `M` and `M'` are free (and lie in the same universe), the rank of `M × M'` is `(Module.rank R M) + (Module.rank R M')`. -/ theorem rank_prod' : Module.rank R (M × M₁) = Module.rank R M + Module.rank R M₁ := by simp /-- The `finrank` of `M × M'` is `(finrank R M) + (finrank R M')`. -/ @[simp] theorem Module.finrank_prod [Module.Finite R M] [Module.Finite R M'] : finrank R (M × M') = finrank R M + finrank R M' := by simp [finrank, rank_lt_aleph0 R M, rank_lt_aleph0 R M'] end Prod section Finsupp variable (R M M') variable [StrongRankCondition R] [Module.Free R M] [Module R M'] [Module.Free R M'] open Module.Free @[simp] theorem rank_finsupp (ι : Type w) : Module.rank R (ι →₀ M) = Cardinal.lift.{v} #ι * Cardinal.lift.{w} (Module.rank R M) := by obtain ⟨⟨_, bs⟩⟩ := Module.Free.exists_basis (R := R) (M := M) rw [← bs.mk_eq_rank'', ← (Finsupp.basis fun _ : ι => bs).mk_eq_rank'', Cardinal.mk_sigma, Cardinal.sum_const] theorem rank_finsupp' (ι : Type v) : Module.rank R (ι →₀ M) = #ι * Module.rank R M := by simp [rank_finsupp] /-- The rank of `(ι →₀ R)` is `(#ι).lift`. -/ theorem rank_finsupp_self (ι : Type w) : Module.rank R (ι →₀ R) = Cardinal.lift.{u} #ι := by simp /-- If `R` and `ι` lie in the same universe, the rank of `(ι →₀ R)` is `# ι`. -/ theorem rank_finsupp_self' {ι : Type u} : Module.rank R (ι →₀ R) = #ι := by simp /-- The rank of the direct sum is the sum of the ranks. 
-/ @[simp] theorem rank_directSum {ι : Type v} (M : ι → Type w) [∀ i : ι, AddCommMonoid (M i)] [∀ i : ι, Module R (M i)] [∀ i : ι, Module.Free R (M i)] : Module.rank R (⨁ i, M i) = Cardinal.sum fun i => Module.rank R (M i) := by let B i := chooseBasis R (M i) let b : Basis _ R (⨁ i, M i) := DFinsupp.basis fun i => B i simp [← b.mk_eq_rank'', fun i => (B i).mk_eq_rank''] /-- If `m` and `n` are finite, the rank of `m × n` matrices over a module `M` is `(#m).lift * (#n).lift * rank R M`. -/ @[simp] theorem rank_matrix_module (m : Type w) (n : Type w') [Finite m] [Finite n] : Module.rank R (Matrix m n M) = lift.{max v w'} #m * lift.{max v w} #n * lift.{max w w'} (Module.rank R M) := by cases nonempty_fintype m cases nonempty_fintype n obtain ⟨I, b⟩ := Module.Free.exists_basis (R := R) (M := M) rw [← (b.matrix m n).mk_eq_rank''] simp only [mk_prod, lift_mul, lift_lift, ← mul_assoc, b.mk_eq_rank''] /-- If `m` and `n` are finite and lie in the same universe, the rank of `m × n` matrices over a module `M` is `(#m * #n).lift * rank R M`. -/ @[simp high] theorem rank_matrix_module' (m n : Type w) [Finite m] [Finite n] : Module.rank R (Matrix m n M) = lift.{max v} (#m * #n) * lift.{w} (Module.rank R M) := by rw [rank_matrix_module, lift_mul, lift_umax.{w, v}] /-- If `m` and `n` are finite, the rank of `m × n` matrices is `(#m).lift * (#n).lift`. -/ theorem rank_matrix (m : Type v) (n : Type w) [Finite m] [Finite n] : Module.rank R (Matrix m n R) = Cardinal.lift.{max v w u, v} #m * Cardinal.lift.{max v w u, w} #n := by rw [rank_matrix_module, rank_self, lift_one, mul_one, ← lift_lift.{v, max u w}, lift_id, ← lift_lift.{w, max u v}, lift_id] /-- If `m` and `n` are finite and lie in the same universe, the rank of `m × n` matrices is `(#n * #m).lift`. 
-/ theorem rank_matrix' (m n : Type v) [Finite m] [Finite n] : Module.rank R (Matrix m n R) = Cardinal.lift.{u} (#m * #n) := by rw [rank_matrix, lift_mul, lift_umax.{v, u}] /-- If `m` and `n` are finite and lie in the same universe as `R`, the rank of `m × n` matrices is `# m * # n`. -/ theorem rank_matrix'' (m n : Type u) [Finite m] [Finite n] : Module.rank R (Matrix m n R) = #m * #n := by simp open Fintype namespace Module @[simp] theorem finrank_finsupp {ι : Type v} [Fintype ι] : finrank R (ι →₀ M) = card ι * finrank R M := by rw [finrank, finrank, rank_finsupp, ← mk_toNat_eq_card, toNat_mul, toNat_lift, toNat_lift] /-- The `finrank` of `(ι →₀ R)` is `Fintype.card ι`. -/ @[simp] theorem finrank_finsupp_self {ι : Type v} [Fintype ι] : finrank R (ι →₀ R) = card ι := by rw [finrank, rank_finsupp_self, ← mk_toNat_eq_card, toNat_lift] /-- The `finrank` of the direct sum is the sum of the `finrank`s. -/ @[simp] theorem finrank_directSum {ι : Type v} [Fintype ι] (M : ι → Type w) [∀ i : ι, AddCommMonoid (M i)] [∀ i : ι, Module R (M i)] [∀ i : ι, Module.Free R (M i)] [∀ i : ι, Module.Finite R (M i)] : finrank R (⨁ i, M i) = ∑ i, finrank R (M i) := by simp only [finrank, fun i => rank_eq_card_chooseBasisIndex R (M i), rank_directSum, ← mk_sigma, mk_toNat_eq_card, card_sigma] /-- If `m` and `n` are `Fintype`, the `finrank` of `m × n` matrices over a module `M` is `(Fintype.card m) * (Fintype.card n) * finrank R M`. -/ theorem finrank_matrix (m n : Type*) [Fintype m] [Fintype n] : finrank R (Matrix m n M) = card m * card n * finrank R M := by simp [finrank] end Module end Finsupp section Pi variable [StrongRankCondition R] [Module.Free R M] variable [∀ i, AddCommMonoid (φ i)] [∀ i, Module R (φ i)] [∀ i, Module.Free R (φ i)] open Module.Free open LinearMap /-- The rank of a finite product of free modules is the sum of the ranks. 
-/ -- this result is not true without the freeness assumption @[simp] theorem rank_pi [Finite η] : Module.rank R (∀ i, φ i) = Cardinal.sum fun i => Module.rank R (φ i) := by cases nonempty_fintype η let B i := chooseBasis R (φ i) let b : Basis _ R (∀ i, φ i) := Pi.basis fun i => B i simp [← b.mk_eq_rank'', fun i => (B i).mk_eq_rank''] variable (R) /-- The `finrank` of `(ι → R)` is `Fintype.card ι`. -/ theorem Module.finrank_pi {ι : Type v} [Fintype ι] : finrank R (ι → R) = Fintype.card ι := by simp [finrank] --TODO: this should follow from `LinearEquiv.finrank_eq`, that is over a field. /-- The `finrank` of a finite product is the sum of the `finrank`s. -/ theorem Module.finrank_pi_fintype {ι : Type v} [Fintype ι] {M : ι → Type w} [∀ i : ι, AddCommMonoid (M i)] [∀ i : ι, Module R (M i)] [∀ i : ι, Module.Free R (M i)] [∀ i : ι, Module.Finite R (M i)] : finrank R (∀ i, M i) = ∑ i, finrank R (M i) := by simp only [finrank, fun i => rank_eq_card_chooseBasisIndex R (M i), rank_pi, ← mk_sigma, mk_toNat_eq_card, Fintype.card_sigma] variable {R} variable [Fintype η] theorem rank_fun {M η : Type u} [Fintype η] [AddCommMonoid M] [Module R M] [Module.Free R M] : Module.rank R (η → M) = Fintype.card η * Module.rank R M := by rw [rank_pi, Cardinal.sum_const', Cardinal.mk_fintype] theorem rank_fun_eq_lift_mul : Module.rank R (η → M) = (Fintype.card η : Cardinal.{max u₁' v}) * Cardinal.lift.{u₁'} (Module.rank R M) := by rw [rank_pi, Cardinal.sum_const, Cardinal.mk_fintype, Cardinal.lift_natCast] theorem rank_fun' : Module.rank R (η → R) = Fintype.card η := by rw [rank_fun_eq_lift_mul, rank_self, Cardinal.lift_one, mul_one] theorem rank_fin_fun (n : ℕ) : Module.rank R (Fin n → R) = n := by simp variable (R) /-- The vector space of functions on a `Fintype ι` has `finrank` equal to the cardinality of `ι`. 
-/ @[simp] theorem Module.finrank_fintype_fun_eq_card : finrank R (η → R) = Fintype.card η := finrank_eq_of_rank_eq rank_fun' /-- The vector space of functions on `Fin n` has `finrank` equal to `n`. -/ theorem Module.finrank_fin_fun {n : ℕ} : finrank R (Fin n → R) = n := by simp variable {R} -- TODO: merge with the `Finrank` content /-- An `n`-dimensional `R`-vector space is equivalent to `Fin n → R`. -/ def finDimVectorspaceEquiv (n : ℕ) (hn : Module.rank R M = n) : M ≃ₗ[R] Fin n → R := by haveI := nontrivial_of_invariantBasisNumber R have : Cardinal.lift.{u} (n : Cardinal.{v}) = Cardinal.lift.{v} (n : Cardinal.{u}) := by simp have hn := Cardinal.lift_inj.{v, u}.2 hn rw [this] at hn rw [← @rank_fin_fun R _ _ n] at hn haveI : Module.Free R (Fin n → R) := Module.Free.pi _ _ exact Classical.choice (nonempty_linearEquiv_of_lift_rank_eq hn) end Pi section TensorProduct open TensorProduct variable [StrongRankCondition R] [StrongRankCondition S] variable [Module S M] [Module S M'] [Module.Free S M'] variable [Module S M₁] [Module.Free S M₁] variable [Algebra S R] [IsScalarTower S R M] [Module.Free R M] open Module.Free /-- The `S`-rank of `M ⊗[R] M'` is `(Module.rank S M).lift * (Module.rank R M').lift`. -/ @[simp] theorem rank_tensorProduct : Module.rank R (M ⊗[S] M') = Cardinal.lift.{v'} (Module.rank R M) * Cardinal.lift.{v} (Module.rank S M') := by obtain ⟨⟨_, bM⟩⟩ := Module.Free.exists_basis (R := R) (M := M) obtain ⟨⟨_, bN⟩⟩ := Module.Free.exists_basis (R := S) (M := M') rw [← bM.mk_eq_rank'', ← bN.mk_eq_rank'', ← (bM.tensorProduct bN).mk_eq_rank'', Cardinal.mk_prod] /-- If `M` and `M'` lie in the same universe, the `S`-rank of `M ⊗[R] M'` is `(Module.rank S M) * (Module.rank R M')`. 
-/ theorem rank_tensorProduct' : Module.rank R (M ⊗[S] M₁) = Module.rank R M * Module.rank S M₁ := by simp theorem Module.rank_baseChange : Module.rank R (R ⊗[S] M') = Cardinal.lift.{u} (Module.rank S M') := by simp /-- The `S`-`finrank` of `M ⊗[R] M'` is `(finrank S M) * (finrank R M')`. -/ @[simp] theorem Module.finrank_tensorProduct : finrank R (M ⊗[S] M') = finrank R M * finrank S M' := by simp [finrank] theorem Module.finrank_baseChange : finrank R (R ⊗[S] M') = finrank S M' := by simp end TensorProduct section SubmoduleRank section open Module namespace Submodule theorem lt_of_le_of_finrank_lt_finrank {s t : Submodule R M} (le : s ≤ t) (lt : finrank R s < finrank R t) : s < t := lt_of_le_of_ne le fun h => ne_of_lt lt (by rw [h]) theorem lt_top_of_finrank_lt_finrank {s : Submodule R M} (lt : finrank R s < finrank R M) : s < ⊤ := by rw [← finrank_top R M] at lt exact lt_of_le_of_finrank_lt_finrank le_top lt end Submodule variable [StrongRankCondition R] /-- The dimension of a submodule is bounded by the dimension of the ambient space. -/ theorem Submodule.finrank_le [Module.Finite R M] (s : Submodule R M) : finrank R s ≤ finrank R M := toNat_le_toNat (Submodule.rank_le s) (rank_lt_aleph0 _ _) /-- Pushforwards of finite submodules have a smaller finrank. 
-/ theorem Submodule.finrank_map_le [Module R M'] (f : M →ₗ[R] M') (p : Submodule R M) [Module.Finite R p] : finrank R (p.map f) ≤ finrank R p := finrank_le_finrank_of_rank_le_rank (lift_rank_map_le _ _) (rank_lt_aleph0 _ _) theorem Submodule.finrank_mono {s t : Submodule R M} [Module.Finite R t] (hst : s ≤ t) : finrank R s ≤ finrank R t := Cardinal.toNat_le_toNat (Submodule.rank_mono hst) (rank_lt_aleph0 R ↥t) end end SubmoduleRank section Span variable [StrongRankCondition R] theorem rank_span_le (s : Set M) : Module.rank R (span R s) ≤ #s := by rw [Finsupp.span_eq_range_linearCombination, ← lift_strictMono.le_iff_le] refine (lift_rank_range_le _).trans ?_ rw [rank_finsupp_self] simp only [lift_lift, le_refl] theorem rank_span_finset_le (s : Finset M) : Module.rank R (span R (s : Set M)) ≤ s.card := by simpa using rank_span_le (s : Set M) theorem rank_span_of_finset (s : Finset M) : Module.rank R (span R (s : Set M)) < ℵ₀ := (rank_span_finset_le s).trans_lt (Cardinal.nat_lt_aleph0 _) open Submodule Module variable (R) in /-- The rank of a set of vectors as a natural number. 
-/ protected noncomputable def Set.finrank (s : Set M) : ℕ := finrank R (span R s) theorem finrank_span_le_card (s : Set M) [Fintype s] : finrank R (span R s) ≤ s.toFinset.card := finrank_le_of_rank_le (by simpa using rank_span_le (R := R) s) theorem finrank_span_finset_le_card (s : Finset M) : (s : Set M).finrank R ≤ s.card := calc (s : Set M).finrank R ≤ (s : Set M).toFinset.card := finrank_span_le_card (M := M) s _ = s.card := by simp theorem finrank_range_le_card {ι : Type*} [Fintype ι] (b : ι → M) : (Set.range b).finrank R ≤ Fintype.card ι := by classical refine (finrank_span_le_card _).trans ?_ rw [Set.toFinset_range] exact Finset.card_image_le theorem finrank_span_eq_card [Nontrivial R] {ι : Type*} [Fintype ι] {b : ι → M} (hb : LinearIndependent R b) : finrank R (span R (Set.range b)) = Fintype.card ι := finrank_eq_of_rank_eq (by have : Module.rank R (span R (Set.range b)) = #(Set.range b) := rank_span hb rwa [← lift_inj, mk_range_eq_of_injective hb.injective, Cardinal.mk_fintype, lift_natCast, lift_eq_nat_iff] at this) theorem finrank_span_set_eq_card {s : Set M} [Fintype s] (hs : LinearIndepOn R id s) : finrank R (span R s) = s.toFinset.card := finrank_eq_of_rank_eq (by have : Module.rank R (span R s) = #s := rank_span_set hs rwa [Cardinal.mk_fintype, ← Set.toFinset_card] at this) theorem finrank_span_finset_eq_card {s : Finset M} (hs : LinearIndepOn R id (s : Set M)) : finrank R (span R (s : Set M)) = s.card := by convert finrank_span_set_eq_card (s := (s : Set M)) hs ext simp theorem span_lt_of_subset_of_card_lt_finrank {s : Set M} [Fintype s] {t : Submodule R M} (subset : s ⊆ t) (card_lt : s.toFinset.card < finrank R t) : span R s < t := lt_of_le_of_finrank_lt_finrank (span_le.mpr subset) (lt_of_le_of_lt (finrank_span_le_card _) card_lt) theorem span_lt_top_of_card_lt_finrank {s : Set M} [Fintype s] (card_lt : s.toFinset.card < finrank R M) : span R s < ⊤ := lt_top_of_finrank_lt_finrank (lt_of_le_of_lt (finrank_span_le_card _) card_lt) lemma 
finrank_le_of_span_eq_top {ι : Type*} [Fintype ι] {v : ι → M} (hv : Submodule.span R (Set.range v) = ⊤) : finrank R M ≤ Fintype.card ι := by classical rw [← finrank_top, ← hv] exact (finrank_span_le_card _).trans (by convert Fintype.card_range_le v; rw [Set.toFinset_card]) end Span section SubalgebraRank open Module section Semiring variable {F E : Type*} [CommSemiring F] [Semiring E] [Algebra F E] @[simp] theorem Subalgebra.rank_toSubmodule (S : Subalgebra F E) : Module.rank F (Subalgebra.toSubmodule S) = Module.rank F S := rfl @[simp] theorem Subalgebra.finrank_toSubmodule (S : Subalgebra F E) : finrank F (Subalgebra.toSubmodule S) = finrank F S := rfl theorem subalgebra_top_rank_eq_submodule_top_rank : Module.rank F (⊤ : Subalgebra F E) = Module.rank F (⊤ : Submodule F E) := by rw [← Algebra.top_toSubmodule] rfl theorem subalgebra_top_finrank_eq_submodule_top_finrank : finrank F (⊤ : Subalgebra F E) = finrank F (⊤ : Submodule F E) := by rw [← Algebra.top_toSubmodule] rfl theorem Subalgebra.rank_top : Module.rank F (⊤ : Subalgebra F E) = Module.rank F E := by rw [subalgebra_top_rank_eq_submodule_top_rank] exact _root_.rank_top F E end Semiring section Ring variable {F E : Type*} [CommRing F] [Ring E] [Algebra F E] variable [StrongRankCondition F] [NoZeroSMulDivisors F E] [Nontrivial E] @[simp] theorem Subalgebra.rank_bot : Module.rank F (⊥ : Subalgebra F E) = 1 := (Subalgebra.toSubmoduleEquiv (⊥ : Subalgebra F E)).symm.rank_eq.trans <| by rw [Algebra.toSubmodule_bot, one_eq_span, rank_span_set, mk_singleton _] have := Module.nontrivial F E exact .singleton one_ne_zero @[simp] theorem Subalgebra.finrank_bot : finrank F (⊥ : Subalgebra F E) = 1 := finrank_eq_of_rank_eq (by simp) end Ring end SubalgebraRank
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/DivisionRing.lean
import Mathlib.LinearAlgebra.Basis.VectorSpace import Mathlib.LinearAlgebra.Dimension.Finite import Mathlib.LinearAlgebra.Dimension.RankNullity /-! # Dimension of vector spaces In this file we provide results about `Module.rank` and `Module.finrank` of vector spaces over division rings. ## Main statements For vector spaces (i.e. modules over a field), we have * `rank_quotient_add_rank_of_divisionRing`: if `V₁` is a submodule of `V`, then `Module.rank (V/V₁) + Module.rank V₁ = Module.rank V`. * `rank_range_add_rank_ker`: the rank-nullity theorem. See also `Mathlib/LinearAlgebra/Dimension/ErdosKaplansky.lean` for the Erdős-Kaplansky theorem. -/ noncomputable section universe u₀ u v v' v'' u₁' w w' variable {K R : Type u} {V V₁ V₂ V₃ : Type v} {V' V'₁ : Type v'} {V'' : Type v''} variable {ι : Type w} {ι' : Type w'} {η : Type u₁'} {φ : η → Type*} open Cardinal Basis Submodule Function Set section Module section DivisionRing variable [DivisionRing K] variable [AddCommGroup V] [Module K V] variable [AddCommGroup V'] [Module K V'] variable [AddCommGroup V₁] [Module K V₁] /-- If a vector space has a finite dimension, the index set of `Basis.ofVectorSpace` is finite. -/ theorem Module.Basis.finite_ofVectorSpaceIndex_of_rank_lt_aleph0 (h : Module.rank K V < ℵ₀) : (Basis.ofVectorSpaceIndex K V).Finite := Set.finite_def.2 <| (Basis.ofVectorSpace K V).nonempty_fintype_index_of_rank_lt_aleph0 h /-- Also see `rank_quotient_add_rank`. 
-/ theorem rank_quotient_add_rank_of_divisionRing (p : Submodule K V) : Module.rank K (V ⧸ p) + Module.rank K p = Module.rank K V := by classical let ⟨f⟩ := quotient_prod_linearEquiv p exact rank_prod'.symm.trans f.rank_eq instance DivisionRing.hasRankNullity : HasRankNullity.{u₀} K where rank_quotient_add_rank := rank_quotient_add_rank_of_divisionRing exists_set_linearIndependent V _ _ := by let b := Module.Free.chooseBasis K V refine ⟨range b, ?_, b.linearIndependent.linearIndepOn_id⟩ rw [← lift_injective.eq_iff, mk_range_eq_of_injective b.injective, Module.Free.rank_eq_card_chooseBasisIndex] section variable [AddCommGroup V₂] [Module K V₂] variable [AddCommGroup V₃] [Module K V₃] open LinearMap /-- This is mostly an auxiliary lemma for `Submodule.rank_sup_add_rank_inf_eq`. -/ theorem rank_add_rank_split (db : V₂ →ₗ[K] V) (eb : V₃ →ₗ[K] V) (cd : V₁ →ₗ[K] V₂) (ce : V₁ →ₗ[K] V₃) (hde : ⊤ ≤ LinearMap.range db ⊔ LinearMap.range eb) (hgd : ker cd = ⊥) (eq : db.comp cd = eb.comp ce) (eq₂ : ∀ d e, db d = eb e → ∃ c, cd c = d ∧ ce c = e) : Module.rank K V + Module.rank K V₁ = Module.rank K V₂ + Module.rank K V₃ := by have hf : Surjective (coprod db eb) := by rwa [← range_eq_top, range_coprod, eq_top_iff] conv => rhs rw [← rank_prod', rank_eq_of_surjective hf] congr 1 apply LinearEquiv.rank_eq let L : V₁ →ₗ[K] ker (coprod db eb) := LinearMap.codRestrict _ (prod cd (-ce)) <| by simpa [add_eq_zero_iff_eq_neg] using LinearMap.ext_iff.1 eq refine LinearEquiv.ofBijective L ⟨?_, ?_⟩ · rw [← ker_eq_bot, ker_codRestrict, ker_prod, hgd, bot_inf_eq] · rw [← range_eq_top, eq_top_iff, range_codRestrict, ← map_le_iff_le_comap, Submodule.map_top, range_subtype] rintro ⟨d, e⟩ have h := eq₂ d (-e) simp only [add_eq_zero_iff_eq_neg, LinearMap.prod_apply, mem_ker, Prod.mk_inj, coprod_apply, map_neg, neg_apply, LinearMap.mem_range, Pi.prod] at h ⊢ grind end end DivisionRing end Module section Basis open Module variable [DivisionRing K] [AddCommGroup V] [Module K V] theorem 
linearIndependent_of_top_le_span_of_card_eq_finrank {ι : Type*} [Fintype ι] {b : ι → V} (spans : ⊤ ≤ span K (Set.range b)) (card_eq : Fintype.card ι = finrank K V) : LinearIndependent K b := linearIndependent_iff'.mpr fun s g dependent i i_mem_s => by classical by_contra gx_ne_zero -- We'll derive a contradiction by showing `b '' (univ \ {i})` of cardinality `n - 1` -- spans a vector space of dimension `n`. refine not_le_of_gt (span_lt_top_of_card_lt_finrank (show (b '' (Set.univ \ {i})).toFinset.card < finrank K V from ?_)) ?_ · calc (b '' (Set.univ \ {i})).toFinset.card = ((Set.univ \ {i}).toFinset.image b).card := by rw [Set.toFinset_card, Fintype.card_ofFinset] _ ≤ (Set.univ \ {i}).toFinset.card := Finset.card_image_le _ = (Finset.univ.erase i).card := (congr_arg Finset.card (Finset.ext (by simp [and_comm]))) _ < Finset.univ.card := Finset.card_erase_lt_of_mem (Finset.mem_univ i) _ = finrank K V := card_eq -- We already have that `b '' univ` spans the whole space, -- so we only need to show that the span of `b '' (univ \ {i})` contains each `b j`. refine spans.trans (span_le.mpr ?_) rintro _ ⟨j, rfl, rfl⟩ -- The case that `j ≠ i` is easy because `b j ∈ b '' (univ \ {i})`. by_cases j_eq : j = i swap · refine subset_span ⟨j, (Set.mem_diff _).mpr ⟨Set.mem_univ _, ?_⟩, rfl⟩ exact mt Set.mem_singleton_iff.mp j_eq -- To show `b i ∈ span (b '' (univ \ {i}))`, we use that it's a weighted sum -- of the other `b j`s. rw [j_eq, SetLike.mem_coe, show b i = -((g i)⁻¹ • (s.erase i).sum fun j => g j • b j) from _] · refine neg_mem (smul_mem _ _ (sum_mem fun k hk => ?_)) obtain ⟨k_ne_i, _⟩ := Finset.mem_erase.mp hk refine smul_mem _ _ (subset_span ⟨k, ?_, rfl⟩) simp_all only [Set.mem_univ, Set.mem_diff, Set.mem_singleton_iff, and_self, not_false_eq_true] -- To show `b i` is a weighted sum of the other `b j`s, we'll rewrite this sum -- to have the form of the assumption `dependent`. 
apply eq_neg_of_add_eq_zero_left calc (b i + (g i)⁻¹ • (s.erase i).sum fun j => g j • b j) = (g i)⁻¹ • (g i • b i + (s.erase i).sum fun j => g j • b j) := by rw [smul_add, ← mul_smul, inv_mul_cancel₀ gx_ne_zero, one_smul] _ = (g i)⁻¹ • (0 : V) := congr_arg _ ?_ _ = 0 := smul_zero _ -- And then it's just a bit of manipulation with finite sums. rwa [← Finset.insert_erase i_mem_s, Finset.sum_insert (Finset.notMem_erase _ _)] at dependent /-- A finite family of vectors is linearly independent if and only if its cardinality equals the dimension of its span. -/ theorem linearIndependent_iff_card_eq_finrank_span {ι : Type*} [Fintype ι] {b : ι → V} : LinearIndependent K b ↔ Fintype.card ι = (Set.range b).finrank K := by constructor · intro h exact (finrank_span_eq_card h).symm · intro hc let f := Submodule.subtype (span K (Set.range b)) let b' : ι → span K (Set.range b) := fun i => ⟨b i, mem_span.2 fun p hp => hp (Set.mem_range_self _)⟩ have hs : ⊤ ≤ span K (Set.range b') := by intro x have h : span K (f '' Set.range b') = map f (span K (Set.range b')) := span_image f have hf : f '' Set.range b' = Set.range b := by ext x simp [f, b', Set.mem_image, Set.mem_range] rw [hf] at h have hx : (x : V) ∈ span K (Set.range b) := x.property simp_rw [h] at hx simpa [f, mem_map] using hx have hi : LinearMap.ker f = ⊥ := ker_subtype _ convert (linearIndependent_of_top_le_span_of_card_eq_finrank hs hc).map' _ hi theorem linearIndependent_iff_card_le_finrank_span {ι : Type*} [Fintype ι] {b : ι → V} : LinearIndependent K b ↔ Fintype.card ι ≤ (Set.range b).finrank K := by rw [linearIndependent_iff_card_eq_finrank_span, (finrank_range_le_card _).ge_iff_eq'] /-- A family of `finrank K V` vectors forms a basis if they span the whole space. 
-/ noncomputable def basisOfTopLeSpanOfCardEqFinrank {ι : Type*} [Fintype ι] (b : ι → V) (le_span : ⊤ ≤ span K (Set.range b)) (card_eq : Fintype.card ι = finrank K V) : Basis ι K V := Basis.mk (linearIndependent_of_top_le_span_of_card_eq_finrank le_span card_eq) le_span @[simp] theorem coe_basisOfTopLeSpanOfCardEqFinrank {ι : Type*} [Fintype ι] (b : ι → V) (le_span : ⊤ ≤ span K (Set.range b)) (card_eq : Fintype.card ι = finrank K V) : ⇑(basisOfTopLeSpanOfCardEqFinrank b le_span card_eq) = b := Basis.coe_mk _ _ /-- A finset of `finrank K V` vectors forms a basis if they span the whole space. -/ @[simps! repr_apply] noncomputable def finsetBasisOfTopLeSpanOfCardEqFinrank {s : Finset V} (le_span : ⊤ ≤ span K (s : Set V)) (card_eq : s.card = finrank K V) : Basis {x // x ∈ s} K V := basisOfTopLeSpanOfCardEqFinrank ((↑) : ↥(s : Set V) → V) ((@Subtype.range_coe_subtype _ fun x => x ∈ s).symm ▸ le_span) (_root_.trans (Fintype.card_coe _) card_eq) /-- A set of `finrank K V` vectors forms a basis if they span the whole space. -/ @[simps! repr_apply] noncomputable def setBasisOfTopLeSpanOfCardEqFinrank {s : Set V} [Fintype s] (le_span : ⊤ ≤ span K s) (card_eq : s.toFinset.card = finrank K V) : Basis s K V := basisOfTopLeSpanOfCardEqFinrank ((↑) : s → V) ((@Subtype.range_coe_subtype _ s).symm ▸ le_span) (_root_.trans s.toFinset_card.symm card_eq) end Basis
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Localization.lean
import Mathlib.Algebra.Module.LocalizedModule.Submodule import Mathlib.LinearAlgebra.Dimension.DivisionRing import Mathlib.RingTheory.IsTensorProduct import Mathlib.RingTheory.Localization.BaseChange import Mathlib.RingTheory.Localization.FractionRing import Mathlib.RingTheory.OreLocalization.OreSet /-! # Rank of localization ## Main statements - `IsLocalizedModule.lift_rank_eq`: `rank_Rₚ Mₚ = rank R M`. - `rank_quotient_add_rank_of_isDomain`: The **rank-nullity theorem** for commutative domains. -/ open Cardinal Module nonZeroDivisors section CommRing universe uR uS uT uM uN uP variable {R : Type uR} (S : Type uS) {M : Type uM} {N : Type uN} variable [CommRing R] [CommRing S] [AddCommGroup M] [AddCommGroup N] variable [Module R M] [Module R N] [Algebra R S] [Module S N] [IsScalarTower R S N] variable (p : Submonoid R) [IsLocalization p S] (f : M →ₗ[R] N) [IsLocalizedModule p f] variable (hp : p ≤ R⁰) section include hp section include f lemma IsLocalizedModule.lift_rank_eq : Cardinal.lift.{uM} (Module.rank R N) = Cardinal.lift.{uN} (Module.rank R M) := by cases subsingleton_or_nontrivial R · simp only [rank_subsingleton, lift_one] apply le_antisymm <;> rw [Module.rank_def, lift_iSup (bddAbove_range _)] <;> apply ciSup_le' <;> intro ⟨s, hs⟩ exacts [(IsLocalizedModule.linearIndependent_lift p f hs).choose_spec.cardinal_lift_le_rank, hs.of_isLocalizedModule_of_isRegular p f (le_nonZeroDivisors_iff_isRegular.mp hp) |>.cardinal_lift_le_rank] lemma IsLocalizedModule.finrank_eq : finrank R N = finrank R M := by simpa using congr_arg toNat (lift_rank_eq p f hp) end lemma IsLocalizedModule.rank_eq {N : Type uM} [AddCommGroup N] [Module R N] (f : M →ₗ[R] N) [IsLocalizedModule p f] : Module.rank R N = Module.rank R M := by simpa using lift_rank_eq p f hp lemma IsLocalization.rank_eq : Module.rank S N = Module.rank R N := by cases subsingleton_or_nontrivial R · have := (algebraMap R S).codomain_trivial; simp only [rank_subsingleton] have inj := IsLocalization.injective S hp 
apply le_antisymm <;> (rw [Module.rank]; apply ciSup_le'; intro ⟨s, hs⟩) · have := (faithfulSMul_iff_algebraMap_injective R S).mpr inj exact (hs.restrict_scalars' R).cardinal_le_rank · have := inj.nontrivial exact (hs.localization S p).cardinal_le_rank end variable (R M) in theorem exists_set_linearIndependent_of_isDomain [IsDomain R] : ∃ s : Set M, #s = Module.rank R M ∧ LinearIndepOn R id s := by obtain ⟨w, hw⟩ := IsLocalizedModule.linearIndependent_lift R⁰ (LocalizedModule.mkLinearMap R⁰ M) <| Module.Free.chooseBasis (FractionRing R) (LocalizedModule R⁰ M) |>.linearIndependent.restrict_scalars' _ refine ⟨Set.range w, ?_, (linearIndepOn_id_range_iff hw.injective).mpr hw⟩ apply Cardinal.lift_injective.{max uR uM} rw [Cardinal.mk_range_eq_of_injective hw.injective, ← Module.Free.rank_eq_card_chooseBasisIndex, IsLocalization.rank_eq (FractionRing R) R⁰ le_rfl, IsLocalizedModule.lift_rank_eq R⁰ (LocalizedModule.mkLinearMap R⁰ M) le_rfl] /-- The **rank-nullity theorem** for commutative domains. Also see `rank_quotient_add_rank`. 
-/ theorem rank_quotient_add_rank_of_isDomain [IsDomain R] (M' : Submodule R M) : Module.rank R (M ⧸ M') + Module.rank R M' = Module.rank R M := by apply lift_injective.{max uR uM} simp_rw [lift_add, ← IsLocalizedModule.lift_rank_eq R⁰ (M'.toLocalized R⁰) le_rfl, ← IsLocalizedModule.lift_rank_eq R⁰ (LocalizedModule.mkLinearMap R⁰ M) le_rfl, ← IsLocalizedModule.lift_rank_eq R⁰ (M'.toLocalizedQuotient R⁰) le_rfl, ← IsLocalization.rank_eq (FractionRing R) R⁰ le_rfl, ← lift_add, rank_quotient_add_rank_of_divisionRing] universe w in instance IsDomain.hasRankNullity [IsDomain R] : HasRankNullity.{w} R where rank_quotient_add_rank := rank_quotient_add_rank_of_isDomain exists_set_linearIndependent M := exists_set_linearIndependent_of_isDomain R M namespace IsBaseChange open Cardinal TensorProduct section variable {p} [Free S N] [StrongRankCondition S] {T : Type uT} [CommRing T] [Algebra R T] (hpT : Algebra.algebraMapSubmonoid T p ≤ T⁰) [StrongRankCondition (S ⊗[R] T)] {P : Type uP} [AddCommGroup P] [Module R P] [Module T P] [IsScalarTower R T P] {g : M →ₗ[R] P} (bc : IsBaseChange T g) include S hp hpT f bc theorem lift_rank_eq_of_le_nonZeroDivisors : Cardinal.lift.{uM} (Module.rank T P) = Cardinal.lift.{uP} (Module.rank R M) := by rw [← lift_inj.{_, max uS uT uN}, lift_lift, lift_lift] let ST := S ⊗[R] T conv_rhs => rw [← lift_lift.{uN, max uS uT uP}, ← IsLocalizedModule.lift_rank_eq p f hp, ← IsLocalization.rank_eq S p hp, lift_lift, ← lift_lift.{max uS uT, max uM uP}, ← rank_baseChange (R := ST), ← lift_id'.{max uS uT, max uS uT uN} (Module.rank ..), lift_lift, ← lift_lift.{max uS uT uP, uM}] let _ : Algebra T ST := Algebra.TensorProduct.rightAlgebra set pT := Algebra.algebraMapSubmonoid T p rw [← lift_lift.{max uS uT, max uM uN}, ← lift_umax.{uP}, ← IsLocalizedModule.lift_rank_eq pT (mk T ST P 1) hpT, ← IsLocalization.rank_eq ST pT hpT, lift_id'.{uP, max uS uT}, ← lift_id'.{max uS uT, max uS uT uP} (Module.rank ..), lift_lift, ← lift_lift.{max uS uT uN, uM}, lift_inj] 
exact LinearEquiv.lift_rank_eq <| AlgebraTensorModule.congr (.refl ST ST) bc.equiv.symm ≪≫ₗ AlgebraTensorModule.cancelBaseChange .. ≪≫ₗ (AlgebraTensorModule.cancelBaseChange ..).symm ≪≫ₗ AlgebraTensorModule.congr (.refl ..) ((isLocalizedModule_iff_isBaseChange p S f).mp ‹_›).equiv theorem finrank_eq_of_le_nonZeroDivisors : finrank T P = finrank R M := by simpa using congr_arg toNat (lift_rank_eq_of_le_nonZeroDivisors S f hp hpT bc) omit bc theorem rank_eq_of_le_nonZeroDivisors {P : Type uM} [AddCommGroup P] [Module R P] [Module T P] [IsScalarTower R T P] {g : M →ₗ[R] P} (bc : IsBaseChange T g) : Module.rank T P = Module.rank R M := by simpa using lift_rank_eq_of_le_nonZeroDivisors S f hp hpT bc end variable {p} {T : Type uT} [CommRing T] [NoZeroDivisors T] [Algebra R T] [FaithfulSMul R T] {P : Type uP} [AddCommGroup P] [Module R P] [Module T P] [IsScalarTower R T P] {g : M →ₗ[R] P} (bc : IsBaseChange T g) include bc theorem lift_rank_eq : Cardinal.lift.{uM} (Module.rank T P) = Cardinal.lift.{uP} (Module.rank R M) := by have inj := FaithfulSMul.algebraMap_injective R T have := inj.noZeroDivisors _ (map_zero _) (map_mul _) cases subsingleton_or_nontrivial R · have := (algebraMap R T).codomain_trivial; simp only [rank_subsingleton, lift_one] have := (isDomain_iff_noZeroDivisors_and_nontrivial T).mpr ⟨‹_›, (FaithfulSMul.algebraMap_injective R T).nontrivial⟩ let FR := FractionRing R let FT := FractionRing T replace inj : Function.Injective (algebraMap R FT) := (IsFractionRing.injective T _).comp inj let g := TensorProduct.mk T FT P 1 have : IsLocalizedModule R⁰ (TensorProduct.mk R FR FT 1) := inferInstance let _ : Algebra FT (FR ⊗[R] FT) := Algebra.TensorProduct.rightAlgebra let _ := isLocalizedModule_iff_isLocalization.mp this |>.atUnits _ _ ?_ |>.symm.isField (Field.toIsField FT) |>.toField on_goal 2 => rintro _ ⟨_, mem, rfl⟩; exact (map_ne_zero_of_mem_nonZeroDivisors _ inj mem).isUnit have := bc.comp_iff.2 ((isLocalizedModule_iff_isBaseChange T⁰ FT g).1 
inferInstance) rw [← lift_inj.{_, max uT uP}, lift_lift, lift_lift, ← lift_lift.{max uT uP, uM}, ← IsLocalizedModule.lift_rank_eq T⁰ g le_rfl, lift_lift, ← lift_lift.{uM}, ← IsLocalization.rank_eq FT T⁰ le_rfl, lift_rank_eq_of_le_nonZeroDivisors FR (LocalizedModule.mkLinearMap R⁰ M) le_rfl (map_le_nonZeroDivisors_of_injective _ inj le_rfl) this, lift_lift] theorem finrank_eq : finrank T P = finrank R M := by simpa using congr_arg toNat bc.lift_rank_eq omit bc theorem rank_eq {P : Type uM} [AddCommGroup P] [Module R P] [Module T P] [IsScalarTower R T P] {g : M →ₗ[R] P} (bc : IsBaseChange T g) : Module.rank T P = Module.rank R M := by simpa using bc.lift_rank_eq end IsBaseChange end CommRing section Ring variable {R} [Ring R] [IsDomain R] /-- A domain that is not (left) Ore is of infinite rank. See [cohn_1995] Proposition 1.3.6 -/ lemma aleph0_le_rank_of_isEmpty_oreSet (hS : IsEmpty (OreLocalization.OreSet R⁰)) : ℵ₀ ≤ Module.rank R R := by classical rw [← not_nonempty_iff, OreLocalization.nonempty_oreSet_iff_of_noZeroDivisors] at hS push_neg at hS obtain ⟨r, s, h⟩ := hS refine Cardinal.aleph0_le.mpr fun n ↦ ?_ suffices LinearIndependent R (fun (i : Fin n) ↦ r * s ^ (i : ℕ)) by simpa using this.cardinal_lift_le_rank suffices ∀ (g : ℕ → R) (x), (∑ i ∈ Finset.range n, g i • (r * s ^ (i + x))) = 0 → ∀ i < n, g i = 0 by refine Fintype.linearIndependent_iff.mpr fun g hg i ↦ ?_ simpa only [dif_pos i.prop] using this (fun i ↦ if h : i < n then g ⟨i, h⟩ else 0) 0 (by simp [← Fin.sum_univ_eq_sum_range, ← hg]) i i.prop intro g x hg i hin induction n generalizing g x i with | zero => exact (hin.not_ge (zero_le i)).elim | succ n IH => rw [Finset.sum_range_succ'] at hg by_cases hg0 : g 0 = 0 · simp only [hg0, zero_smul, add_zero, add_assoc] at hg cases i; exacts [hg0, IH _ _ hg _ (Nat.succ_lt_succ_iff.mp hin)] simp only [zero_add, pow_add _ _ x, ← mul_assoc, pow_succ, ← Finset.sum_mul, smul_eq_mul] at hg rw [← neg_eq_iff_add_eq_zero, ← neg_mul, ← neg_mul] at hg have := 
mul_right_cancel₀ (mem_nonZeroDivisors_iff_ne_zero.mp (s ^ x).prop) hg exact (h _ ⟨(g 0), mem_nonZeroDivisors_iff_ne_zero.mpr (by simpa)⟩ this.symm).elim -- TODO: Upgrade this to an iff. See [lam_1999] Exercise 10.21 lemma nonempty_oreSet_of_strongRankCondition [StrongRankCondition R] : Nonempty (OreLocalization.OreSet R⁰) := by by_contra! h have := aleph0_le_rank_of_isEmpty_oreSet h rw [rank_self] at this exact this.not_gt one_lt_aleph0 end Ring
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/LinearMap.lean
import Mathlib.Algebra.Module.Projective import Mathlib.LinearAlgebra.Dimension.DivisionRing import Mathlib.LinearAlgebra.Dimension.FreeAndStrongRankCondition /-! # The rank of a linear map ## Main Definition - `LinearMap.rank`: The rank of a linear map. -/ noncomputable section universe u v v' v'' variable {K : Type u} {V V₁ : Type v} {V' V'₁ : Type v'} {V'' : Type v''} open Cardinal Basis Submodule Function Set namespace LinearMap section Ring variable [Ring K] [AddCommGroup V] [Module K V] [AddCommGroup V₁] [Module K V₁] variable [AddCommGroup V'] [Module K V'] /-- `rank f` is the rank of a `LinearMap` `f`, defined as the dimension of `f.range`. -/ abbrev rank (f : V →ₗ[K] V') : Cardinal := Module.rank K (LinearMap.range f) theorem rank_le_range (f : V →ₗ[K] V') : rank f ≤ Module.rank K V' := Submodule.rank_le _ theorem rank_le_domain (f : V →ₗ[K] V₁) : rank f ≤ Module.rank K V := rank_range_le _ @[simp] theorem rank_zero [Nontrivial K] : rank (0 : V →ₗ[K] V') = 0 := by rw [rank, LinearMap.range_zero, rank_bot] variable [AddCommGroup V''] [Module K V''] theorem rank_comp_le_left (g : V →ₗ[K] V') (f : V' →ₗ[K] V'') : rank (f.comp g) ≤ rank f := by refine Submodule.rank_mono ?_ rw [LinearMap.range_comp] exact LinearMap.map_le_range theorem lift_rank_comp_le_right (g : V →ₗ[K] V') (f : V' →ₗ[K] V'') : Cardinal.lift.{v'} (rank (f.comp g)) ≤ Cardinal.lift.{v''} (rank g) := by rw [rank, rank, LinearMap.range_comp]; exact lift_rank_map_le _ _ /-- The rank of the composition of two maps is less than the minimum of their ranks. 
-/ theorem lift_rank_comp_le (g : V →ₗ[K] V') (f : V' →ₗ[K] V'') : Cardinal.lift.{v'} (rank (f.comp g)) ≤ min (Cardinal.lift.{v'} (rank f)) (Cardinal.lift.{v''} (rank g)) := le_min (Cardinal.lift_le.mpr <| rank_comp_le_left _ _) (lift_rank_comp_le_right _ _) variable [AddCommGroup V'₁] [Module K V'₁] theorem rank_comp_le_right (g : V →ₗ[K] V') (f : V' →ₗ[K] V'₁) : rank (f.comp g) ≤ rank g := by simpa only [Cardinal.lift_id] using lift_rank_comp_le_right g f /-- The rank of the composition of two maps is less than the minimum of their ranks. See `lift_rank_comp_le` for the universe-polymorphic version. -/ theorem rank_comp_le (g : V →ₗ[K] V') (f : V' →ₗ[K] V'₁) : rank (f.comp g) ≤ min (rank f) (rank g) := by simpa only [Cardinal.lift_id] using lift_rank_comp_le g f end Ring section DivisionRing variable [DivisionRing K] [AddCommGroup V] [Module K V] [AddCommGroup V₁] [Module K V₁] variable [AddCommGroup V'] [Module K V'] theorem rank_add_le (f g : V →ₗ[K] V') : rank (f + g) ≤ rank f + rank g := calc rank (f + g) ≤ Module.rank K (LinearMap.range f ⊔ LinearMap.range g : Submodule K V') := by refine Submodule.rank_mono ?_ exact LinearMap.range_le_iff_comap.2 <| eq_top_iff'.2 fun x => show f x + g x ∈ (LinearMap.range f ⊔ LinearMap.range g : Submodule K V') from mem_sup.2 ⟨_, ⟨x, rfl⟩, _, ⟨x, rfl⟩, rfl⟩ _ ≤ rank f + rank g := Submodule.rank_add_le_rank_add_rank _ _ theorem rank_finset_sum_le {η} (s : Finset η) (f : η → V →ₗ[K] V') : rank (∑ d ∈ s, f d) ≤ ∑ d ∈ s, rank (f d) := @Finset.sum_hom_rel _ _ _ _ _ (fun a b => rank a ≤ b) f (fun d => rank (f d)) s (le_of_eq rank_zero) fun _ _ _ h => le_trans (rank_add_le _ _) (by gcongr) theorem le_rank_iff_exists_linearIndependent {c : Cardinal} {f : V →ₗ[K] V'} : c ≤ rank f ↔ ∃ s : Set V, Cardinal.lift.{v'} #s = Cardinal.lift.{v} c ∧ LinearIndepOn K f s := by rcases f.rangeRestrict.exists_rightInverse_of_surjective f.range_rangeRestrict with ⟨g, hg⟩ have fg : LeftInverse f.rangeRestrict g := LinearMap.congr_fun hg refine ⟨fun 
h => ?_, ?_⟩ · rcases _root_.le_rank_iff_exists_linearIndependent.1 h with ⟨s, rfl, si⟩ refine ⟨g '' s, Cardinal.mk_image_eq_lift _ _ fg.injective, ?_⟩ replace fg : ∀ x, f (g x) = x := by intro x convert congr_arg Subtype.val (fg x) replace si : LinearIndepOn K (fun x => f (g x)) s := by simpa only [fg] using si.map' _ (ker_subtype _) exact si.image_of_comp · rintro ⟨s, hsc, si⟩ have : LinearIndepOn K f.rangeRestrict s := LinearIndependent.of_comp (LinearMap.range f).subtype (by convert si) convert this.id_image.cardinal_le_rank rw [← Cardinal.lift_inj, ← hsc, Cardinal.mk_image_eq_of_injOn_lift] exact injOn_iff_injective.2 this.injective theorem le_rank_iff_exists_linearIndependent_finset {n : ℕ} {f : V →ₗ[K] V'} : ↑n ≤ rank f ↔ ∃ s : Finset V, s.card = n ∧ LinearIndependent K fun x : (s : Set V) => f x := by simp only [le_rank_iff_exists_linearIndependent, Cardinal.lift_natCast, Cardinal.lift_eq_nat_iff, Cardinal.mk_set_eq_nat_iff_finset] constructor · rintro ⟨s, ⟨t, rfl, rfl⟩, si⟩ exact ⟨t, rfl, si⟩ · rintro ⟨s, rfl, si⟩ exact ⟨s, ⟨s, rfl, rfl⟩, si⟩ end DivisionRing end LinearMap
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/RankNullity.lean
import Mathlib.LinearAlgebra.Dimension.Constructions import Mathlib.LinearAlgebra.Dimension.Finite import Mathlib.LinearAlgebra.Isomorphisms import Mathlib.Logic.Equiv.Fin.Rotate /-! # The rank nullity theorem In this file we provide the rank nullity theorem as a typeclass, and prove various corollaries of the theorem. The main definition is `HasRankNullity.{u} R`, which states that 1. Every `R`-module `M : Type u` has a linear independent subset of cardinality `Module.rank R M`. 2. `rank (M ⧸ N) + rank N = rank M` for every `R`-module `M : Type u` and every `N : Submodule R M`. The following instances are provided in mathlib: 1. `DivisionRing.hasRankNullity` for division rings in `LinearAlgebra/Dimension/DivisionRing.lean`. 2. `IsDomain.hasRankNullity` for commutative domains in `LinearAlgebra/Dimension/Localization.lean`. TODO: prove the rank-nullity theorem for `[Ring R] [IsDomain R] [StrongRankCondition R]`. See `nonempty_oreSet_of_strongRankCondition` for a start. -/ universe u v open Function Set Cardinal Submodule LinearMap variable {R} {M M₁ M₂ M₃ : Type u} {M' : Type v} [Ring R] variable [AddCommGroup M] [AddCommGroup M₁] [AddCommGroup M₂] [AddCommGroup M₃] [AddCommGroup M'] variable [Module R M] [Module R M₁] [Module R M₂] [Module R M₃] [Module R M'] /-- `HasRankNullity.{u}` is a class of rings satisfying 1. Every `R`-module `M : Type u` has a linear independent subset of cardinality `Module.rank R M`. 2. `rank (M ⧸ N) + rank N = rank M` for every `R`-module `M : Type u` and every `N : Submodule R M`. Usually such a ring satisfies `HasRankNullity.{w}` for all universes `w`, and the universe argument is there because of technical limitations to universe polymorphism. See `DivisionRing.hasRankNullity` and `IsDomain.hasRankNullity`. 
-/ @[pp_with_univ] class HasRankNullity (R : Type v) [inst : Ring R] : Prop where exists_set_linearIndependent : ∀ (M : Type u) [AddCommGroup M] [Module R M], ∃ s : Set M, #s = Module.rank R M ∧ LinearIndepOn R id s rank_quotient_add_rank : ∀ {M : Type u} [AddCommGroup M] [Module R M] (N : Submodule R M), Module.rank R (M ⧸ N) + Module.rank R N = Module.rank R M variable [HasRankNullity.{u} R] lemma Submodule.rank_quotient_add_rank (N : Submodule R M) : Module.rank R (M ⧸ N) + Module.rank R N = Module.rank R M := HasRankNullity.rank_quotient_add_rank N variable (R M) in lemma exists_set_linearIndependent : ∃ s : Set M, #s = Module.rank R M ∧ LinearIndependent (ι := s) R Subtype.val := HasRankNullity.exists_set_linearIndependent M variable (R) in theorem nontrivial_of_hasRankNullity : Nontrivial R := by refine (subsingleton_or_nontrivial R).resolve_left fun H ↦ ?_ have := rank_quotient_add_rank (R := R) (M := PUnit) ⊥ simp [one_add_one_eq_two] at this attribute [local instance] nontrivial_of_hasRankNullity theorem LinearMap.lift_rank_range_add_rank_ker (f : M →ₗ[R] M') : lift.{u} (Module.rank R (LinearMap.range f)) + lift.{v} (Module.rank R (LinearMap.ker f)) = lift.{v} (Module.rank R M) := by haveI := fun p : Submodule R M => Classical.decEq (M ⧸ p) rw [← f.quotKerEquivRange.lift_rank_eq, ← lift_add, rank_quotient_add_rank] /-- The **rank-nullity theorem** -/ theorem LinearMap.rank_range_add_rank_ker (f : M →ₗ[R] M₁) : Module.rank R (LinearMap.range f) + Module.rank R (LinearMap.ker f) = Module.rank R M := by haveI := fun p : Submodule R M => Classical.decEq (M ⧸ p) rw [← f.quotKerEquivRange.rank_eq, rank_quotient_add_rank] theorem LinearMap.lift_rank_eq_of_surjective {f : M →ₗ[R] M'} (h : Surjective f) : lift.{v} (Module.rank R M) = lift.{u} (Module.rank R M') + lift.{v} (Module.rank R (LinearMap.ker f)) := by rw [← lift_rank_range_add_rank_ker f, ← rank_range_of_surjective f h] theorem LinearMap.rank_eq_of_surjective {f : M →ₗ[R] M₁} (h : Surjective f) : 
Module.rank R M = Module.rank R M₁ + Module.rank R (LinearMap.ker f) := by rw [← rank_range_add_rank_ker f, ← rank_range_of_surjective f h] theorem exists_linearIndepOn_of_lt_rank [StrongRankCondition R] {s : Set M} (hs : LinearIndepOn R id s) : ∃ t, s ⊆ t ∧ #t = Module.rank R M ∧ LinearIndepOn R id t := by obtain ⟨t, ht, ht'⟩ := exists_set_linearIndependent R (M ⧸ Submodule.span R s) choose sec hsec using Submodule.mkQ_surjective (Submodule.span R s) have hsec' : (Submodule.mkQ _) ∘ sec = _root_.id := funext hsec have hst : Disjoint s (sec '' t) := by rw [Set.disjoint_iff] rintro _ ⟨hxs, ⟨x, hxt, rfl⟩⟩ apply ht'.ne_zero ⟨x, hxt⟩ rw [Subtype.coe_mk, ← hsec x,mkQ_apply, Quotient.mk_eq_zero] exact Submodule.subset_span hxs refine ⟨s ∪ sec '' t, subset_union_left, ?_, ?_⟩ · rw [Cardinal.mk_union_of_disjoint hst, Cardinal.mk_image_eq, ht, ← rank_quotient_add_rank (Submodule.span R s), add_comm, rank_span_set hs] exact HasLeftInverse.injective ⟨Submodule.Quotient.mk, hsec⟩ · apply LinearIndepOn.union_id_of_quotient Submodule.subset_span hs rwa [linearIndepOn_iff_image (hsec'.symm ▸ injective_id).injOn.image_of_comp, ← image_comp, hsec', image_id] /-- Given a family of `n` linearly independent vectors in a space of dimension `> n`, one may extend the family by another vector while retaining linear independence. 
-/ theorem exists_linearIndependent_cons_of_lt_rank [StrongRankCondition R] {n : ℕ} {v : Fin n → M} (hv : LinearIndependent R v) (h : n < Module.rank R M) : ∃ (x : M), LinearIndependent R (Fin.cons x v) := by obtain ⟨t, h₁, h₂, h₃⟩ := exists_linearIndepOn_of_lt_rank hv.linearIndepOn_id have : range v ≠ t := by refine fun e ↦ h.ne ?_ rw [← e, ← lift_injective.eq_iff, mk_range_eq_of_injective hv.injective] at h₂ simpa only [mk_fintype, Fintype.card_fin, lift_natCast, lift_id'] using h₂ obtain ⟨x, hx, hx'⟩ := nonempty_of_ssubset (h₁.ssubset_of_ne this) exact ⟨x, (linearIndepOn_id_range_iff (Fin.cons_injective_iff.mpr ⟨hx', hv.injective⟩)).mp (h₃.mono (Fin.range_cons x v ▸ insert_subset hx h₁))⟩ /-- Given a family of `n` linearly independent vectors in a space of dimension `> n`, one may extend the family by another vector while retaining linear independence. -/ theorem exists_linearIndependent_snoc_of_lt_rank [StrongRankCondition R] {n : ℕ} {v : Fin n → M} (hv : LinearIndependent R v) (h : n < Module.rank R M) : ∃ (x : M), LinearIndependent R (Fin.snoc v x) := by simp only [Fin.snoc_eq_cons_rotate] have ⟨x, hx⟩ := exists_linearIndependent_cons_of_lt_rank hv h exact ⟨x, hx.comp _ (finRotate _).injective⟩ /-- Given a nonzero vector in a space of dimension `> 1`, one may find another vector linearly independent of the first one. 
-/ theorem exists_linearIndependent_pair_of_one_lt_rank [StrongRankCondition R] [NoZeroSMulDivisors R M] (h : 1 < Module.rank R M) {x : M} (hx : x ≠ 0) : ∃ y, LinearIndependent R ![x, y] := by obtain ⟨y, hy⟩ := exists_linearIndependent_snoc_of_lt_rank (.of_subsingleton (v := ![x]) 0 hx) h have : Fin.snoc ![x] y = ![x, y] := by simp [Fin.snoc, ← List.ofFn_inj] rw [this] at hy exact ⟨y, hy⟩ theorem Submodule.exists_smul_notMem_of_rank_lt {N : Submodule R M} (h : Module.rank R N < Module.rank R M) : ∃ m : M, ∀ r : R, r ≠ 0 → r • m ∉ N := by have : Module.rank R (M ⧸ N) ≠ 0 := by intro e rw [← rank_quotient_add_rank N, e, zero_add] at h exact h.ne rfl rw [ne_eq, rank_eq_zero_iff, (Submodule.Quotient.mk_surjective N).forall] at this push_neg at this simp_rw [← N.mkQ_apply, ← map_smul, N.mkQ_apply, ne_eq, Submodule.Quotient.mk_eq_zero] at this exact this @[deprecated (since := "2025-05-23")] alias Submodule.exists_smul_not_mem_of_rank_lt := Submodule.exists_smul_notMem_of_rank_lt open Cardinal Basis Submodule Function Set LinearMap theorem Submodule.rank_sup_add_rank_inf_eq (s t : Submodule R M) : Module.rank R (s ⊔ t : Submodule R M) + Module.rank R (s ⊓ t : Submodule R M) = Module.rank R s + Module.rank R t := by conv_rhs => enter [2]; rw [show t = (s ⊔ t) ⊓ t by simp] rw [← rank_quotient_add_rank ((s ⊓ t).comap s.subtype), ← rank_quotient_add_rank (t.comap (s ⊔ t).subtype), comap_inf, (quotientInfEquivSupQuotient s t).rank_eq, ← comap_inf, (equivSubtypeMap s (comap _ (s ⊓ t))).rank_eq, Submodule.map_comap_subtype, (equivSubtypeMap (s ⊔ t) (comap _ t)).rank_eq, Submodule.map_comap_subtype, ← inf_assoc, inf_idem, add_right_comm] theorem Submodule.rank_add_le_rank_add_rank (s t : Submodule R M) : Module.rank R (s ⊔ t : Submodule R M) ≤ Module.rank R s + Module.rank R t := by rw [← Submodule.rank_sup_add_rank_inf_eq] exact self_le_add_right _ _ section Finrank open Submodule Module variable [StrongRankCondition R] /-- Given a family of `n` linearly independent vectors in 
a finite-dimensional space of dimension `> n`, one may extend the family by another vector while retaining linear independence. -/ theorem exists_linearIndependent_snoc_of_lt_finrank {n : ℕ} {v : Fin n → M} (hv : LinearIndependent R v) (h : n < finrank R M) : ∃ (x : M), LinearIndependent R (Fin.snoc v x) := exists_linearIndependent_snoc_of_lt_rank hv (lt_rank_of_lt_finrank h) /-- Given a family of `n` linearly independent vectors in a finite-dimensional space of dimension `> n`, one may extend the family by another vector while retaining linear independence. -/ theorem exists_linearIndependent_cons_of_lt_finrank {n : ℕ} {v : Fin n → M} (hv : LinearIndependent R v) (h : n < finrank R M) : ∃ (x : M), LinearIndependent R (Fin.cons x v) := exists_linearIndependent_cons_of_lt_rank hv (lt_rank_of_lt_finrank h) /-- Given a nonzero vector in a finite-dimensional space of dimension `> 1`, one may find another vector linearly independent of the first one. -/ theorem exists_linearIndependent_pair_of_one_lt_finrank [NoZeroSMulDivisors R M] (h : 1 < finrank R M) {x : M} (hx : x ≠ 0) : ∃ y, LinearIndependent R ![x, y] := exists_linearIndependent_pair_of_one_lt_rank (one_lt_rank_of_one_lt_finrank h) hx /-- Rank-nullity theorem using `finrank`. -/ lemma Submodule.finrank_quotient_add_finrank [Module.Finite R M] (N : Submodule R M) : finrank R (M ⧸ N) + finrank R N = finrank R M := by rw [← Nat.cast_inj (R := Cardinal), Module.finrank_eq_rank, Nat.cast_add, Module.finrank_eq_rank, Submodule.finrank_eq_rank] exact HasRankNullity.rank_quotient_add_rank _ /-- Rank-nullity theorem using `finrank` and subtraction. 
-/ lemma Submodule.finrank_quotient [Module.Finite R M] {S : Type*} [Ring S] [SMul R S] [Module S M] [IsScalarTower R S M] (N : Submodule S M) : finrank R (M ⧸ N) = finrank R M - finrank R N := by rw [← (N.restrictScalars R).finrank_quotient_add_finrank] exact Nat.eq_sub_of_add_eq rfl lemma Submodule.disjoint_ker_of_finrank_le [NoZeroSMulDivisors R M] {N : Type*} [AddCommGroup N] [Module R N] {L : Submodule R M} [Module.Finite R L] (f : M →ₗ[R] N) (h : finrank R L ≤ finrank R (L.map f)) : Disjoint L (LinearMap.ker f) := by refine disjoint_iff.mpr <| LinearMap.injective_domRestrict_iff.mp <| LinearMap.ker_eq_bot.mp <| Submodule.rank_eq_zero.mp ?_ rw [← Submodule.finrank_eq_rank, Nat.cast_eq_zero] rw [← LinearMap.range_domRestrict] at h have := (LinearMap.ker (f.domRestrict L)).finrank_quotient_add_finrank rw [LinearEquiv.finrank_eq (f.domRestrict L).quotKerEquivRange] at this omega end Finrank section open Submodule Module variable [StrongRankCondition R] [Module.Finite R M] lemma Submodule.exists_of_finrank_lt (N : Submodule R M) (h : finrank R N < finrank R M) : ∃ m : M, ∀ r : R, r ≠ 0 → r • m ∉ N := by obtain ⟨s, hs, hs'⟩ := exists_finset_linearIndependent_of_le_finrank (R := R) (M := M ⧸ N) le_rfl obtain ⟨v, hv⟩ : s.Nonempty := by rwa [Finset.nonempty_iff_ne_empty, ne_eq, ← Finset.card_eq_zero, hs, finrank_quotient, tsub_eq_zero_iff_le, not_le] obtain ⟨v, rfl⟩ := N.mkQ_surjective v refine ⟨v, fun r hr ↦ mt ?_ hr⟩ have := linearIndependent_iff.mp hs' (Finsupp.single ⟨_, hv⟩ r) rwa [Finsupp.linearCombination_single, Finsupp.single_eq_zero, ← LinearMap.map_smul, Submodule.mkQ_apply, Submodule.Quotient.mk_eq_zero] at this end
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Subsingleton.lean
import Mathlib.LinearAlgebra.Dimension.Basic /-! # Dimension of trivial modules -/ variable (R M : Type*) [Semiring R] [AddCommMonoid M] [Module R M] section variable [Nontrivial R] /-- See `rank_subsingleton` that assumes `Subsingleton R` instead. -/ @[simp, nontriviality] theorem rank_subsingleton' [Subsingleton M] : Module.rank R M = 0 := by rw [Module.rank, ← bot_eq_zero, eq_bot_iff] exact ciSup_le fun s ↦ by simp [(linearIndependent_subsingleton_iff _).mp s.2] theorem rank_punit : Module.rank R PUnit = 0 := rank_subsingleton' _ _ theorem rank_bot : Module.rank R (⊥ : Submodule R M) = 0 := rank_subsingleton' _ _ end @[nontriviality, simp] theorem rank_subsingleton [Subsingleton R] : Module.rank R M = 1 := by haveI := Module.subsingleton R M have : Nonempty { s : Set M // LinearIndepOn R id s} := ⟨⟨∅, linearIndepOn_empty _ _⟩⟩ rw [Module.rank_def, ciSup_eq_of_forall_le_of_forall_lt_exists_gt] · rintro ⟨s, hs⟩ rw [Cardinal.mk_le_one_iff_set_subsingleton] apply Set.subsingleton_of_subsingleton intro w hw exact ⟨⟨{0}, LinearIndepOn.of_subsingleton⟩, hw.trans_eq (Cardinal.mk_singleton _).symm⟩
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/Free.lean
import Mathlib.LinearAlgebra.Dimension.StrongRankCondition import Mathlib.LinearAlgebra.FreeModule.Finite.Basic import Mathlib.RingTheory.AlgebraTower import Mathlib.SetTheory.Cardinal.Finsupp /-! # Rank of free modules ## Main result - `LinearEquiv.nonempty_equiv_iff_lift_rank_eq`: Two free modules are isomorphic iff they have the same dimension. - `Module.finBasis`: An arbitrary basis of a finite free module indexed by `Fin n` given `finrank R M = n`. -/ noncomputable section universe u v v' w open Cardinal Basis Submodule Function Set Module section Tower variable (F : Type u) (K : Type v) (A : Type w) variable [Semiring F] [Semiring K] [AddCommMonoid A] variable [Module F K] [Module K A] [Module F A] [IsScalarTower F K A] variable [StrongRankCondition F] [StrongRankCondition K] [Module.Free F K] [Module.Free K A] /-- Tower law: if `A` is a `K`-module and `K` is an extension of `F` then $\operatorname{rank}_F(A) = \operatorname{rank}_F(K) * \operatorname{rank}_K(A)$. The universe polymorphic version of `rank_mul_rank` below. -/ theorem lift_rank_mul_lift_rank : Cardinal.lift.{w} (Module.rank F K) * Cardinal.lift.{v} (Module.rank K A) = Cardinal.lift.{v} (Module.rank F A) := by let b := Module.Free.chooseBasis F K let c := Module.Free.chooseBasis K A rw [← (Module.rank F K).lift_id, ← b.mk_eq_rank, ← (Module.rank K A).lift_id, ← c.mk_eq_rank, ← lift_umax.{w, v}, ← (b.smulTower c).mk_eq_rank, mk_prod, lift_mul, lift_lift, lift_lift, lift_lift, lift_lift, lift_umax.{v, w}] /-- Tower law: if `A` is a `K`-module and `K` is an extension of `F` then $\operatorname{rank}_F(A) = \operatorname{rank}_F(K) * \operatorname{rank}_K(A)$. This is a simpler version of `lift_rank_mul_lift_rank` with `K` and `A` in the same universe. 
-/ @[stacks 09G9] theorem rank_mul_rank (A : Type v) [AddCommMonoid A] [Module K A] [Module F A] [IsScalarTower F K A] [Module.Free K A] : Module.rank F K * Module.rank K A = Module.rank F A := by convert lift_rank_mul_lift_rank F K A <;> rw [lift_id] /-- Tower law: if `A` is a `K`-module and `K` is an extension of `F` then $\operatorname{rank}_F(A) = \operatorname{rank}_F(K) * \operatorname{rank}_K(A)$. -/ theorem Module.finrank_mul_finrank : finrank F K * finrank K A = finrank F A := by simp_rw [finrank] rw [← toNat_lift.{w} (Module.rank F K), ← toNat_lift.{v} (Module.rank K A), ← toNat_mul, lift_rank_mul_lift_rank, toNat_lift] end Tower variable {R : Type u} {S : Type*} {M M₁ : Type v} {M' : Type v'} variable [Semiring R] [StrongRankCondition R] variable [AddCommMonoid M] [Module R M] [Module.Free R M] variable [AddCommMonoid M'] [Module R M'] [Module.Free R M'] variable [AddCommMonoid M₁] [Module R M₁] [Module.Free R M₁] namespace Module.Free variable (R M) /-- The rank of a free module `M` over `R` is the cardinality of `ChooseBasisIndex R M`. -/ theorem rank_eq_card_chooseBasisIndex : Module.rank R M = #(ChooseBasisIndex R M) := (chooseBasis R M).mk_eq_rank''.symm /-- The `finrank` of a free module `M` over `R` is the cardinality of `ChooseBasisIndex R M`. -/ theorem _root_.Module.finrank_eq_card_chooseBasisIndex [Module.Finite R M] : finrank R M = Fintype.card (ChooseBasisIndex R M) := by simp [finrank, rank_eq_card_chooseBasisIndex] /-- The rank of a free module `M` over an infinite scalar ring `R` is the cardinality of `M` whenever `#R < #M`. 
-/ lemma rank_eq_mk_of_infinite_lt [Infinite R] (h_lt : lift.{v} #R < lift.{u} #M) : Module.rank R M = #M := by have : Infinite M := infinite_iff.mpr <| lift_le.mp <| le_trans (by simp) h_lt.le have h : lift #M = lift #(ChooseBasisIndex R M →₀ R) := lift_mk_eq'.mpr ⟨(chooseBasis R M).repr⟩ simp only [mk_finsupp_lift_of_infinite', ← rank_eq_card_chooseBasisIndex, lift_max, lift_lift] at h refine lift_inj.mp ((max_eq_iff.mp h.symm).resolve_right <| not_and_of_not_left _ ?_).left exact (lift_umax.{v, u}.symm ▸ h_lt).ne end Module.Free open Module.Free open Cardinal /-- Two vector spaces are isomorphic if they have the same dimension. -/ theorem nonempty_linearEquiv_of_lift_rank_eq (cnd : Cardinal.lift.{v'} (Module.rank R M) = Cardinal.lift.{v} (Module.rank R M')) : Nonempty (M ≃ₗ[R] M') := by obtain ⟨⟨α, B⟩⟩ := Module.Free.exists_basis (R := R) (M := M) obtain ⟨⟨β, B'⟩⟩ := Module.Free.exists_basis (R := R) (M := M') have : Cardinal.lift.{v', v} #α = Cardinal.lift.{v, v'} #β := by rw [B.mk_eq_rank'', cnd, B'.mk_eq_rank''] exact (Cardinal.lift_mk_eq.{v, v', 0}.1 this).map (B.equiv B') /-- Two vector spaces are isomorphic if they have the same dimension. -/ theorem nonempty_linearEquiv_of_rank_eq (cond : Module.rank R M = Module.rank R M₁) : Nonempty (M ≃ₗ[R] M₁) := nonempty_linearEquiv_of_lift_rank_eq <| congr_arg _ cond section variable (M M' M₁) /-- Two vector spaces are isomorphic if they have the same dimension. -/ def LinearEquiv.ofLiftRankEq (cond : Cardinal.lift.{v'} (Module.rank R M) = Cardinal.lift.{v} (Module.rank R M')) : M ≃ₗ[R] M' := Classical.choice (nonempty_linearEquiv_of_lift_rank_eq cond) /-- Two vector spaces are isomorphic if they have the same dimension. -/ def LinearEquiv.ofRankEq (cond : Module.rank R M = Module.rank R M₁) : M ≃ₗ[R] M₁ := Classical.choice (nonempty_linearEquiv_of_rank_eq cond) end /-- Two vector spaces are isomorphic if and only if they have the same dimension. 
-/ theorem LinearEquiv.nonempty_equiv_iff_lift_rank_eq : Nonempty (M ≃ₗ[R] M') ↔ Cardinal.lift.{v'} (Module.rank R M) = Cardinal.lift.{v} (Module.rank R M') := ⟨fun ⟨h⟩ => LinearEquiv.lift_rank_eq h, fun h => nonempty_linearEquiv_of_lift_rank_eq h⟩ /-- Two vector spaces are isomorphic if and only if they have the same dimension. -/ theorem LinearEquiv.nonempty_equiv_iff_rank_eq : Nonempty (M ≃ₗ[R] M₁) ↔ Module.rank R M = Module.rank R M₁ := ⟨fun ⟨h⟩ => LinearEquiv.rank_eq h, fun h => nonempty_linearEquiv_of_rank_eq h⟩ /-- Two finite and free modules are isomorphic if they have the same (finite) rank. -/ theorem FiniteDimensional.nonempty_linearEquiv_of_finrank_eq [Module.Finite R M] [Module.Finite R M'] (cond : finrank R M = finrank R M') : Nonempty (M ≃ₗ[R] M') := nonempty_linearEquiv_of_lift_rank_eq <| by simp only [← finrank_eq_rank, cond, lift_natCast] /-- Two finite and free modules are isomorphic if and only if they have the same (finite) rank. -/ theorem FiniteDimensional.nonempty_linearEquiv_iff_finrank_eq [Module.Finite R M] [Module.Finite R M'] : Nonempty (M ≃ₗ[R] M') ↔ finrank R M = finrank R M' := ⟨fun ⟨h⟩ => h.finrank_eq, fun h => nonempty_linearEquiv_of_finrank_eq h⟩ variable (M M') /-- Two finite and free modules are isomorphic if they have the same (finite) rank. -/ noncomputable def LinearEquiv.ofFinrankEq [Module.Finite R M] [Module.Finite R M'] (cond : finrank R M = finrank R M') : M ≃ₗ[R] M' := Classical.choice <| FiniteDimensional.nonempty_linearEquiv_of_finrank_eq cond variable {M M'} namespace Module /-- A free module of rank zero is trivial. -/ lemma subsingleton_of_rank_zero (h : Module.rank R M = 0) : Subsingleton M := by rw [← Basis.mk_eq_rank'' (Module.Free.chooseBasis R M), Cardinal.mk_eq_zero_iff] at h exact (Module.Free.chooseBasis R M).repr.subsingleton /-- See `rank_lt_aleph0` for the inverse direction without `Module.Free R M`. 
-/ lemma rank_lt_aleph0_iff : Module.rank R M < ℵ₀ ↔ Module.Finite R M := by rw [Free.rank_eq_card_chooseBasisIndex, mk_lt_aleph0_iff] exact ⟨fun h ↦ Finite.of_basis (Free.chooseBasis R M), fun I ↦ Finite.of_fintype (Free.ChooseBasisIndex R M)⟩ theorem finrank_of_not_finite (h : ¬Module.Finite R M) : finrank R M = 0 := by rw [finrank, toNat_eq_zero, ← not_lt, Module.rank_lt_aleph0_iff] exact .inr h theorem finite_of_finrank_pos (h : 0 < finrank R M) : Module.Finite R M := by contrapose h simp [finrank_of_not_finite h] theorem finite_of_finrank_eq_succ {n : ℕ} (hn : finrank R M = n.succ) : Module.Finite R M := finite_of_finrank_pos <| by rw [hn]; exact n.succ_pos theorem finite_iff_of_rank_eq_nsmul {W} [AddCommMonoid W] [Module R W] [Module.Free R W] {n : ℕ} (hn : n ≠ 0) (hVW : Module.rank R M = n • Module.rank R W) : Module.Finite R M ↔ Module.Finite R W := by simp only [← rank_lt_aleph0_iff, hVW, nsmul_lt_aleph0_iff_of_ne_zero hn] variable (R S M) in omit [Module.Free R M] in /-- Also see `Module.finrank_top_le_finrank_of_isScalarTower` for a version with different typeclass constraints. -/ lemma finrank_top_le_finrank_of_isScalarTower_of_free [Semiring S] [StrongRankCondition S] [Module S M] [Module R S] [FaithfulSMul R S] [Module.Finite R S] [IsScalarTower R S S] [IsScalarTower R S M] [Module.Free S M] : finrank S M ≤ finrank R M := by by_cases H : Module.Finite S M · have := Module.Finite.trans (R := R) S M exact finrank_top_le_finrank_of_isScalarTower R S M · rw [finrank, Cardinal.toNat_eq_zero.mpr (.inr _)] · exact zero_le _ · rwa [← not_lt, Module.rank_lt_aleph0_iff] variable (R) in /-- Also see `Module.finrank_bot_le_finrank_of_isScalarTower` for a version with different typeclass constraints. 
-/ lemma finrank_bot_le_finrank_of_isScalarTower_of_free (S T : Type*) [Semiring S] [Semiring T] [Module R T] [Module S T] [Module R S] [IsScalarTower R S T] [IsScalarTower S T T] [FaithfulSMul S T] [Module.Finite S T] [Module.Free R S] : finrank R S ≤ finrank R T := by by_cases H : Module.Finite R S · have := Module.Finite.trans (R := R) S T exact finrank_bot_le_finrank_of_isScalarTower R S T · rw [finrank, Cardinal.toNat_eq_zero.mpr (.inr _)] · exact zero_le _ · rwa [← not_lt, Module.rank_lt_aleph0_iff] variable (R M) /-- A finite rank free module has a basis indexed by `Fin (finrank R M)`. -/ noncomputable def finBasis [Module.Finite R M] : Basis (Fin (finrank R M)) R M := (Module.Free.chooseBasis R M).reindex (Fintype.equivFinOfCardEq (finrank_eq_card_chooseBasisIndex R M).symm) /-- A rank `n` free module has a basis indexed by `Fin n`. -/ noncomputable def finBasisOfFinrankEq [Module.Finite R M] {n : ℕ} (hn : finrank R M = n) : Basis (Fin n) R M := (finBasis R M).reindex (finCongr hn) variable {R M} /-- A free module with rank 1 has a basis with one element. -/ noncomputable def basisUnique (ι : Type*) [Unique ι] (h : finrank R M = 1) : Basis ι R M := haveI : Module.Finite R M := Module.finite_of_finrank_pos (_root_.zero_lt_one.trans_le h.symm.le) (finBasisOfFinrankEq R M h).reindex (Equiv.ofUnique _ _) /-- If a finite module of `finrank 1` has a basis, then this basis has a unique element. -/ theorem Basis.nonempty_unique_index_of_finrank_eq_one {ι : Type*} (b : Module.Basis ι R M) (d1 : Module.finrank R M = 1) : Nonempty (Unique ι) := by -- why isn't this an instance? 
have : Nontrivial R := nontrivial_of_invariantBasisNumber R haveI : Module.Finite R M := Module.finite_of_finrank_pos (Nat.lt_of_sub_eq_succ d1) have : Finite ι := Module.Finite.finite_basis b have : Fintype ι := Fintype.ofFinite ι rwa [Module.finrank_eq_card_basis b, Fintype.card_eq_one_iff_nonempty_unique] at d1 theorem nonempty_linearEquiv_of_finrank_eq_one (d1 : Module.finrank R M = 1) : Nonempty (R ≃ₗ[R] M) := by let ⟨ι, b⟩ := (Module.Free.exists_basis R M).some have : Unique ι := (b.nonempty_unique_index_of_finrank_eq_one d1).some exact ⟨((b.equivFun).trans (LinearEquiv.funUnique ι R R)).symm⟩ @[simp] theorem basisUnique_repr_eq_zero_iff {ι : Type*} [Unique ι] {h : finrank R M = 1} {v : M} {i : ι} : (basisUnique ι h).repr v i = 0 ↔ v = 0 := ⟨fun hv => (basisUnique ι h).repr.map_eq_zero_iff.mp (Finsupp.ext fun j => Subsingleton.elim i j ▸ hv), fun hv => by rw [hv, LinearEquiv.map_zero, Finsupp.zero_apply]⟩ variable {R : Type*} [CommSemiring R] [StrongRankCondition R] {M : Type*} [AddCommMonoid M] [Module R M] [Module.Free R M] theorem _root_.LinearMap.existsUnique_eq_smul_id_of_finrank_eq_one (d1 : Module.finrank R M = 1) (u : M →ₗ[R] M) : ∃! c : R, u = c • LinearMap.id := by let e := (nonempty_linearEquiv_of_finrank_eq_one d1).some set c := e.symm (u (e 1)) with hc suffices u = c • LinearMap.id by use c simp only [this, true_and] intro d hcd rw [LinearMap.ext_iff] at hcd simpa using (LinearEquiv.congr_arg (e := e.symm) (hcd (e 1))).symm ext x have (x : M) : x = (e.symm x) • (e 1) := by simp [← LinearEquiv.map_smul] rw [this x] simp only [hc, map_smul, LinearMap.smul_apply, LinearMap.id_coe, id_eq] rw [← this] /-- Endomorphisms of a free module of rank one are homotheties. 
-/ @[simps apply] noncomputable def _root_.LinearEquiv.smul_id_of_finrank_eq_one (d1 : Module.finrank R M = 1) : R ≃ₗ[R] (M →ₗ[R] M) where toFun := fun c ↦ c • LinearMap.id map_add' c d := by ext; simp [add_smul] map_smul' c d := by ext; simp [mul_smul] invFun u := (u.existsUnique_eq_smul_id_of_finrank_eq_one d1).choose left_inv c := by simp [← (LinearMap.existsUnique_eq_smul_id_of_finrank_eq_one d1 _).choose_spec.2 c] right_inv u := ((u.existsUnique_eq_smul_id_of_finrank_eq_one d1).choose_spec.1).symm end Module namespace Algebra instance (R S : Type*) [CommSemiring R] [StrongRankCondition R] [Semiring S] [Algebra R S] [IsQuadraticExtension R S] : Module.Finite R S := finite_of_finrank_eq_succ <| IsQuadraticExtension.finrank_eq_two R S end Algebra
.lake/packages/mathlib/Mathlib/LinearAlgebra/Dimension/FreeAndStrongRankCondition.lean
import Mathlib.LinearAlgebra.Dimension.Constructions import Mathlib.LinearAlgebra.Dimension.Subsingleton /-! # Some results on free modules over rings satisfying strong rank condition This file contains some results on free modules over rings satisfying strong rank condition. Most of them are generalized from the same result assuming the base ring being division ring, and are moved from the files `Mathlib/LinearAlgebra/Dimension/DivisionRing.lean` and `Mathlib/LinearAlgebra/FiniteDimensional.lean`. -/ open Cardinal Module Module Set Submodule universe u v section Module variable {K : Type u} {V : Type v} [Ring K] [StrongRankCondition K] [AddCommGroup V] [Module K V] /-- The `ι` indexed basis on `V`, where `ι` is an empty type and `V` is zero-dimensional. See also `Module.finBasis`. -/ noncomputable def Basis.ofRankEqZero [Module.Free K V] {ι : Type*} [IsEmpty ι] (hV : Module.rank K V = 0) : Basis ι K V := haveI : Subsingleton V := by obtain ⟨_, b⟩ := Module.Free.exists_basis (R := K) (M := V) haveI := mk_eq_zero_iff.1 (hV ▸ b.mk_eq_rank'') exact b.repr.toEquiv.subsingleton Basis.empty _ @[simp] theorem Basis.ofRankEqZero_apply [Module.Free K V] {ι : Type*} [IsEmpty ι] (hV : Module.rank K V = 0) (i : ι) : Basis.ofRankEqZero hV i = 0 := rfl theorem le_rank_iff_exists_linearIndependent [Module.Free K V] {c : Cardinal} : c ≤ Module.rank K V ↔ ∃ s : Set V, #s = c ∧ LinearIndepOn K id s := by haveI := nontrivial_of_invariantBasisNumber K constructor · intro h obtain ⟨κ, t'⟩ := Module.Free.exists_basis (R := K) (M := V) let t := t'.reindexRange have : LinearIndepOn K id (Set.range t') := by convert t.linearIndependent.linearIndepOn_id ext simp [t] rw [← t.mk_eq_rank'', le_mk_iff_exists_subset] at h rcases h with ⟨s, hst, hsc⟩ exact ⟨s, hsc, this.mono hst⟩ · rintro ⟨s, rfl, si⟩ exact si.cardinal_le_rank theorem le_rank_iff_exists_linearIndependent_finset [Module.Free K V] {n : ℕ} : ↑n ≤ Module.rank K V ↔ ∃ s : Finset V, s.card = n ∧ LinearIndependent K ((↑) : ↥(s : Set V) 
→ V) := by simp only [le_rank_iff_exists_linearIndependent, mk_set_eq_nat_iff_finset] constructor · rintro ⟨s, ⟨t, rfl, rfl⟩, si⟩ exact ⟨t, rfl, si⟩ · rintro ⟨s, rfl, si⟩ exact ⟨s, ⟨s, rfl, rfl⟩, si⟩ /-- A vector space has dimension at most `1` if and only if there is a single vector of which all vectors are multiples. -/ theorem rank_le_one_iff [Module.Free K V] : Module.rank K V ≤ 1 ↔ ∃ v₀ : V, ∀ v, ∃ r : K, r • v₀ = v := by obtain ⟨κ, b⟩ := Module.Free.exists_basis (R := K) (M := V) constructor · intro hd rw [← b.mk_eq_rank'', le_one_iff_subsingleton] at hd rcases isEmpty_or_nonempty κ with hb | ⟨⟨i⟩⟩ · use 0 have h' : ∀ v : V, v = 0 := by simpa [range_eq_empty, Submodule.eq_bot_iff] using b.span_eq.symm intro v simp [h' v] · use b i have h' : (K ∙ b i) = ⊤ := (subsingleton_range b).eq_singleton_of_mem (mem_range_self i) ▸ b.span_eq intro v have hv : v ∈ (⊤ : Submodule K V) := mem_top rwa [← h', mem_span_singleton] at hv · rintro ⟨v₀, hv₀⟩ have h : (K ∙ v₀) = ⊤ := by ext simp [mem_span_singleton, hv₀] rw [← rank_top, ← h] refine (rank_span_le _).trans_eq ?_ simp /-- A vector space has dimension `1` if and only if there is a single non-zero vector of which all vectors are multiples. 
-/ theorem rank_eq_one_iff [Module.Free K V] : Module.rank K V = 1 ↔ ∃ v₀ : V, v₀ ≠ 0 ∧ ∀ v, ∃ r : K, r • v₀ = v := by haveI := nontrivial_of_invariantBasisNumber K refine ⟨fun h ↦ ?_, fun ⟨v₀, h, hv⟩ ↦ (rank_le_one_iff.2 ⟨v₀, hv⟩).antisymm ?_⟩ · obtain ⟨v₀, hv⟩ := rank_le_one_iff.1 h.le refine ⟨v₀, fun hzero ↦ ?_, hv⟩ simp_rw [hzero, smul_zero, exists_const] at hv haveI : Subsingleton V := .intro fun _ _ ↦ by simp_rw [← hv] exact one_ne_zero (h ▸ rank_subsingleton' K V) · by_contra H rw [not_le, lt_one_iff_zero] at H obtain ⟨κ, b⟩ := Module.Free.exists_basis (R := K) (M := V) haveI := mk_eq_zero_iff.1 (H ▸ b.mk_eq_rank'') haveI := b.repr.toEquiv.subsingleton exact h (Subsingleton.elim _ _) /-- A submodule has dimension at most `1` if and only if there is a single vector in the submodule such that the submodule is contained in its span. -/ theorem rank_submodule_le_one_iff (s : Submodule K V) [Module.Free K s] : Module.rank K s ≤ 1 ↔ ∃ v₀ ∈ s, s ≤ K ∙ v₀ := by simp_rw [rank_le_one_iff, le_span_singleton_iff] simp /-- A submodule has dimension `1` if and only if there is a single non-zero vector in the submodule such that the submodule is contained in its span. -/ theorem rank_submodule_eq_one_iff (s : Submodule K V) [Module.Free K s] : Module.rank K s = 1 ↔ ∃ v₀ ∈ s, v₀ ≠ 0 ∧ s ≤ K ∙ v₀ := by simp_rw [rank_eq_one_iff, le_span_singleton_iff] refine ⟨fun ⟨⟨v₀, hv₀⟩, H, h⟩ ↦ ⟨v₀, hv₀, fun h' ↦ by simp only [h', ne_eq] at H; exact H rfl, fun v hv ↦ ?_⟩, fun ⟨v₀, hv₀, H, h⟩ ↦ ⟨⟨v₀, hv₀⟩, fun h' ↦ H (by rwa [AddSubmonoid.mk_eq_zero] at h'), fun ⟨v, hv⟩ ↦ ?_⟩⟩ · obtain ⟨r, hr⟩ := h ⟨v, hv⟩ exact ⟨r, by rwa [Subtype.ext_iff, coe_smul] at hr⟩ · obtain ⟨r, hr⟩ := h v hv exact ⟨r, by rwa [Subtype.ext_iff, coe_smul]⟩ /-- A submodule has dimension at most `1` if and only if there is a single vector, not necessarily in the submodule, such that the submodule is contained in its span. 
-/ theorem rank_submodule_le_one_iff' (s : Submodule K V) [Module.Free K s] : Module.rank K s ≤ 1 ↔ ∃ v₀, s ≤ K ∙ v₀ := by haveI := nontrivial_of_invariantBasisNumber K constructor · rw [rank_submodule_le_one_iff] rintro ⟨v₀, _, h⟩ exact ⟨v₀, h⟩ · rintro ⟨v₀, h⟩ obtain ⟨κ, b⟩ := Module.Free.exists_basis (R := K) (M := s) simpa [b.mk_eq_rank''] using b.linearIndependent.map' _ (ker_inclusion _ _ h) |>.cardinal_le_rank.trans (rank_span_le {v₀}) theorem Submodule.rank_le_one_iff_isPrincipal (W : Submodule K V) [Module.Free K W] : Module.rank K W ≤ 1 ↔ W.IsPrincipal := by simp only [rank_le_one_iff, Submodule.isPrincipal_iff, le_antisymm_iff, le_span_singleton_iff, span_singleton_le_iff_mem] constructor · rintro ⟨⟨m, hm⟩, hm'⟩ choose f hf using hm' exact ⟨m, ⟨fun v hv => ⟨f ⟨v, hv⟩, congr_arg ((↑) : W → V) (hf ⟨v, hv⟩)⟩, hm⟩⟩ · rintro ⟨a, ⟨h, ha⟩⟩ choose f hf using h exact ⟨⟨a, ha⟩, fun v => ⟨f v.1 v.2, Subtype.ext (hf v.1 v.2)⟩⟩ theorem Module.rank_le_one_iff_top_isPrincipal [Module.Free K V] : Module.rank K V ≤ 1 ↔ (⊤ : Submodule K V).IsPrincipal := by haveI := Module.Free.of_equiv (topEquiv (R := K) (M := V)).symm rw [← Submodule.rank_le_one_iff_isPrincipal, rank_top] /-- A module has dimension 1 iff there is some `v : V` so `{v}` is a basis. See also `Module.Basis.nonempty_unique_index_of_finrank_eq_one` -/ theorem finrank_eq_one_iff [Module.Free K V] (ι : Type*) [Unique ι] : finrank K V = 1 ↔ Nonempty (Basis ι K V) := by constructor · intro h exact ⟨Module.basisUnique ι h⟩ · rintro ⟨b⟩ simpa using finrank_eq_card_basis b /-- A module has dimension 1 iff there is some nonzero `v : V` so every vector is a multiple of `v`. -/ theorem finrank_eq_one_iff' [Module.Free K V] : finrank K V = 1 ↔ ∃ v ≠ 0, ∀ w : V, ∃ c : K, c • v = w := by rw [← rank_eq_one_iff] exact toNat_eq_iff one_ne_zero /-- A finite-dimensional module has dimension at most 1 iff there is some `v : V` so every vector is a multiple of `v`. 
-/ theorem finrank_le_one_iff [Module.Free K V] [Module.Finite K V] : finrank K V ≤ 1 ↔ ∃ v : V, ∀ w : V, ∃ c : K, c • v = w := by rw [← rank_le_one_iff, ← finrank_eq_rank, Nat.cast_le_one] theorem Submodule.finrank_le_one_iff_isPrincipal (W : Submodule K V) [Module.Free K W] [Module.Finite K W] : finrank K W ≤ 1 ↔ W.IsPrincipal := by rw [← W.rank_le_one_iff_isPrincipal, ← finrank_eq_rank, Nat.cast_le_one] theorem Module.finrank_le_one_iff_top_isPrincipal [Module.Free K V] [Module.Finite K V] : finrank K V ≤ 1 ↔ (⊤ : Submodule K V).IsPrincipal := by rw [← Module.rank_le_one_iff_top_isPrincipal, ← finrank_eq_rank, Nat.cast_le_one] variable (K V) in theorem lift_cardinalMk_eq_lift_cardinalMk_field_pow_lift_rank [Module.Free K V] [Module.Finite K V] : lift.{u} #V = lift.{v} #K ^ lift.{u} (Module.rank K V) := by haveI := nontrivial_of_invariantBasisNumber K obtain ⟨s, hs⟩ := Module.Free.exists_basis (R := K) (M := V) -- `Module.Finite.finite_basis` is in a much later file, so we copy its proof to here haveI : Finite s := by obtain ⟨t, ht⟩ := ‹Module.Finite K V› exact basis_finite_of_finite_spans t.finite_toSet ht hs have := lift_mk_eq'.2 ⟨hs.repr.toEquiv⟩ rwa [Finsupp.equivFunOnFinite.cardinal_eq, mk_arrow, hs.mk_eq_rank'', lift_power, lift_lift, lift_lift, lift_umax] at this theorem cardinalMk_eq_cardinalMk_field_pow_rank (K V : Type u) [Ring K] [StrongRankCondition K] [AddCommGroup V] [Module K V] [Module.Free K V] [Module.Finite K V] : #V = #K ^ Module.rank K V := by simpa using lift_cardinalMk_eq_lift_cardinalMk_field_pow_lift_rank K V variable (K V) in theorem cardinal_lt_aleph0_of_finiteDimensional [Finite K] [Module.Free K V] [Module.Finite K V] : #V < ℵ₀ := by rw [← lift_lt_aleph0.{v, u}, lift_cardinalMk_eq_lift_cardinalMk_field_pow_lift_rank K V] exact power_lt_aleph0 (lift_lt_aleph0.2 (lt_aleph0_of_finite K)) (lift_lt_aleph0.2 (rank_lt_aleph0 K V)) end Module namespace Subalgebra variable {F E : Type*} [CommRing F] [StrongRankCondition F] [Ring E] [Algebra F 
E] {S : Subalgebra F E} theorem eq_bot_of_rank_le_one (h : Module.rank F S ≤ 1) [Module.Free F S] : S = ⊥ := by nontriviality E obtain ⟨κ, b⟩ := Module.Free.exists_basis (R := F) (M := S) by_cases h1 : Module.rank F S = 1 · refine bot_unique fun x hx ↦ Algebra.mem_bot.2 ?_ rw [← b.mk_eq_rank'', eq_one_iff_unique, ← unique_iff_subsingleton_and_nonempty] at h1 obtain ⟨h1⟩ := h1 obtain ⟨y, hy⟩ := (bijective_algebraMap_of_linearEquiv (b.repr ≪≫ₗ Finsupp.LinearEquiv.finsuppUnique _ _ _).symm).surjective ⟨x, hx⟩ exact ⟨y, congr(Subtype.val $(hy))⟩ haveI := mk_eq_zero_iff.1 (b.mk_eq_rank''.symm ▸ lt_one_iff_zero.1 (h.lt_of_ne h1)) haveI := b.repr.toEquiv.subsingleton exact False.elim <| one_ne_zero congr(S.val $(Subsingleton.elim 1 0)) theorem eq_bot_of_finrank_one (h : finrank F S = 1) [Module.Free F S] : S = ⊥ := by refine Subalgebra.eq_bot_of_rank_le_one ?_ rw [finrank, toNat_eq_one] at h rw [h] @[simp] theorem rank_eq_one_iff [Nontrivial E] [Module.Free F S] : Module.rank F S = 1 ↔ S = ⊥ := by refine ⟨fun h ↦ Subalgebra.eq_bot_of_rank_le_one h.le, ?_⟩ rintro rfl obtain ⟨κ, b⟩ := Module.Free.exists_basis (R := F) (M := (⊥ : Subalgebra F E)) refine le_antisymm ?_ ?_ · have := lift_rank_range_le (Algebra.linearMap F E) rwa [← one_eq_range, rank_self, lift_one, lift_le_one_iff, ← Algebra.toSubmodule_bot, rank_toSubmodule] at this · by_contra H rw [not_le, lt_one_iff_zero] at H haveI := mk_eq_zero_iff.1 (H ▸ b.mk_eq_rank'') haveI := b.repr.toEquiv.subsingleton exact one_ne_zero congr((⊥ : Subalgebra F E).val $(Subsingleton.elim 1 0)) @[simp] theorem finrank_eq_one_iff [Nontrivial E] [Module.Free F S] : finrank F S = 1 ↔ S = ⊥ := by rw [← Subalgebra.rank_eq_one_iff] exact toNat_eq_iff one_ne_zero theorem bot_eq_top_iff_rank_eq_one [Nontrivial E] [Module.Free F E] : (⊥ : Subalgebra F E) = ⊤ ↔ Module.rank F E = 1 := by haveI := Module.Free.of_equiv (Subalgebra.topEquiv (R := F) (A := E)).toLinearEquiv.symm rw [← rank_top, Subalgebra.rank_eq_one_iff, eq_comm] theorem 
bot_eq_top_iff_finrank_eq_one [Nontrivial E] [Module.Free F E] : (⊥ : Subalgebra F E) = ⊤ ↔ finrank F E = 1 := by haveI := Module.Free.of_equiv (Subalgebra.topEquiv (R := F) (A := E)).toLinearEquiv.symm rw [← finrank_top, ← subalgebra_top_finrank_eq_submodule_top_finrank, Subalgebra.finrank_eq_one_iff, eq_comm] alias ⟨_, bot_eq_top_of_rank_eq_one⟩ := bot_eq_top_iff_rank_eq_one alias ⟨_, bot_eq_top_of_finrank_eq_one⟩ := bot_eq_top_iff_finrank_eq_one attribute [simp] bot_eq_top_of_finrank_eq_one bot_eq_top_of_rank_eq_one end Subalgebra